@article{oai:nitech.repo.nii.ac.jp:00005095,
  author    = {松井, 藤五郎 and 犬塚, 信博 and 世木, 博久 and 伊藤, 英則},
  title     = {強化学習結果の再構築への概念学習の適用},
  journal   = {人工知能学会論文誌},
  publisher = {人工知能学会},
  volume    = {17},
  number    = {2},
  pages     = {135--144},
  month     = nov,
  year      = {2002},
  abstract  = {Conventional reinforcement learning has focused on learning in a stable environment. However, an agent may be given another environment which differs from the old environment. Thus, an autonomous agent needs a method to learn efficiently a new policy suited for the new environment. In this paper, we propose a method to adapt to a new environment for an agent which has a task to reach goals. When an agent is provided with a new environment, our method learns a new partial policy using the precondition of agent’s old policy. The precondition of a policy is a condition that says what must be satisfied in order to reach goals by using the policy. Similarly to learning the precondition of an action from the instances of action’s success or failure by using concept learning, our method learns the precondition of a policy from the instances of policy’s success or failure by using concept learning. We describe a method using inductive logic programming (ILP) as a concept learning method. Since ILP provides methods for learning relational knowledge that is not expressible in attribute-value learning, our method can use relational representation for the precondition. We applied our method to a blocks-world problem for evaluation. We have come to conclusion that our method is effective when the cost to carry out the task is high.},
  yomi      = {イヌヅカ, ノブヒロ and セキ, ヒロヒサ},
}