diff --git a/AP-Jupyternotebook.ipynb b/AP-Jupyternotebook.ipynb index af23b264e6280bdbc609630e3bde3d798d47ea4b..2fc688eb9fed6f572255b5d88d640733938837c9 100644 --- a/AP-Jupyternotebook.ipynb +++ b/AP-Jupyternotebook.ipynb @@ -123,11 +123,11 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "metadata": { "ExecuteTime": { - "end_time": "2020-05-17T01:55:20.718217Z", - "start_time": "2020-05-17T01:55:20.707945Z" + "end_time": "2020-05-17T17:05:29.902684Z", + "start_time": "2020-05-17T17:05:29.899973Z" } }, "outputs": [], @@ -322,11 +322,11 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": { "ExecuteTime": { - "end_time": "2020-05-16T18:04:16.102453Z", - "start_time": "2020-05-16T18:04:16.098534Z" + "end_time": "2020-05-17T17:04:38.756056Z", + "start_time": "2020-05-17T17:04:38.514619Z" }, "deletable": false, "nbgrader": { @@ -616,11 +616,11 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 76, "metadata": { "ExecuteTime": { - "end_time": "2020-05-16T00:40:57.167090Z", - "start_time": "2020-05-16T00:40:56.975360Z" + "end_time": "2020-05-17T04:19:41.932269Z", + "start_time": "2020-05-17T04:19:41.904592Z" }, "deletable": false, "nbgrader": { @@ -636,31 +636,14 @@ }, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Expected 6, got: 6. Correct!\n", - "Expected 0, got: 0. Correct!\n", - "Expected 2, got: 2. Correct!\n", - "Expected 2, got: 2. Correct!\n", - "Expected 1, got: 1. Correct!\n", - "Expected 2, got: 2. Correct!\n", - ":constants is not recognized in domain\n", - "Expected 4, got: 4. Correct!\n", - ":constants is not recognized in domain\n", - "Expected 4, got: 4. Correct!\n", - "------------------------------\n", - "Expected 38, got: 38. Correct!\n", - "Expected 0, got: 0. Correct!\n", - "Expected 8, got: 8. Correct!\n", - "Expected 8, got: 8. Correct!\n", - "Expected 2, got: 2. Correct!\n", - "Expected 2, got: 2. Correct!\n", - ":constants is not recognized in domain\n", - "Expected 7, got: 7. Correct!\n", - ":constants is not recognized in domain\n", - "Expected 10, got: 10. 
Correct!\n", - "------------------------------\n" + "ename": "NameError", + "evalue": "name 'MaxHeuristic' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 46\u001b[0;31m \u001b[0mh\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mMaxHeuristic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 47\u001b[0m \u001b[0mtest_heuristic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdwr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpb1_dwr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m6\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0mtest_heuristic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdwr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpb2_dwr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNameError\u001b[0m: name 'MaxHeuristic' is not defined" ] } ], @@ -734,15 +717,15 @@ "\n", "print(\"-\" * 30)\n", "\n", - "# h = FastForwardHeuristic()\n", - "# test_heuristic(dwr, pb1_dwr, h, 16)\n", - "# test_heuristic(dwr, pb2_dwr, h, 0)\n", - "# test_heuristic(tsp, pb1_tsp, h, 5)\n", - "# test_heuristic(tsp, pb2_tsp, h, 5)\n", - "# test_heuristic(dinner, pb1_dinner, h, 2)\n", - "# test_heuristic(dompteur, pb1_dompteur, h, 2)\n", - "# test_heuristic(logistics, pb1_logistics, h, 5)\n", - "# test_heuristic(logistics, pb2_logistics, h, 5)" + "h = FastForwardHeuristic()\n", + "test_heuristic(dwr, pb1_dwr, h, 16)\n", + "test_heuristic(dwr, pb2_dwr, h, 0)\n", + "test_heuristic(tsp, pb1_tsp, h, 5)\n", + "test_heuristic(tsp, pb2_tsp, h, 5)\n", + "test_heuristic(dinner, pb1_dinner, h, 2)\n", + "test_heuristic(dompteur, pb1_dompteur, h, 2)\n", + "test_heuristic(logistics, pb1_logistics, h, 5)\n", + "test_heuristic(logistics, pb2_logistics, h, 5)" ] }, { @@ -1352,7 +1335,7 @@ "\n", "class CriticalPathHeuristic(Heuristic):\n", " \"\"\"\n", - " Haslum\"s H^m Heuristic\n", + " Haslum's H^m Heuristic\n", " \"\"\"\n", "\n", " def __init__(self, m, stats=None):\n", @@ -1441,11 +1424,11 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": 8, "metadata": { "ExecuteTime": { - "end_time": "2020-05-17T03:39:02.794775Z", - "start_time": "2020-05-17T03:39:02.764628Z" + "end_time": "2020-05-17T17:05:47.819217Z", + "start_time": "2020-05-17T17:05:47.809965Z" }, "deletable": false, "nbgrader": { @@ -1503,9 +1486,9 @@ " if applicable(\n", " state, (action.positive_preconditions, action.negative_preconditions)\n", " ):\n", - " state = apply(state, (action.add_effects, action.del_effects)).union(state)\n", + " state = apply(state, (action.add_effects, action.del_effects))\n", " break\n", - "\n", + " \n", " goals_reached = goals[0].intersection(state)\n", " return goals_reached == goals[0]" ] @@ -1532,11 +1515,11 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": 9, "metadata": { "ExecuteTime": { - "end_time": "2020-05-17T03:39:04.126609Z", - "start_time": "2020-05-17T03:39:03.957513Z" + "end_time": "2020-05-17T17:16:56.846648Z", + 
"start_time": "2020-05-17T17:16:56.752994Z" }, "deletable": false, "editable": false, @@ -1741,11 +1724,11 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 12, "metadata": { "ExecuteTime": { - "end_time": "2020-05-16T18:04:38.160670Z", - "start_time": "2020-05-16T18:04:38.154065Z" + "end_time": "2020-05-17T17:42:25.097593Z", + "start_time": "2020-05-17T17:42:25.073069Z" }, "deletable": false, "nbgrader": { @@ -1779,43 +1762,51 @@ " goals: tuple with (positive predicates, negative predicates) of the goal\n", " \"\"\"\n", " frontier = queue.PriorityQueue()\n", - " came_from = {}\n", - " cost_so_far = {}\n", + " \n", + " parent_state = {}\n", + " state_cost = {}\n", + " action_applied = {}\n", + " heuristic_at = {}\n", + "\n", + " parent_state[state] = None\n", + " state_cost[state] = 0\n", + " action_applied[state] = None\n", + " heuristic_at[state] = self.h(actions, state, goals)\n", + "\n", " frontier.put(state, 0)\n", - " came_from[state] = None\n", - " cost_so_far[state] = 0\n", - " goals_missing = goals[0]\n", "\n", " while not frontier.empty():\n", " current_state = frontier.get()\n", - " goals_reached = goals_missing.intersection(current_state)\n", - " if goals_reached:\n", - " goals_missing = goals_missing.difference(goals_reached)\n", - " if not goals_missing:\n", - " # Get the backward path.\n", - " path = []\n", - " while came_from[current_state]:\n", - " path.append(current_state)\n", - " current_state = came_from[current_state]\n", - " # path.append(current)\n", - " # path.reverse()\n", - " return path\n", + "\n", + " # Test whether the goals have been reached.\n", + " if goals[0].issubset(current_state):\n", + " current = current_state\n", + " # Get the backward path.\n", + " path = []\n", + " while current is not None:\n", + " path.append(current)\n", + " current = parent_state[current]\n", + " path.reverse()\n", + " return path\n", "\n", " for action in actions:\n", " # Get actions applicable to current state.\n", " if applicable(\n", - " current_state, (action.positive_preconditions, action.negative_preconditions)\n", + " current_state, (action.positive_preconditions, action.negative_preconditions),\n", " ):\n", - " new_state = apply(current_state, (action.add_effects, action.del_effects))\n", + " next_state = apply(current_state, (action.add_effects, action.del_effects))\n", " # +1 because this is the cost to move from one state to another.\n", - " # When not used, the result is the same.\n", - " cost = cost_so_far[current_state] + 1\n", - " if new_state not in cost_so_far or cost < cost_so_far[new_state]:\n", - " cost_so_far[new_state] = cost\n", - " priority = cost + self.h(actions, new_state, goals)\n", - " frontier.put(new_state, priority)\n", - " came_from[new_state] = current_state\n", - "\n", + " cost_next_state = state_cost[current_state] + 1\n", + " if next_state not in state_cost or cost_next_state < state_cost[next_state]:\n", + " heuristic_value = self.h(actions, next_state, goals)\n", + " if heuristic_value == float(\"inf\"):\n", + " # State non-reachable.\n", + " continue\n", + " state_cost[next_state] = cost_next_state\n", + " heuristic_at[next_state] = heuristic_value\n", + " parent_state[next_state] = current_state\n", + " action_applied[next_state] = action\n", + " frontier.put(next_state, cost_next_state + heuristic_value)\n", " return None" ] }, @@ -1841,11 +1832,11 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 13, "metadata": { "ExecuteTime": { - "end_time": "2020-05-16T18:04:55.198053Z", - 
"start_time": "2020-05-16T18:04:46.178756Z" + "end_time": "2020-05-17T17:43:03.270695Z", + "start_time": "2020-05-17T17:42:27.781616Z" }, "deletable": false, "nbgrader": { @@ -1861,11 +1852,19 @@ }, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Expected 17, got: 174. False!\n", - "Expected 0, got: 0. Correct!\n" + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mplanner\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mHeuristicPlanner\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mplan\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtime\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mplanner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msolve_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdwr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpb1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Expected 17, got:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mplan\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m\". Correct!\"\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mplan\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m17\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m\". 
False!\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mplan\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtime\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mplanner\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msolve_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdwr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpb2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/water/files/Projects/heuristic-planning-2020-1-claudioscheer/pddl/pddl_planner.py\u001b[0m in \u001b[0;36msolve_file\u001b[0;34m(self, domainfile, problemfile)\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstats\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstats\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_space\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mground_actions\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# compute stats\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 33\u001b[0;31m plan = self.solve(\n\u001b[0m\u001b[1;32m 34\u001b[0m \u001b[0mground_actions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mparser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpositive_goals\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnegative_goals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 35\u001b[0m )\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36msolve\u001b[0;34m(self, actions, state, goals)\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0mcost_next_state\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstate_cost\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mcurrent_state\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mnew_state\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mstate_cost\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mcost_next_state\u001b[0m \u001b[0;34m<\u001b[0m \u001b[0mstate_cost\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mnew_state\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 56\u001b[0;31m \u001b[0mheuristic_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mh\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mactions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgoals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 57\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mheuristic_value\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mfloat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"inf\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m \u001b[0;31m# State non-reachable.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/media/water/files/Projects/heuristic-planning-2020-1-claudioscheer/pddl/heuristic.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, actions, initial_state, goals)\u001b[0m\n\u001b[1;32m 
9\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstats\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstats\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mh_calls\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mh\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mactions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minitial_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgoals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mh\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdomain\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minitial_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgoals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36mh\u001b[0;34m(self, actions, state, goals)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mgoals_missing\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0missubset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreachable\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;31m# Get all actions applicable to the current state level.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mlast_state\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfrozenset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0ma\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mactions\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpositive_preconditions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0missubset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreachable\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 13\u001b[0m \u001b[0;31m# The next state will contain all the actions from previous states,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# plus the effects actions when executing the actions applicable to the current state.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mgoals_missing\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0missubset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreachable\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;31m# Get all actions applicable to the current state level.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0mlast_state\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfrozenset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0ma\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mactions\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpositive_preconditions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0missubset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreachable\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 13\u001b[0m \u001b[0;31m# The next state will contain all the actions from previous states,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# plus the effects actions when executing the actions applicable to the current state.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " ] } ], diff --git a/paper/paper.pdf b/paper/paper.pdf index edf23089249d25cc1c3a089bb0ea56df5e7f2411..97f1aec1501448f1679db60d82f0eb02425103c3 100644 Binary files a/paper/paper.pdf and b/paper/paper.pdf differ diff --git a/paper/paper.tex b/paper/paper.tex index 3f742c9c028927578cad2244747064cf6680bf1f..35870d49445536d75e628728e9abec7377251edf 100644 --- a/paper/paper.tex +++ b/paper/paper.tex @@ -60,9 +60,9 @@ Six domains were tested in the implementations. In this section, I explain this \subsection{Blocksworld} \subsection{Dinner} \subsection{Dompteur} -\subsection{DWR} +\subsection{DWR - Dock Worker Robots} \subsection{Logistics} -\subsection{TSP} +\subsection{TSP - Travelling Salesperson Problem} \section{Heuristics}
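
Note on the reworked HeuristicPlanner.solve hunk above: queue.PriorityQueue.put() takes (item, block, timeout), so frontier.put(state, 0) and frontier.put(next_state, cost_next_state + heuristic_value) pass the intended priority as the block flag, and the queue ends up ordering the state objects themselves. Below is a minimal sketch of the usual pattern, assuming the notebook's frozenset states and a heuristic callable h(actions, state, goals); the applicable/apply helpers are restated here with standard STRIPS semantics so the snippet runs on its own, and astar_sketch plus the tie-breaking counter are illustrative names, not code from the repository.

import itertools
import queue


def applicable(state, preconditions):
    # Assumed STRIPS-style helper: positive preconditions hold, negative ones do not.
    positive, negative = preconditions
    return positive.issubset(state) and negative.isdisjoint(state)


def apply(state, effects):
    # Assumed STRIPS-style helper: delete effects removed, add effects inserted.
    add_effects, del_effects = effects
    return frozenset(state).difference(del_effects).union(add_effects)


def astar_sketch(actions, state, goals, h):
    # Priorities must live inside the queued item; the counter breaks ties so
    # two entries with equal f-value never fall back to comparing states.
    counter = itertools.count()
    frontier = queue.PriorityQueue()
    frontier.put((h(actions, state, goals), next(counter), state))
    parent_state = {state: None}
    state_cost = {state: 0}

    while not frontier.empty():
        _, _, current_state = frontier.get()

        # Only positive goals are tested, mirroring the hunk above.
        if goals[0].issubset(current_state):
            # Rebuild the path from the goal state back to the initial state.
            path = []
            while current_state is not None:
                path.append(current_state)
                current_state = parent_state[current_state]
            path.reverse()
            return path

        for action in actions:
            if applicable(
                current_state,
                (action.positive_preconditions, action.negative_preconditions),
            ):
                next_state = apply(current_state, (action.add_effects, action.del_effects))
                cost_next_state = state_cost[current_state] + 1
                if next_state not in state_cost or cost_next_state < state_cost[next_state]:
                    heuristic_value = h(actions, next_state, goals)
                    if heuristic_value == float("inf"):
                        continue  # the heuristic judges this state unreachable
                    state_cost[next_state] = cost_next_state
                    parent_state[next_state] = current_state
                    frontier.put((cost_next_state + heuristic_value, next(counter), next_state))
    return None

The (priority, counter, state) tuples keep equal-priority entries in insertion order and avoid ever comparing two frozenset states against each other.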
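
The KeyboardInterrupt traceback above is sitting inside a reachability loop that only exits once every goal fact becomes reachable; under the delete relaxation, a goal that can never be reached makes such a loop spin indefinitely. Here is a small sketch of a layer expansion with an explicit fixpoint test, assuming the same frozenset facts and ground-action attributes used in the hunks above; relaxed_layers_sketch is an illustrative name, and the returned layer count is only a stand-in value, since the point of the sketch is the termination test rather than the heuristic estimate itself.

def relaxed_layers_sketch(actions, state, goals):
    # Expand delete-relaxed fact layers until every positive goal is reachable,
    # or until a layer adds no new facts (a fixpoint), which means the goals
    # are unreachable and the heuristic should report infinity instead of looping.
    reachable = frozenset(state)
    level = 0
    while not goals[0].issubset(reachable):
        applicable_actions = [
            a for a in actions if a.positive_preconditions.issubset(reachable)
        ]
        next_reachable = reachable
        for action in applicable_actions:
            next_reachable = next_reachable.union(action.add_effects)
        if next_reachable == reachable:
            return float("inf")  # fixpoint reached without the goals: unreachable
        reachable = next_reachable
        level += 1
    return level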