Commit 2afd8445 authored by Jayant Khatkar's avatar Jayant Khatkar

improved plotting, state_storer fixed

parent c883a38a
from DecMCTS import Tree
from copy import deepcopy
from copy import copy, deepcopy
import numpy as np
import numpy.random as rand
from planning_utils import *
......@@ -63,7 +63,10 @@ class State:
def state_storer(data, parent_state, action):
    """
    Build the child State reached by taking `action` from `parent_state`.

    Args:
        data: shared planning data (unused here, required by the Tree callback
            signature — TODO confirm against DecMCTS.Tree).
        parent_state: the State being expanded, or None for the tree root.
        action: the action taken; its start_location becomes the new state's
            position.

    Returns:
        A new State carrying over the parent's action history and timing.
    """
    if parent_state is None:
        # Root of the tree: dummy state at the origin. This state is also
        # used as the Null action when calculating the local reward.
        return State(Position(0, 0, 0))
    # NOTE: THIS WILL NEED TO CHANGE WITH CONTOURS
    state = State(action.start_location)
    # Shallow-copy the history so the parent's list is not mutated.
    state.actions = copy(parent_state.actions)
    state.time_so_far = parent_state.time_so_far
    state.time_wasted = parent_state.time_wasted
    # Presumably State.append records the action and updates timing —
    # confirm against the State class definition.
    state.append(action)
    return state
......@@ -95,6 +98,18 @@ def reward(dat, states):
comm_n = 5
n_robots = 2
def trees2Plan(trees):
    """
    Extract the best action sequence from each robot's tree and
    package them all into a single Plan object.

    Args:
        trees: one DecMCTS Tree per robot.

    Returns:
        A Plan whose actions/time entries are indexed by robot number.
    """
    plan = Plan(list(range(len(trees))))
    for robot_id, tree in enumerate(trees):
        best = tree.my_act_dist.best_action()
        plan.actions[robot_id] = best.actions
        plan.time[robot_id] = best.time_so_far
    return plan
# Plot function
def plot_actions(actions, plan = None):
"""
......@@ -108,8 +123,15 @@ def plot_actions(actions, plan = None):
ss = [p.time for p in actions]
ss = [(p-min(ss))*(max_size-min_size)+min_size for p in ss]
plt.scatter(xs, ys, s = ss)
if plan is not None:
for r in plan.robots:
xs = [0]+[p.start_location[0] for p in plan.actions[r]]
ys = [0]+[p.start_location[1] for p in plan.actions[r]]
plt.plot(xs,ys)
plt.show()
if __name__=="__main__":
trees = [None]*n_robots
for i in range(n_robots):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment