1 import copy
2 import string
3 from teamwork.math.probability import Distribution
4 from teamwork.math.Keys import StateKey,keyConstant
5 from teamwork.math.matrices import epsilon
6 from teamwork.math.KeyedVector import KeyedVector
7 from teamwork.math.KeyedMatrix import KeyedMatrix
8 from teamwork.math.KeyedTree import KeyedTree
9 from Multiagent import MultiagentSystem
10 from xml.dom.minidom import *
11
13 """Base multiagent class that provides rudimentary state and turn-taking infrastructure
14 @ivar order: the base turn-taking order, which is the order set at the beginning of the simulation
15 @type order: L{KeyedVector}
16 @ivar _turnDynamics: the dynamics of turn taking
17 @type _turnDynamics: L{teamwork.math.KeyedTree.KeyedTree}
18 @cvar turnFeature: the name of the feature labeling the turn status of each agent
19 @type turnFeature: C{str}
20 @ivar time: the number of epochs passed
21 @type time: C{int}
22 @cvar threshold: the activation threshold for the agent's L{turnFeature} value
23 """
24 turnFeature = '_turn'
25 threshold = 0.5
26
46
60
# NOTE(review): the enclosing `def` line is missing from this extract
# (the embedded numbering jumps from 60 to 62); the body suggests a
# zero-argument accessor over the state distribution's keys -- confirm
# the method name/signature against the original module before editing.
62 """
63 @return: dictionary containing relevant state keys
64 @rtype: C{dict:L{StateKey}S{->}boolean}
65 """
66 return self.state.domainKeys()
67
# NOTE(review): `def` header (embedded line 68) is missing from this
# extract; this is a trivial accessor that returns the simulation's
# state distribution object itself (no copy is made).
69 """
70 @return: the probability over the current states of all member agents
71 @rtype: L{Distribution}"""
72 return self.state
73
# NOTE(review): `def` header (embedded line 74) is missing from this
# extract; body takes (entity, feature) per the docstring below.
75 """Removes the given feature from the state vector
76 @param entity: the entity on whom the feature currently exists
77 @type entity: str
78 @param feature: the state feature to remove
79 @type feature: str
80 """
# Build the key naming this entity/feature dimension, then rebind the
# state to the distribution with that key marginalized out (presumably
# dropping the dimension entirely -- confirm Distribution.marginalize).
81 key = StateKey({'entity':entity,'feature':feature})
82 self.state = self.state.marginalize(key)
83
# NOTE(review): `def` header (embedded line 84) is missing from this
# extract; appears to be a no-argument accessor over self.order.
85 """
86 @return: the sequence of entities who will act in this simulation
87 @rtype: str[]
88 """
# Sort the (key, activation) pairs by descending activation value.
# NOTE(review): cmp-style list.sort comparator and list-returning
# map/filter are Python-2-only; port to key=/comprehensions if this
# module is ever migrated to Python 3.
89 entries = self.order.items()
90 entries.sort(lambda x,y:-cmp(x[1],y[1]))
# Keep only StateKey entries (skipping e.g. the constant key) and
# project each down to its entity name.
91 return map(lambda k:k['entity'],
92 filter(lambda k:isinstance(k,StateKey),
93 map(lambda x:x[0],entries)))
94
def initializeOrder(self):
    """Re-initializes the turn-taking order, in case of any addition/deletion of agents."""
    # Rebuild the base order vector from the current membership, then
    # drop every cached turn-dynamics tree: each was built against the
    # old set of turn keys and is now stale.
    self.order = self.generateOrder()
    self._turnDynamics.clear()
99
def generateOrder(self,entities=None):
    """Creates a new order vector
    @param entities: the agents to include in the turn order (defaults to all agents currently able to act)
    @return: the turn state vector suitable for the initial state of the simulation
    @rtype: L{KeyedVector}
    """
    # Cached turn dynamics were computed against the old order keys
    self._turnDynamics.clear()
    if entities is None:
        entities = self.activeMembers()
    # NOTE: sorts the caller's list in place when one is passed in
    entities.sort()
    vector = KeyedVector()
    for member in entities:
        key = StateKey({'entity':member.name,
                        'feature':self.turnFeature})
        vector[key] = 1.
    vector.freeze()
    return vector
115
def next(self,order=None):
    """Computes the active agents in the next turn by determining which agents have an activation greater than L{threshold} in the turn state
    @param order: the order vector to use as the basis for computing the turn result (defaults to the current system turn state)
    @type order: L{KeyedVector}
    @return: the names of those agents whose turn it is now.  Each of the agents will thus act in parallel
    @rtype: C{str[]}
    """
    if order is None:
        order = self.order
    best = {'value':-1.}
    for key,value in order.items():
        # Only turn-status entries participate in the comparison
        # (fix: dict.has_key is Python-2-only; `in` works on both)
        if 'feature' in key and key['feature'] == self.turnFeature:
            entry = {'name':key['entity']}
            if value > best['value'] + epsilon:
                # Strictly higher activation: restart the front-runner set
                best = {'value':value,'agents':[entry]}
            elif value + epsilon > best['value']:
                # Within epsilon of the best: this agent acts in parallel
                best['agents'].append(entry)
    # NOTE(review): raises KeyError if the vector contains no turn-feature
    # entries; callers apparently guarantee a non-empty turn state
    return best['agents']
136
def updateTurn(self,actions):
    """Computes the (possibly cached) change in turn due to the specified actions
    @param actions: the actions being performed, indexed by actor name
    @type actions: C{dict:strS{->}L{Action<teamwork.action.PsychActions.Action>}[]}
    @return: the dynamics of change to the standing turn order based on the specified actions, suitable for passing to L{applyTurn} to actually implement the changes
    @rtype: L{KeyedTree<teamwork.math.KeyedTree.KeyedTree>}
    """
    # Cache key: space-separated string form of the selected actions
    # (fix: replaces deprecated Python-2-only string.join, whose default
    # separator is a single space, and dict.has_key)
    actionKey = ' '.join(map(str,actions.values()))
    if actionKey not in self._turnDynamics:
        tree = self.createTurnDynamics(actions)
        # Pad the tree out to cover every key in the turn vector
        tree.fill(self.order.keys())
        tree.freeze()
        self._turnDynamics[actionKey] = tree
    return self._turnDynamics[actionKey]
151
def activeMembers(self):
    """
    @return: those agents who are able to take actions
    @rtype: L{Agent<teamwork.agent.Agent.Agent>}[]
    """
    # An agent is "active" iff it has at least one action option available
    return [agent for agent in self.members()
            if len(agent.actions.getOptions()) > 0]
162
# NOTE(review): the `def` line (embedded line 163) is missing from this
# extract and the original indentation was lost, so the exact nesting of
# the statements below (whether the recursive count and the final `if`
# sit inside the loop or the conditional) is ambiguous -- verify against
# the original module before restructuring.
164 """
165 @param level: The belief depth at which all agents will have their policies compiled, where 0 is the belief depth of the real agent. If the value of this flag is I{n}, then all agents at belief depthS{>=}I{n} will have their policies compiled, while no agents at belief depth<I{n} will.
166 @type level: int
167 @return: the total number of actors (including recursive beliefs) within this scenario
168 @rtype: int
169 """
170 count = 0
171 flag = False
# `flag` latches once any member's belief depth reaches the threshold
172 for agent in self.members():
173 if not flag and agent.beliefDepth() >= level:
174 flag = True
# Recurse into the agent's subjective view of the other entities
175 count += agent.entities.actorCount()
176 if flag:
177 count += len(self.activeMembers())
178 return count
179
# NOTE(review): the `def` line (embedded line 180) is missing from this
# extract and indentation was lost; presumably the per-agent option
# count (line 190) is inside the loop but outside the `if descend:`
# guard -- confirm against the original module before restructuring.
181 """
182 @param descend: flag, if True, the count includes actions in recursive beliefs; otherwise, not.
183 @return: the total number of actions within this scenario
184 @rtype: int
185 """
186 count = 0
187 for agent in self.members():
# Recursive tally over the agent's subjective entities when descending
188 if descend:
189 count += agent.entities.actionCount()
190 count += len(agent.actions.getOptions())
191 return count
192
def generateActions(self,agents=None,result=None):
    """Generates all possible joint actions out of the given agents
    @param agents: the agents eligible for action (defaults to currently eligible agents)
    @type agents: L{Agent<teamwork.agent.Agent.Agent>}[]
    @param result: partially assembled joint actions (used internally by the recursion)
    @return: a list of dictionaries, each mapping an actor name to one chosen action
    """
    if agents is None:
        agents = self.next()
    if result is None:
        result = [{}]
    if len(agents) == 0:
        # Recursion bottoms out: every agent has been assigned an action
        return result
    # NOTE: pops from (i.e., mutates) the caller's agent list
    turn = agents.pop()
    agent = self[turn['name']]
    try:
        # An explicit choice set may accompany the turn entry...
        choices = turn['choices']
    except KeyError:
        # ...otherwise consider every option the agent has
        choices = agent.actions.getOptions()
    expanded = []
    for action in choices:
        for partial in result:
            combo = copy.copy(partial)
            combo[agent.name] = action
            expanded.append(combo)
    return self.generateActions(agents,expanded)
218
def createTurnDynamics(self,actions):
    """Computes the change in turn due to the specified actions
    @param actions: the actions being performed, indexed by actor name
    @type actions: C{dict:strS{->}L{teamwork.action.PsychActions.Action}[]}
    @return: the dynamics of change to the standing turn order based on the specified actions, suitable for passing to L{applyTurn} to actually implement the changes
    @rtype: L{KeyedTree<teamwork.math.KeyedTree.KeyedTree>}
    """
    # Default dynamics: an identity matrix over the turn vector, i.e.
    # the turn state is left unchanged regardless of the actions taken
    identity = KeyedMatrix()
    for turnKey in self.order.keys():
        unit = KeyedVector()
        unit[turnKey] = 1.
        identity[turnKey] = unit
    result = KeyedTree()
    result.makeLeaf(identity)
    return result
234
def applyTurn(self,delta,beliefs=None):
    """Applies provided turn changes
    @param delta: changes, as computed by L{updateTurn}
    @type delta: C{L{KeyedTree<teamwork.math.KeyedTree.KeyedTree>}}
    @param beliefs: the belief dictionary to be updated (defaults to this actual L{Simulation})
    @type beliefs: dict
    """
    if beliefs is not None:
        # Updating somebody's beliefs about the turn state, not reality
        beliefs['turn'] = delta[beliefs['turn']] * beliefs['turn']
        return
    # Updating the real simulation: advance the turn vector and clock
    self.order = delta[self.order] * self.order
    self.time += 1
    if self.saveHistory:
        # Record the expected state for the new epoch
        self.history.append(self.state.expectation())
249
def __xml__(self):
    """
    @return: an XML document serializing this simulation: the base system content plus the time, state, and turn-order elements
    """
    doc = MultiagentSystem.__xml__(self)
    root = doc.documentElement
    root.setAttribute('time',str(self.time))
    # Serialize the state distribution and the turn-order vector as
    # child elements, in that order
    for tag,value in (('state',self.state),('order',self.order)):
        node = doc.createElement(tag)
        root.appendChild(node)
        node.appendChild(value.__xml__().documentElement)
    return doc
262
def parse(self,element,agentClass=None):
    """Extracts this simulation's content from the given XML element
    @param element: the XML Element containing a serialized simulation
    @param agentClass: the agent class to instantiate for members (passed through to L{MultiagentSystem.parse})
    @raise UserWarning: if the state/order child elements are malformed
    """
    MultiagentSystem.parse(self,element,agentClass)
    try:
        self.time = int(element.getAttribute('time'))
    except ValueError:
        # A missing attribute reads as '' -> default to epoch 0
        self.time = 0
    self.initializeOrder()
    child = element.firstChild
    while child:
        if child.nodeType == Node.ELEMENT_NODE:
            if child.tagName == 'state':
                subNodes = child.getElementsByTagName('distribution')
                if len(subNodes) == 1:
                    self.state.parse(subNodes[0],KeyedVector)
                elif len(subNodes) > 1:
                    # fix: `raise UserWarning,'...'` is Python-2-only
                    # statement syntax; the call form works in 2 and 3
                    raise UserWarning('Multiple distributions in state')
            elif child.tagName == 'order':
                subNodes = child.getElementsByTagName('vector')
                if len(subNodes) == 1:
                    self.order.parse(subNodes[0],True)
                elif len(subNodes) > 1:
                    raise UserWarning('Multiple vectors in turn sequence')
                else:
                    raise UserWarning('Missing vector in turn sequence')
        child = child.nextSibling
288
295
303