
Source Code for Module teamwork.multiagent.PsychAgents

   1  """PsychSim scenario class""" 
   2  from Queue import Queue 
   3  import string 
   4  import threading 
   5  import time 
   6  from xml.dom.minidom import * 
   7   
   8  from teamwork.utils.Debugger import * 
   9  from teamwork.math.KeyedMatrix import makeIdentityMatrix 
  10  from teamwork.math.probability import * 
  11  from teamwork.math.ProbabilityTree import * 
  12  from teamwork.dynamics.pwlDynamics import * 
  13  from Simulation import MultiagentSimulation 
  14  from Multiagent import MultiagentSystem 
  15   
  16  lookaheadCount = {} 
  17  ##import numpy.linalg.linalg 
  18   
class PsychAgents(MultiagentSimulation):
    """
    A PsychSim scenario

    0. Create a scenario from a list of Agent objects
       - C{scenario = L{PsychAgents}(entityList)}

    1. If the members have been created from elements of a L{GenericSociety}, then the following applies those generic models to the member L{teamwork.agent.Agent} objects:
       - C{scenario.L{applyDefaults}()}

    2. Access a (possibly recursively nested) member agent model
       - C{agent = scenario[name]}

    3. Run the simulation
       - C{result = scenario.L{performMsg}(msg,sender,receivers,hearers)}
       - C{result = scenario.L{performAct}(actions)}
       - C{result = scenario.L{microstep}()}

    4. Access the scenario state distribution
       - C{distribution = scenario.L{getState}()}

    5. Access the system dynamics
       - C{tree = scenario.L{getDynamics}(actions)}

    @cvar observationDecay: the rate at which an observation flag decays on each subsequent epoch (1 means no decay, 0 means instant amnesia)
    @type observationDecay: C{float}
    @cvar threaded: if C{True}, then execution will be multithreaded
    @type threaded: boolean
    """
    observationDecay = 0.5
    __VERIFY__ = True
    threaded = False

    def __init__(self,entities=[]):
        """
        @param entities: list or dictionary of L{Agent<teamwork.agent.Agent>} instances
        """
        self.society = None
        MultiagentSimulation.__init__(self,entities)
        self.actions = None
        self.actionMatrix = None
        # Sequence of changes over time
        self.log = []
        # Scenario goals
        self.objectives = []

    def initialize(self):
        self.compileDynamics()

    def addMember(self,agent):
        """Adds the agent to this collection
        @param agent: the agent to add
        @type agent: L{Agent<teamwork.agent.Agent>}
        @warning: will clobber any pre-existing agent with the same name
        """
        MultiagentSimulation.addMember(self,agent)
        # Hook up current agents' beliefs to this new agent
        eList = self.members()
        while len(eList) > 0:
            other = eList.pop()
            if other.name == agent.name:
                other.dynamics.update(agent.dynamics)
            eList += other.entities.members()
        # Hook up all of this new agent's beliefs to this world
        eList = [agent]
        while len(eList) > 0:
            agent = eList.pop()
            agent.entities.dynamics = self.dynamics
            agent.entities.society = self.society
            if self.has_key(agent.name):
                agent.dynamics.update(self[agent.name].dynamics)
            eList += agent.entities.members()

    def getActions(self):
        """
        @return: the current observation vector
        @rtype: L{KeyedVector}
        """
        if self.actions is None:
            self.actions = KeyedVector()
            for agent in self.members():
                for option in agent.actions.getOptions():
                    for action in option:
                        key = ActionKey({'type':action['type'],
                                         'entity':action['actor'],
                                         'object':action['object']})
                        self.actions[key] = 0.
            self.actions[keyConstant] = 1.
        return self.actions

    def applyDefaults(self,progress=None,total=None,doBeliefs=True):
        """Applies the relevant generic models to the member agent models
        @param progress: optional C{Queue} argument is used to give progress updates, in the form of C{(label,pct)}, where:
           - I{label}: a string label to display for the current task
           - I{pct}: an integer percentage (1-100) representing the amount of progress made since the last call
        @type progress: Queue
        @param total: number of total actions to be progressed through
        @type total: float
        @param doBeliefs: if C{True}, then create recursive beliefs (default is C{True})
        @type doBeliefs: bool
        """
        for entity in self.members():
            assert entity.entities.society is self.society
            # Set up relationships and models
            entity.initRelationships(self.members())
            # Instantiate the observation function with the new entities
            entity.initObservations(self.members())
        for entity in self.members():
            # Add any actions that refer to other entities
            entity.addActions(self.members())
            # Stick the actor in, but we should really do this somewhere else
            for action in entity.actions.getOptions():
                for subAct in action:
                    subAct['actor'] = entity.name
        for entity in self.members():
            # Initialize dynamics
            entity.initDynamics(self)
            # Initialize mental models
            entity.initModels(self.members())
        # Initialize every entity's beliefs about the others
        for entity in self.members():
            if progress:
                msg = 'Initializing beliefs of %s' % (entity.ancestry())
                count = pow(len(self.members()),entity.getDefault('depth'))
                progress.put((msg,0))
            if doBeliefs:
                # Recursive beliefs
                entity.initEntities(self.members())
            else:
                # Don't recurse
                entity.initEntities(self.members(),0)
            if progress:
                try:
                    progress.put((msg,max(1,100.*float(count)/total)))
                except ZeroDivisionError:
                    progress.put((msg,max(1,10.*float(count))))
        # Initialize turn order
        self.initializeOrder()
        self.applyOrder()
        if self.saveHistory:
            self.history.append(self.state.expectation())

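    # Sketch of driving applyDefaults with a progress Queue (not from the
    # original source); a GUI thread could poll the queue for (label,pct)
    # updates.  The total below mirrors the per-entity count used above:
    #
    #   progress = Queue()
    #   n = len(scenario.members())
    #   total = sum([pow(n,e.getDefault('depth')) for e in scenario.members()])
    #   scenario.applyDefaults(progress=progress,total=float(total))
    #   while not progress.empty():
    #       label,pct = progress.get()
    #       print label,pct
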
    def applyOrder(self,entities=None):
        """Applies any generic society's order specification to this scenario"""
        if entities is None and self.society:
            order = {}
            newEntities = self.activeMembers()
            for agent in newEntities:
                agent.entities.applyOrder(entities)
                order[agent.name] = []
                for index in range(len(self.society._keys)):
                    if agent.instanceof(self.society._keys[index]):
                        order[agent.name].append(index)
            newEntities.sort(lambda x,y:cmp(order[x.name],order[y.name]))
            entities = newEntities
        self.order = self.generateOrder(entities)

    def microstep(self,turns=[],hypothetical=False,explain=False,
                  suggest=False,debug=Debugger()):
        """Step forward by the action of the given entities
        @param turns: the agents to act next (defaults to result of L{next}), each entry in the list should be a dictionary:
           - I{name}: the name of the agent to act
           - I{choices}: the list of possible options this agent can consider in this turn (defaults to the list of all possible actions if omitted, or if the list is empty)
        @type turns: C{dict[]}
        @param hypothetical: if C{True}, then this is only a hypothetical microstep; otherwise, it is real
        @type hypothetical: bool
        @param explain: if C{True}, then add an explanation to the result
        @type explain: bool
        @param suggest: if C{True}, then suggest possible belief changes to ensure that the behavior in this step meets whatever objectives have been specified (default is C{False})
        @type suggest: bool
        @note: if C{explain} is C{False}, then C{suggest} is treated as C{False}
        @return: a dictionary of results:
           - decision: the dictionary of actions performed in this turn
           - delta: the changes to the state (suitable for passing in to applyChanges)
           - explanation: an XML document explaining what happened in this step and why
        """
        # Build up the list of selected actions
        actionDict = {}
        # If no agents provided, then use default turn-taking order
        if not turns:
            turns = self.next()
        doc = Document()
        root = doc.createElement('step')
        doc.appendChild(root)
        root.setAttribute('time',str(self.time+1))
        root.setAttribute('hypothetical',str(hypothetical))
        for turn in turns:
            if isinstance(turn,dict):
                name = turn['name']
                actor = self[name]
                try:
                    choices = turn['choices']
                except KeyError:
                    choices = []
                try:
                    history = turn['history']
                except KeyError:
                    history = {}
            else:
                raise DeprecationWarning,'Turns should be expressed in dictionary form'
            if len(choices) == 0:
                for option in actor.actions.getOptions():
                    if len(option) == 0:
                        # Doing nothing is always an option?
                        choices.append(option)
                    elif not actor.actions.illegal.has_key(str(option)):
                        # No deactivation of actions, so everything's possible
                        choices.append(option)
            if len(choices) == 1:
                actionDict[name] = choices[0]
                exp = None
            else:
                # Determine action chosen by this actor
                action,exp = actor.applyPolicy(actions=choices,history=history,
                                               debug=debug,explain=explain)
                actionDict[name] = action
            node = doc.createElement('turn')
            root.appendChild(node)
            node.setAttribute('agent',name)
            node.setAttribute('time',str(self.time+1))
            node.setAttribute('forced',str(len(choices) == 1))
            subDoc = self.explainAction(actionDict[name])
            node.appendChild(subDoc.documentElement)
            if explain:
                if exp:
                    node.appendChild(exp.documentElement)
                if suggest:
                    subDoc = self.suggestAll(name,actionDict[name])
                    element = subDoc.documentElement
                    element.appendChild(actor.entities.state.__xml__().documentElement)
                    node.appendChild(element)
        # Update state and beliefs
        result = {'decision': actionDict,
                  'delta': self.hypotheticalAct(actionDict, debug=debug),
                  'explanation': doc,
                  }
        if explain:
            subDoc = self.explainEffect(actionDict,result['delta'])
            root.appendChild(subDoc.documentElement)
        if not hypothetical:
            # Before applying changes, update the memory if the agent is a
            # memory-based agent
            # for agent in self.members():
            #     if isinstance(agent, teamwork.agent.MemoryAgent.MemoryAgent):
            #         agent.updateMemory(actionDict,agent.getAllBeliefs())
            self.applyChanges(result['delta'])
        return result

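    # Sketch of invoking microstep with the dictionary-form turns it expects
    # (agent names are hypothetical); an empty 'choices' list means the agent
    # considers all of its legal options:
    #
    #   turns = [{'name':'Teacher','choices':[]}]
    #   result = scenario.microstep(turns,hypothetical=True,explain=True)
    #   print result['decision']              # options actually selected
    #   print result['explanation'].toxml()   # XML explanation of the step
    #   # a real (non-hypothetical) step would also apply result['delta']
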
    def individualObs(self,state,actions,observation,agent):
        """Probability of the specified observation
        @warning: to be overridden"""
        raise NotImplementedError

    def getMember(self,agent):
        """Access the agent object stored in this scenario
        @param agent: the label for the entity to be returned, either:
           - if a string, the entity object of that name
           - if a list of strings, then the list is treated as a recursive path representing a branch of the belief hierarchy, and the end node is returned
        @type agent: string or list of strings
        @rtype: L{Agent<teamwork.agent.Agent>}"""
        if isinstance(agent,list):
            eList = agent
            entity = MultiagentSimulation.getMember(self,eList[0])
            for other in eList[1:]:
                entity = entity.getEntity(other)
            return entity
        else:
            return MultiagentSimulation.getMember(self,agent)

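    # Sketch of the two calling conventions described above (names are
    # hypothetical): a plain string returns a top-level agent, while a list
    # of names walks down the belief hierarchy:
    #
    #   teacher = scenario.getMember('Teacher')
    #   # the Teacher's belief about the Student's belief about the Teacher
    #   nested = scenario.getMember(['Teacher','Student','Teacher'])
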
    def performMsg(self,msg,sender,receivers,hearers=[],debug=Debugger(),
                   explain=False):
        """Updates the scenario in response to the specified message
        @param msg: the message to be sent
        @type msg: L{Message<teamwork.messages.PsychMessage.Message>}
        @param sender: name of the agent sending this message
        @type sender: str
        @param receivers: list of agent names who are the intended receivers
        @type receivers: str[]
        @param hearers: list of agent names who are I{unintended} hearers
        @type hearers: str[]
        @note: hearers is optional, but should not contain any of the receivers
        @param explain: if C{True}, then add an explanation to the result
        @type explain: C{boolean}
        @return: the overall effect of this message
        @rtype: dict
        """
        if explain:
            doc = Document()
            root = doc.createElement('step')
            doc.appendChild(root)
            root.setAttribute('time',str(self.time+1))
            node = doc.createElement('turn')
            node.setAttribute('agent',sender)
            node.setAttribute('time',str(self.time+1))
            node.setAttribute('forced',str(True))
            root.appendChild(node)
            subNode = doc.createElement('decision')
            node.appendChild(subNode)
            subNode.appendChild(msg.__xml__().documentElement)
            node = doc.createElement('effect')
            root.appendChild(node)
        else:
            doc = None
        # First, every receiver processes the message
        try:
            # Agents don't send messages to themselves (could cause problems)
            receivers.remove(sender)
        except ValueError:
            pass
        for name in receivers:
            recap = {'label':'%s received message:' % (name)}
            entity = self[name]
            newMsg = copy.deepcopy(msg)
            newMsg['actor'] = newMsg['sender'] = sender
            newMsg['object'] = newMsg['receiver'] = name
            newMsg['_observed'] = receivers
            newMsg['_unobserved'] = []
            for potential in entity.getEntities():
                if not potential in receivers:
                    # Non-receivers are assumed not to have heard
                    newMsg['_unobserved'].append(potential)
            result,exp = entity.stateEstimator(None,{sender:[newMsg]},-1,
                                               debug)
            subExp = exp[newMsg.pretty()]
            if len(subExp) > 0 and explain:
                node.appendChild(self.explainMessage(name,subExp).documentElement)
        try:
            hearers.remove(sender)
        except ValueError:
            pass
        for other in hearers:
            # Each hearer processes the message as well
            recap = {'label':'%s overheard message:' % (other)}
            entity = self[other]
            msgList = []
            for name in receivers:
                newMsg = copy.deepcopy(msg)
                newMsg['actor'] = newMsg['sender'] = sender
                newMsg['object'] = newMsg['receiver'] = name
                newMsg['_observed'] = [other]
                newMsg['_unobserved'] = []
                for potential in entity.getEntities():
                    if not potential in receivers+[other]:
                        # Non-receivers (including other overhearers) are
                        # assumed not to have heard
                        newMsg['_unobserved'].append(potential)
                msgList.append(newMsg)
            if len(msgList) > 0:
                result,exp = entity.stateEstimator(None,{sender:msgList},-1,
                                                   debug)
                assert len(msgList) == 1
                subExp = exp[msgList[0].pretty()]
                if len(subExp) > 0 and explain:
                    node.appendChild(self.explainMessage(other,subExp).documentElement)
        # Sender updates its beliefs at the next recursive level
        entity = self[sender]
        newReceivers = []
        for other in receivers:
            if entity.hasBelief(other):
                newReceivers.append(other)
        newHearers = []
        for other in hearers:
            if entity.hasBelief(other):
                newHearers.append(other)
        if len(newReceivers+newHearers) > 0:
            exp = entity.entities.performMsg(msg,sender,
                                             newReceivers,
                                             newHearers,debug)
##            delta[sender] = exp
        # Messages need to be incorporated into turn dynamics
        self.time += 1
        return doc

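    # Sketch of delivering a message (sender/receiver names are hypothetical,
    # and `msg` is assumed to be an already-constructed
    # teamwork.messages.PsychMessage.Message instance):
    #
    #   explanation = scenario.performMsg(msg,'Teacher',['Student'],
    #                                     hearers=['Principal'],explain=True)
    #   if explanation:
    #       print explanation.toxml()
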
    def updateAll(self,action,debug=Debugger()):
        """Obsolete, still here for backward compatibility"""
        raise DeprecationWarning,'Use performAct instead'

    def compileDynamics(self,progress=None,total=100,profile=False):
        """Pre-compiles all of the dynamics trees for these agents
        @param progress: optional C{Queue} argument is used to give progress updates, in the form of C{(label,pct)}, where:
           - I{label}: a string label to display for the current task
           - I{pct}: an integer percentage (1-100) representing the amount of progress made since the last call
        @type progress: Queue
        @param total: the total number of actions to be compiled
        @type total: int
        @param profile: if C{True}, a profiler is run and statistics printed out (default is C{False})
        @type profile: boolean
        @warning: pre-compiles only those trees missing from cache.
        @note: Applies this method to recursive beliefs of member agents as well"""
        # Start compiling
        if profile:
            import hotshot,hotshot.stats
            filename = '/tmp/stats'
            prof = hotshot.Profile(filename)
            prof.start()
        if self.threaded:
            lock = threading.Lock()
            threads = []
        for entity in self.members():
            actionList = entity.actions.getOptions()
            if progress and len(actionList) > 0:
                msg = 'Compiling actions of %s' % (entity.name)
                progress.put((msg,0.))
            for option in actionList:
                self.updateTurn({entity.name:option})
                if self.threaded:
                    # Bind the loop variables as defaults so each thread
                    # compiles the intended entity/option pair
                    cmd = lambda entity=entity,option=option: \
                          self.getDynamics({entity.name:option},lock)
                    thread = threading.Thread(target=cmd)
                    thread.start()
                    threads.append(thread)
                else:
                    self.getDynamics({entity.name:option})
            if self.threaded:
                for thread in threads:
                    if thread.isAlive():
                        thread.join()
            if progress and len(actionList) > 0:
                progress.put((msg,max(1,100.*float(len(actionList))/total)))
            # The following line is unnecessary if dynamics in beliefs are the same as the real dynamics
##            entity.entities.compileDynamics(progress,total)
        if profile:
            prof.stop()
            prof.close()
            print 'loading stats...'
            stats = hotshot.stats.load(filename)
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats()

    def compilePolicy(self,level=0,progress=None,total=100):
        """Pre-compiles policy trees for member agents
        @param level: The belief depth at which all agents will have their policies compiled, where 0 is the belief depth of the real agent. If the value of this flag is I{n}, then all agents at belief depth S{>=} I{n} will have their policies compiled, while no agents at belief depth < I{n} will. If C{None}, then no agents will have policies compiled.
        @type level: int
        @warning: pre-compiles only those trees missing from cache.
        @note: Applies this method to recursive beliefs of member agents as well"""
        for entity in self.members():
            entity.entities.compilePolicy(level,progress,total)
            if level is not None and \
               len(entity.actions.getOptions()) > 0 and \
               entity.beliefDepth() >= level:
                if entity.parent and entity.name == entity.parent.name:
                    # Do we want to compile here?  I think not.
                    # But ask me again in a week or so.
                    continue
                if progress:
                    msg = 'Compiling policy of %s' % (entity.ancestry())
                    if isinstance(progress,Queue):
                        progress.put((msg,1.))
                    else:
                        progress(msg,1.)
                start = time.time()
                entity.policy.initialize()
                entity.policy.solve()
                if not entity.parent:
                    print entity.name,'policy',time.time()-start

    def getDynamics(self,actionDict,lock=None,debug=False):
        """Returns the overall dynamics function over the provided actions
        @param lock: optional thread lock
        @rtype: L{PWLDynamics}"""
        # Look in cache for dynamics
        actionKey = string.join(map(str,actionDict.values()))
        if lock:
            lock.acquire()
        if not self.dynamics.has_key(actionKey):
            if debug:
                print 'Unable to find dynamics for:',actionDict
                print 'Looking under:',actionKey
            if lock:
                lock.release()
            # Create dynamics from scratch
            dynamics = {'state':self.getStateDynamics(actionDict),
                        'actions':self.getActionDynamics(actionDict),
                        }
            if lock:
                lock.acquire()
            self.dynamics[actionKey] = dynamics
        if lock:
            lock.release()
        return self.dynamics[actionKey]

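    # Sketch of looking up the compiled dynamics for one agent's option
    # (assumes the scenario has at least one active agent with at least one
    # option):
    #
    #   actor = scenario.activeMembers()[0]
    #   option = actor.actions.getOptions()[0]
    #   dynamics = scenario.getDynamics({actor.name:option})
    #   tree = dynamics['state'].getTree()   # PWL tree over the state vector
    #   obsMatrix = dynamics['actions']      # matrix over the observation flags
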
    def getStateDynamics(self,actionDict,errors=None):
        """
        @return: the dynamics of the state vector in response to the given action
        @param actionDict: the actions performed, indexed by actor name
        @type actionDict: C{dict:str->L{Action<teamwork.action.PsychActions.Action>}[]}
        @param errors: a dictionary to hold any dynamics bugs that are corrected (by default, the bugs are corrected but not returned)
        @type errors: dict
        """
        if errors is None:
            errors = {}
        dynamics = None
        keyList = self.getStateKeys().keys()
        keyList.sort()
        remaining = filter(lambda k: isinstance(k,StateKey),keyList)
        matrix = makeIdentityMatrix(keyList)
        identity = PWLDynamics({'tree':ProbabilityTree(matrix)})
        # Treat action effects independently
        for actList in actionDict.values():
            for action in actList:
                # Merge in dynamics for all state features
                actDynamics = None
                for key in keyList:
                    if isinstance(key,StateKey):
                        entity = self[key['entity']]
                        feature = key['feature']
                        subDyn = entity.getDynamics(action,feature)
                        if subDyn:
                            if key in remaining:
                                remaining.remove(key)
                            tree = subDyn.getTree()
                            if self.__VERIFY__:
                                errors.update(self.verifyTree(tree))
                            if actDynamics is None:
                                actDynamics = PWLDynamics({'tree':copy.deepcopy(subDyn.getTree())})
                            else:
                                actDynamics = actDynamics.merge(subDyn)
                # Add dynamics for constant slot
                matrix = ConstantDynamics()
                subDyn = PWLDynamics({'tree':ProbabilityTree(matrix)})
                if actDynamics:
                    actDynamics = actDynamics.merge(subDyn)
                else:
                    actDynamics = subDyn
                # Remove any identity trees
                actDynamics.getTree().pruneIdentities()
                # Fill in any missing keys
                actDynamics.getTree().fill(keyList)
                actDynamics.getTree().freeze()
                if dynamics is None:
                    dynamics = actDynamics
                else:
                    dynamics += actDynamics
                    dynamics -= identity
        for key in remaining:
            # Features which are unaffected by actions
            entity = self[key['entity']]
            try:
                subDyn = entity.dynamics[key['feature']][None]
                if isinstance(subDyn,str):
                    subDyn = entity[subDyn].dynamics[key['feature']][None]
            except KeyError:
                subDyn = IdentityDynamics(key['feature'])
            subDyn = subDyn.instantiate(entity,{})
            subDyn.getTree().fill(keyList)
            subDyn.getTree().freeze()
            if dynamics is None:
                dynamics = subDyn
            else:
                dynamics += subDyn
        return dynamics

    def verifyTree(self,tree,validKeys=None,errors=None):
        """Identifies and removes any extraneous keys in the given matrices
        @param tree: the tree to check
        @type tree: C{L{KeyedTree}[]}
        @param errors: the errors found so far (defaults to empty)
        @type errors: C{dict:L{StateKey}[]S{->}True}
        @return: the extraneous keys found (C{L{StateKey}[]S{->}True})
        @rtype: dict
        """
        if errors is None:
            errors = {}
        if validKeys is None:
            validKeys = self.getStateKeys()
        if tree.isLeaf():
            matrix = tree.getValue()
            for rowKey in matrix.rowKeys():
                if isinstance(rowKey,StateKey) and \
                   not validKeys.has_key(rowKey):
                    errors[rowKey] = True
                    del matrix[rowKey]
                else:
                    row = matrix[rowKey]
                    for colKey in row.keys():
                        if isinstance(colKey,StateKey) and \
                           not validKeys.has_key(colKey):
                            errors[colKey] = True
                            del row[colKey]
        else:
            for child in tree.children():
                self.verifyTree(child,validKeys,errors)
            if not tree.isProbabilistic():
                for plane in tree.split:
                    for key in plane.weights.keys():
                        if isinstance(key,StateKey) and \
                           not validKeys.has_key(key):
                            errors[key] = True
                            del plane.weights[key]
        return errors

    def getActionDynamics(self,actionDict):
        """
        @return: the dynamics of the action vector in response to the given action
        @param actionDict: the actions performed, indexed by actor name
        @type actionDict: C{dict:str->L{Action<teamwork.action.PsychActions.Action>}[]}
        """
        keyList = self.getActions().keys()
        if self.actionMatrix is None:
            # Create a base dynamics matrix to use as a starting point
            self.actionMatrix = KeyedMatrix()
            for rowKey in keyList:
                row = copy.copy(self.getActions())
                for colKey in keyList:
                    if rowKey == colKey:
                        if colKey == keyConstant:
                            row[colKey] = 1.
                        else:
                            row[colKey] = self.observationDecay
                    else:
                        row[colKey] = 0.
                self.actionMatrix[rowKey] = row
        dynamics = copy.deepcopy(self.actionMatrix)
        for actList in actionDict.values():
            for action in actList:
                key = ActionKey({'type':action['type'],
                                 'entity':action['actor'],
                                 'object':action['object']})
                dynamics.set(key,keyConstant,1.)
                dynamics.set(key,key,0.)
        return dynamics

    def getDynamicsMatrix(self,worlds,lookup):
        """Generates matrix representations of probabilistic dynamics, given the space of possible worlds
        """
        transition = {}
        for actions in self.generateActions():
            actionKey = ' '.join(map(str,actions.values()))
            # Transform transition probability into matrix representation
            dynamics = self.dynamics[actionKey]
            matrix = KeyedMatrix()
            for colKey,world in worlds.items():
                new = dynamics['state'].apply(world)*world
                if isinstance(new,Distribution):
                    for element,prob in new.items():
                        matrix.set(lookup[element],colKey,prob)
                else:
                    matrix.set(lookup[new],colKey,1.)
            matrix.fill(worlds.keys())
            matrix.freeze()
            transition[actionKey] = matrix
        return transition

    def hypotheticalAct(self,actions,beliefs=None,debug=Debugger()):
        """
        Computes the scenario changes that would result from a given action
        @param actions: dictionary of actions, where each entry is a list of L{Action<teamwork.action.PsychActions.Action>} instances, indexed by the name of the actor
        @type actions: C{dict:str->L{Action<teamwork.action.PsychActions.Action>}[]}
        @return: the changes that would result from I{actions}
           - state: the change to state, in the form of a L{Distribution} over L{KeyedMatrix} (suitable to pass to L{applyChanges})
           - I{agent}: the change to the recursive beliefs of I{agent}, as returned by L{teamwork.agent.RecursiveAgent.RecursiveAgent.preComStateEstimator}
           - turn: the change in turn in dictionary form (suitable to pass to L{applyTurn})
        @rtype: C{dict}
        """
        # Eventual return value, storing up all the various belief
        # deltas across the entities
        overallDelta = {}
        if len(self) == 0:
            return overallDelta
        if beliefs is None:
            state = self.getState()
            observations = self.getActions()
        else:
            state = beliefs['state']
            observations = beliefs['observations']
        overallDelta['turn'] = self.updateTurn(actions,debug)
        dynamics = self.getDynamics(actions)
        overallDelta['state'] = dynamics['state'].apply(state)
        overallDelta['observations'] = dynamics['actions']
        # Do observation phase to update any recursive beliefs
        if self.threaded:
            lock = threading.Lock()
            threads = []
        for entity in self.members():
            if len(entity.entities) == 0:
                continue
            observations = entity.observe(actions)
            if len(observations) > 0:
                name = entity.name
                if beliefs is None:
                    world = None
                elif beliefs.has_key(name):
                    world = beliefs[name]
                else:
                    world = None
                if self.threaded:
                    # Bind the loop variables as defaults so each thread
                    # updates the intended agent
                    cmd = lambda entity=entity,observations=observations,world=world: \
                          self.__updateAgent(entity,observations,
                                             overallDelta,lock,world,
                                             debug)
                    thread = threading.Thread(target=cmd)
                    thread.start()
                    threads.append(thread)
                else:
                    result = entity.preComStateEstimator(world,observations,
                                                         debug=debug)
                    overallDelta[name] = result
        if self.threaded:
            # Loop until all threads finish
            for thread in threads:
                if thread.isAlive():
                    thread.join()
        return overallDelta

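    # Sketch of previewing the effect of a joint action without committing
    # to it (agent name is hypothetical); the returned delta can later be
    # applied with applyChanges:
    #
    #   option = scenario['Teacher'].actions.getOptions()[0]
    #   delta = scenario.hypotheticalAct({'Teacher':option})
    #   newState = delta['state']*scenario.getState()
    #   scenario.applyChanges(delta)    # only if the action really happens
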
    def __updateAgent(self,entity,actions,delta,lock,world,debug):
        result = entity.preComStateEstimator(world,actions,debug=debug)
        name = entity.name
        lock.acquire()
        delta[name] = result
        lock.release()

    def performAct(self,actions,debug=Debugger()):
        """Updates all of the entities in response to the given actions
        @param actions: a dictionary of actions, indexed by actor name
        @return: a dictionary of the changes, returned by L{hypotheticalAct}
        """
        turns = []
        for actor,actList in actions.items():
            turns.append({'name':actor,
                          'choices':[actList]})
        return self.microstep(turns,hypothetical=False,debug=debug)

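    # Sketch of committing a specific joint action (agent name is
    # hypothetical); each value is a list of Action instances, e.g. one of
    # the actor's own options:
    #
    #   option = scenario['Student'].actions.getOptions()[0]
    #   result = scenario.performAct({'Student':option})
    #   print result['decision']
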
    def applyChanges(self,delta,descend=True,rewind=False,beliefs=None):
        """Applies the differential changes to this set of entities"""
        if len(self.members()) == 0:
            # There should be some way to avoid this
            return
        for key,subDelta in delta.items():
            if key == 'turn':
                # Apply new turn info
                self.applyTurn(subDelta,beliefs)
            elif key == 'state':
                if beliefs is None:
                    state = self.getState()
                else:
                    state = beliefs['state']
                state = subDelta*state
                if beliefs is None:
                    # Change the real state
                    self.state.clear()
                    for key,prob in state.items():
                        self.state[key] = prob
                else:
                    beliefs['state'] = state
            elif key == 'observations':
                if beliefs is None:
                    self.actions = subDelta*self.getActions()
                else:
                    beliefs['observations'] = subDelta*beliefs['observations']
            elif key == 'relationships':
                pass
            else:
                # Apply changes to recursive beliefs
                entity = self[key]
                if beliefs is None:
                    entity.entities.applyChanges(subDelta,descend,rewind)
                    try:
                        entity.updateLinks(subDelta['relationships'])
                    except KeyError:
                        pass
                else:
                    entity.entities.applyChanges(subDelta,descend,rewind,
                                                 beliefs[key])

    def reachable(self,horizon,reachable=None,states=None):
        """Generates the possible real-world states reachable from the current states
        @param horizon: the number of time steps to project into the future
        @type horizon: int
        @rtype: L{KeyedVector}[]
        @note: Observations are not tracked, as we assume that they do not affect reachability
        """
        if reachable is None:
            reachable = {}
        if states is None:
            states = [{'state':self.getState(),
                       'observations':self.getActions(),
                       'turn':self.order,
                       }]
            for element in states[0]['state'].domain():
                reachable[element] = True
        if horizon == 0:
            return reachable.keys()
        else:
            newStates = []
            for beliefs in states:
                for actionSet in self.generateActions(self.next()):
                    delta = self.hypotheticalAct(actionSet,beliefs)
                    newBeliefs = {'state':delta['state']*beliefs['state']}
                    newBeliefs['observations'] = delta['observations']*beliefs['observations']
                    newBeliefs['turn'] = delta['turn'].getValue()*beliefs['turn']
                    for element in newBeliefs['state'].domain():
                        reachable[element] = True
                    for old in newStates:
                        if old['turn'] == newBeliefs['turn'] and \
                           old['state'] == newBeliefs['state']:
                            break
                    else:
                        newStates.append(newBeliefs)
            return self.reachable(horizon-1,reachable,newStates)

    def generateWorlds(self,level=0,maxSize=100,worlds=None):
        """Generates the space of possible I{n}-level worlds within the current simulation
        @param level: I{n}
        @type level: int
        @param maxSize: the upper limit on the size of the generated space (default is 100)
        @type maxSize: int
        @return: dictionary of worlds, indexed by L{WorldKey}, and a reverse lookup dictionary as well (i.e., L{WorldKey}S{->}L{KeyedVector}, L{KeyedVector}S{->}L{WorldKey})
        @rtype: dict,dict
        """
        if worlds is None:
            worlds = {}
        if level == 0:
            horizon = len(self.activeMembers()) - 1
            space = []
            newSpace = [None]
            while len(newSpace) > len(space) and len(newSpace) < maxSize:
                horizon += 1
                space = newSpace
                newSpace = self.reachable(horizon)
            lookup = {}
            for index in range(len(newSpace)):
                key = WorldKey({'world':index})
                worlds[key] = newSpace[index]
                lookup[worlds[key]] = key
            return worlds,lookup
        else:
            raise NotImplementedError,'Unable to generate belief space for agents who are not 0-level'

    ####################
    # Explanation Code #
    ####################

    def explainEffect(self,actions,effect={},prefix=None):
        doc = Document()
        root = doc.createElement('effect')
        doc.appendChild(root)
        for key,delta in effect.items():
            if key == 'state':
                if len(delta.expectation()) > 1:
                    node = doc.createElement('state')
                    root.appendChild(node)
                    oldState = self.getState()
                    newState = delta*oldState
                    diff = newState-oldState
                    node.appendChild(diff.__xml__().documentElement)
            elif key == 'turn':
                pass
            elif key == 'explanation':
                pass
            elif key == 'observations':
                pass
            elif key == 'relationships':
                pass
            else:
                node = doc.createElement('beliefs')
                node.setAttribute('agent',key)
                beliefs = self[key].entities
                subDoc = beliefs.explainEffect(actions,delta)
                node.appendChild(subDoc.documentElement)
        return doc

    def explainAction(self,actions):
        doc = Document()
        root = doc.createElement('decision')
        doc.appendChild(root)
        for action in actions:
            root.appendChild(action.__xml__().documentElement)
        return doc

    def explainExpectation(self,breakdown):
        doc = Document()
        root = doc.createElement('expectation')
        doc.appendChild(root)
        for t in range(len(breakdown)):
            # Extract countermove
            step = breakdown[t]
            for other,actList in step['action'].items():
                node = doc.createElement('turn')
                node.setAttribute('agent',other)
                for action in actList:
                    node.appendChild(action.__xml__().documentElement)
        return doc

    def explainDecision(self,actor,explanation):
        """Extracts explanation from explanation structure"""
        doc = Document()
        root = doc.createElement('explanation')
        doc.appendChild(root)
        # Package up the chosen action
        decision = explanation['decision']
        # Extract what actor expects from best decision
        lookahead = explanation['options'][str(decision)]
        subDoc = self.explainExpectation(lookahead['breakdown'])
        root.appendChild(subDoc.documentElement)
        return doc

    def explainMessage(self,name,explanation):
        """Extracts explanation from message acceptance explanation structure"""
        doc = Document()
        root = doc.createElement('hearer')
        doc.appendChild(root)
        root.setAttribute('agent',name)
        root.setAttribute('decision',str(explanation['decision']))
        if explanation['breakdown']['accept'] == 'forced':
            root.setAttribute('forced',str(True))
        elif explanation['breakdown']['reject'] == 'forced':
            root.setAttribute('forced',str(True))
        else:
            root.setAttribute('forced',str(False))
            for key,value in explanation['breakdown']['accept'].items():
                node = doc.createElement('factor')
                root.appendChild(node)
                positive = float(value) > self[name].beliefWeights['threshold']
                node.setAttribute('positive',str(positive))
                node.setAttribute('type',key)
        return doc

    def setModelChange(self,flag=-1):
        """Sets the model change flag value across all entities
        @param flag: if flag argument is positive, activates model changes in the belief updates of these entities; if 0, deactivates them; if negative (default), toggles the activation state
        @type flag: C{int}
        """
        for entity in self.members():
            if flag:
                if flag < 0:
                    entity.modelChange = not entity.modelChange
                else:
                    entity.modelChange = True
            else:
                entity.modelChange = False

    def detectViolations(self,action,objectives=None):
        """Determines which objectives the given action violates
        @param objectives: the objectives to test (default is all)
        @type objectives: (str,str,str)[]
        @type action: L{Action<teamwork.action.PsychActions.Action>}
        @rtype: (str,str,str)[]
        """
        if objectives is None:
            objectives = self.objectives
        actor = self[action['actor']]
        violated = []
        for objective in objectives:
            if objective[0] == actor.name or objective[0] == 'Anybody':
                if not objective[1] in actor.getStateFeatures():
                    if objective[1] == action['type']:
                        if objective[2] == 'Minimize':
                            # Bad action happened
                            violated.append(objective)
                    else:
                        if objective[2] == 'Maximize':
                            # Good action didn't happen
                            violated.append(objective)
        return violated

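    # Sketch of objective checking: objectives are (who,what,how) triples,
    # and detectViolations reports which of them a single Action violates.
    # The objective and option values here are hypothetical:
    #
    #   scenario.objectives.append(('Anybody','punish','Minimize'))
    #   for action in option:
    #       print scenario.detectViolations(action)
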
    def suggestAll(self,actor,option):
        """Generates alternative beliefs that might change the given action into one that satisfies all objectives
        @type actor: str
        @type option: L{Action<teamwork.action.PsychActions.Action>}[]
        @rtype: Document
        """
        doc = Document()
        root = doc.createElement('suggestions')
        doc.appendChild(root)
        root.setAttribute('time',str(self.time+1))
        root.setAttribute('objectives',str(len(self.objectives)))
        # Find any violated objectives
        violated = {}
        for action in option:
            for violation in self.detectViolations(action):
                violated[self.objectives.index(violation)] = True
        root.setAttribute('violations',str(len(violated)))
        # Go through each violated objective and generate suggestions
        for index in violated.keys():
            objective = self.objectives[index]
            node = objective2XML(objective,doc)
            root.appendChild(node)
            suggestions = self.suggest(actor,option,objective)
            node.setAttribute('count',str(len(suggestions)))
            counts = {}
            for suggestion in suggestions[:]:
                if len(suggestion) > 0:
                    try:
                        counts[str(suggestion)] += 1
                        suggestions.remove(suggestion)
                    except KeyError:
                        counts[str(suggestion)] = 1
                else:
                    suggestions.remove(suggestion)
            for suggestion in suggestions:
                child = doc.createElement('suggestion')
                node.appendChild(child)
                for threshold in suggestion.values():
                    key = threshold['key']
                    grandchild = key.__xml__().documentElement
                    grandchild.setAttribute('min',str(threshold['min']))
                    grandchild.setAttribute('max',str(threshold['max']))
                    child.appendChild(grandchild)
        return doc

    def suggest(self,actor,action,objective):
        """Suggest alternative beliefs that might change the given action into one that satisfies the given objective
        @type actor: str
        @type action: L{Action<teamwork.action.PsychActions.Action>}[]
        @type objective: (str,str,str)
        @rtype: dict[]
        """
        # Compute the expected sequence of actions
        sequence = self[actor].multistep(horizon=self[actor].horizon,
                                         start={actor:action})
        beliefs = self[actor].entities
        # Compute the dynamics after this first action
        dynamics = None
        for t in range(len(sequence)):
            decision = sequence[t]['action']
            tree = beliefs.getDynamics(decision)['state'].getTree()
            if dynamics is None:
                dynamics = tree
                actual = tree
            else:
                dynamics = tree*dynamics
                actual += dynamics
        alternatives = []
        # Compute the other satisfactory options the actor could've done
        targets = []
        for option in self[actor].actions.getOptions():
            for action in option:
                if self.detectViolations(action,[objective]):
                    break
            else:
                targets.append(option)
        assert not action in targets
        goals = self[actor].getGoalVector()['state']
        goals.fill(self.state.domain()[0].keys())
        for option in targets:
            # Substitute alternative action into the lookahead
            # (we could re-create the whole lookahead,
            # but that might be expensive)
            dynamics = beliefs.getDynamics({actor:option})['state'].getTree()
            desired = dynamics
            for t in range(1,len(sequence)):
                decision = sequence[t]['action']
                tree = beliefs.getDynamics(decision)['state'].getTree()
                dynamics = tree*dynamics
                desired += dynamics
            delta = desired - actual
            # Identify leaves where target action is preferred
            for leaf in delta.leafNodes():
                plane = KeyedPlane((goals*leaf.getValue()).domain()[0],0.)
                result = plane.always()
                if result:
                    # Always true
                    conditions = leaf.getPath()
                elif result is None:
                    # Sometimes true, sometimes false
                    conditions = leaf.getPath()
                    conditions.append(([plane],True))
                else:
                    # Never true
                    conditions = []
                thresholds = {}
                for split,value in conditions:
                    for plane in split:
                        feature = None
                        for key in split[0].weights.keys():
                            if abs(split[0].weights[key]) > epsilon:
                                if isinstance(key,StateKey):
                                    if feature is None:
                                        feature = key
                                        sign = split[0].weights[key]
                                        sign /= abs(sign)
                                    else:
                                        # Two state features in this plane
                                        break
                        else:
                            # Found no more than one relevant state feature
                            if feature is None:
                                # Didn't find any states?  Shouldn't happen
                                pass
                            else:
                                # Merge this threshold with previous
                                try:
                                    threshold = thresholds[feature]
                                except KeyError:
                                    threshold = {'max':1.,'min':-1.,
                                                 'key':feature}
                                    thresholds[feature] = threshold
                                if sign > 0.:
                                    if value:
                                        # Positive weight, test is True
                                        key = 'min'
                                        function = max
                                    else:
                                        # Positive weight, test is False
                                        key = 'max'
                                        function = min
                                else:
                                    if value:
                                        # Negative weight, test is True
                                        key = 'max'
                                        function = min
                                    else:
                                        # Negative weight, test is False
                                        key = 'min'
                                        function = max
                                threshold[key] = function(threshold[key],
                                                          sign*plane.threshold)
                for key,threshold in thresholds.items():
                    if threshold['max'] < threshold['min']:
                        # Contradictory constraints
                        break
                else:
                    # All of the constraints are kosher
                    alternatives.append(thresholds)
        return alternatives

    def toHTML(self):
        str = '<TABLE WIDTH="100%">\n'
        str = str + '<TBODY>\n'
        odd = None
        for entity in self.members():
            str = str + '<TR'
            if odd:
                str = str + ' BGCOLOR="#aaaaaa"'
            else:
                str = str + ' BGCOLOR="#ffffff"'
            str = str + '>'
##            str = str + '<TH>' + entity.ancestry() + '</TH>\n'
            str = str + '<TD>' + entity.toHTML() + '</TD>\n'
            str = str + '</TR>\n'
            odd = not odd
        str = str + '</TBODY>\n'
        str = str + '</TABLE>\n'
        return str

    def __xml__(self):
        doc = MultiagentSimulation.__xml__(self)
        for objective in self.objectives:
            doc.documentElement.appendChild(objective2XML(objective,doc))
        if self.society:
            node = doc.createElement('society')
            node.appendChild(self.society.__xml__().documentElement)
            doc.documentElement.appendChild(node)
        return doc

    def parse(self,element,agentClass=None,societyClass=None):
        """
        @param agentClass: the Python class for the individual entity members
        @type agentClass: class
        @param societyClass: the optional Python class for any generic society associated with this scenario
        @type societyClass: class
        """
        MultiagentSimulation.parse(self,element,agentClass)
        if societyClass:
            self.society = societyClass()
        child = element.firstChild
        while child:
            if child.nodeType == child.ELEMENT_NODE:
                if child.tagName == 'objective':
                    objective = (str(child.getAttribute('who')),
                                 str(child.getAttribute('what')),
                                 str(child.getAttribute('how')),
                                 )
                    self.objectives.append(objective)
                elif child.tagName == 'society' and societyClass:
                    grandchild = child.firstChild
                    while grandchild:
                        if grandchild.nodeType == child.ELEMENT_NODE:
                            break
                        grandchild = grandchild.nextSibling
                    if grandchild:
                        self.society.parse(grandchild)
            child = child.nextSibling
        return self

    def __copy__(self):
        entities = self.__class__(self.members())
        entities.objectives = self.objectives[:]
        return entities

    def __deepcopy__(self,memo):
        result = self.__class__()
        memo[id(self)] = result
        result.__init__(copy.deepcopy(self.members(),memo))
        result.objectives = copy.deepcopy(self.objectives,memo)
        return result

def objective2XML(objective,doc):
    node = doc.createElement('objective')
    node.setAttribute('who',str(objective[0]))
    node.setAttribute('what',str(objective[1]))
    node.setAttribute('how',str(objective[2]))
    return node

if __name__ == '__main__':
    import os
    from teamwork.utils.PsychUtils import load

    entities = load('%s/python/teamwork/examples/Scenarios/school.scn' \
                    % (os.environ['HOME']))
    doc = entities.__xml__()
    print doc.toxml()