
Source Code for Module teamwork.examples.Thespian.ThespianAgent

from teamwork.agent.DefaultBased import *
from teamwork.agent.Entities import *
from teamwork.multiagent.sequential import *
from teamwork.multiagent.GenericSociety import *
from teamwork.action.PsychActions import *

from teamwork.dynamics.pwlDynamics import *
from teamwork.policy.policyTable import PolicyTable

sceneID = 3
class ThespianAgent(PsychEntity):
##    actionClass = ThespianAction
##    beliefClass = ThespianAgents

    def observe(self,actionDict={}):
        """
        @param actionDict: the performed actions, indexed by actor name
        @type actionDict: C{dict:strS{->}L{Action}[]}
        @return: observations this entity would make of the given actions, in the same format as the provided action dictionary
        @rtype: C{dict}
        """
        observations = {}
        for actor,action in actionDict.items():
            # mei 08/07/15: can't observe actions that happen at other locations
            #try:
            #    if not self.getBelief(actor,'location').expectation() == self.getState('location').expectation():
            #        continue
            #except:
            #    pass
            if actor == self.name:
                # Always observe our own actions (I assume this is OK)
                observations[actor] = action
            else:
                observation = []
                for subAct in action:
                    if subAct.has_key('_observed'):
                        # Forced to observe
                        if self.name in subAct['_observed']:
                            observation.append(subAct)
                            continue
                    if subAct.has_key('_unobserved'):
                        # Forced to *not* observe
                        if self.name in subAct['_unobserved']:
                            continue
                    if subAct['object'] == self.name:
                        # Always observe actions directed at
                        # ourselves (this is questionable)
                        observation.append(subAct)
                    else:
                        # Check whether we can observe this actor's acts
                        for omega,entries in self.observations.items():
                            if str(subAct) == omega:
                                for entry in entries.keys():
                                    if subAct.matchTemplate(entry):
                                        if entry['_observable']:
                                            # We can definitely observe this
                                            observation.append(subAct)
                                        else:
                                            # We can definitely *not* observe
                                            pass
                                        break
                                else:
                                    # No matching action generates this observation
                                    break
                                # This template settled the matter either way
                                break
                        else:
                            # By default, assume observable
                            observation.append(subAct)
                if len(observation) > 0:
                    # Only add observations if we have any (questionable?)
                    observations[actor] = observation
        return observations

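    ## A minimal sketch of how observe() treats the forced-visibility flags
    ## (added for illustration; the agents and the action are hypothetical,
    ## not taken from a scenario file). An action listing us in '_observed'
    ## is always seen, one listing us in '_unobserved' is always missed, and
    ## an action directed at us is always seen:
    ##   act = Action({'actor':'wolf','type':'eat','object':'red'})
    ##   act['_unobserved'] = ['granny']
    ##   granny.observe({'wolf':[act]})   # returns {}
    ##   red.observe({'wolf':[act]})      # returns {'wolf':[act]}
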
    def applyPolicy(self,state=None,actions=[],history=None,debug=None,
                    explain=False):
        """Generates a decision chosen according to the agent's current policy
        @param state: the current state vector
        @type state: L{Distribution}(L{KeyedVector})
        @param actions: the possible actions the agent can consider (defaults to all available actions)
        @type actions: L{Action}[]
        @param history: a dictionary of actions that have already been performed (and which should not be performed again if they are labeled as not repeatable)
        @type history: L{Action}[]:bool
        @param explain: flag indicating whether an explanation should be generated
        @type explain: bool
        @return: a list of actions and an explanation, the latter provided by L{execute<PolicyTable.execute>}
        @rtype: C{(L{Action}[],Element)}
        """
        if state is None:
            state = self.getAllBeliefs()

        ## 07/10/08 mei added to prevent the agent considering actions that should not happen
        if len(actions) == 1:
            pass
        else:
            actions = self.actions.getOptions()
            actions = self.updateActionChoices(actions,state)

        return self.policy.execute(state=state,choices=actions,
                                   history=history,debug=debug,explain=explain)

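    ## Typical call (illustrative; 'agent' stands for any instantiated
    ## ThespianAgent). Unless exactly one action is passed in, the agent
    ## re-derives its options and filters them through updateActionChoices
    ## before consulting its PolicyTable:
    ##   decision,explanation = agent.applyPolicy(explain=True)
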
    def updateActionChoices(self,choices,state=None,includeLocation=False):

        if sceneID == 7:
            return self.updateActionChoices7(choices,state)

        if not state:
            state = self.getAllBeliefs()

        actions = []

        # An agent whose 'power' feature has dropped to 0 can only wait
        key = StateKey({'entity':self.name,'feature':'power'})
        selfLocation = state['state'].getMarginal(key).expectation()
        if selfLocation == 0:
            actions.append([Action({'actor':self.name,'type':'wait'})])
            return actions

        if self.name in ['red','granny']:
            key = StateKey({'entity':self.name,'feature':'eaten'})
            selfEaten = state['state'].getMarginal(key).expectation()
            if selfEaten >= 1:
                try:
                    key = StateKey({'entity':'wolf','feature':'alive'})
                    wolfAlive = state['state'].getMarginal(key).expectation()
                    if wolfAlive <= 0:
                        actions.append([Action({'actor':self.name,'type':'escape','object':'wolf'})])
                except:
                    pass
                actions.append([Action({'actor':self.name,'type':'wait'})])
                return actions

        if self.name in ['wolf','red']:
            key = StateKey({'entity':self.name,'feature':'alive'})
            selfAlive = state['state'].getMarginal(key).expectation()
            if selfAlive <= 0:
                actions.append([Action({'actor':self.name,'type':'wait'})])
                return actions

        for option in choices:
            if option[0]['type'] == 'escape':
                continue

            if option[0]['type'] in ['give-cake','eat-cake']:
                key = StateKey({'entity':self.name,'feature':'has-cake'})
                selfHasCake = state['state'].getMarginal(key).expectation()
                if selfHasCake < 1:
                    continue

            #if option[0]['type'] in ['moveto-granny'] and self.name == 'wolf':
            #    key = StateKey({'entity':self.name,'feature':'know-granny'})
            #    selfKnowGranny = state['state'].getMarginal(key).expectation()
            #    if selfKnowGranny < 1:
            #        continue
            #
            #if option[0]['type'] == 'moveto-granny':
            #    key = StateKey({'entity':self.name,'feature':'location'})
            #    selfLocation = state['state'].getMarginal(key).expectation()
            #    key = StateKey({'entity':'granny','feature':'location'})
            #    grannyLocation = state['state'].getMarginal(key).expectation()
            #    #print 'selfLocation, grannyLocation: ', selfLocation,grannyLocation
            #    if selfLocation == grannyLocation:
            #        continue

            if not option[0].has_key('object'):
                type = option[0]['type']
                if type.find('move') > -1 or type.find('enter') > -1:
                    key = StateKey({'entity':self.name,'feature':'indoor'})
                    selfLocation = state['state'].getMarginal(key).expectation()
                    if selfLocation > .5:
                        continue
                if type.find('exist') > -1:
                    key = StateKey({'entity':self.name,'feature':'indoor'})
                    selfLocation = state['state'].getMarginal(key).expectation()
                    if selfLocation < .5:
                        continue
                # No object to check against, so this option is feasible
                actions.append(option)
                continue

            key = StateKey({'entity':self.name,'feature':'location'})
            selfLocation = state['state'].getMarginal(key).expectation()
            key = StateKey({'entity':option[0]['object'],'feature':'location'})
            objectLocation = state['state'].getMarginal(key).expectation()
            if not abs(selfLocation - objectLocation) < 0.0001:
                continue

            if option[0]['object'] in ['wolf','red']:
                key = StateKey({'entity':option[0]['object'],'feature':'alive'})
                objectAlive = state['state'].getMarginal(key).expectation()
                if objectAlive <= 0:
                    continue

            if option[0]['object'] in ['red','granny']:
                key = StateKey({'entity':option[0]['object'],'feature':'eaten'})
                objectEaten = state['state'].getMarginal(key).expectation()
                if objectEaten > 0:
                    continue

            #if includeLocation == True:
            #    key = StateKey({'entity':self.name,'feature':option[0]['object']+'Location'})
            #    objectLocation = state['state'].getMarginal(key).expectation()
            #    if not abs(selfLocation - objectLocation) < 0.001:
            #        continue

            if option[0]['type'] == 'inform':
                key = StateKey({'entity':self.name,'feature':'being-enquired'})
                selfEnquired = state['state'].getMarginal(key).expectation()
                if selfEnquired < 1:
                    continue
                obj = option[0]['object']
                key = StateKey({'entity':obj,'feature':'enquired'})
                objEnquired = state['state'].getMarginal(key).expectation()
                if not objEnquired > .5:
                    continue

            if option[0]['type'] == 'help' and self.name == 'wolf':
                key = StateKey({'entity':self.name,'feature':'helped'})
                selfHelped = state['state'].getMarginal(key).expectation()
                # can only help the woodcutter once
                if selfHelped > .5:
                    continue

            if option[0]['type'] in ['eat'] and option[0]['object'] == 'granny':
                key = StateKey({'entity':self.name,'feature':'know-granny'})
                selfKnowGranny = state['state'].getMarginal(key).expectation()
                # if the wolf doesn't know granny but accidentally decides to enter the house, he can still eat granny
                if selfKnowGranny < 1:
                    continue
                key = StateKey({'entity':self.name,'feature':'indoor'})
                selfIndoor = state['state'].getMarginal(key).expectation()
                if selfIndoor < 1:
                    continue

            if option[0]['type'] in ['talkabout-granny']:
                key = StateKey({'entity':'wolf','feature':'know-granny'})
                wolfKnowGranny = state['state'].getMarginal(key).expectation()
                if wolfKnowGranny > .5:
                    continue

            actions.append(option)

        return actions
        #actionDict = {}
        #actionDict[actor] = option
        #result = self.entities.hypotheticalAct(actionDict)
        #
        #feasibleAction = True
        #for value,prob in result['state'].items():
        #    for key in value:
        #        try:
        #            if key['entity'] == actor and key['feature'] == 'actAliveNorm':
        #                if value[key][keyConstant] < 0:
        #                    feasibleAction = False
        #            elif key['entity'] == actor and key['feature'] == 'resp-norm':
        #                if value[key][keyConstant] < 0:
        #                    feasibleAction = False
        #            elif key['entity'] == actor and key['feature'] == 'specialRule':
        #                if value[key][keyConstant] < 0:
        #                    #print option[0], key, value[key][keyConstant]
        #                    feasibleAction = False
        #        except:
        #            pass
        #
        #if not feasibleAction:
        #    option[0]['active'] = 0

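    ## Illustrative filtering example (the names follow the Little Red
    ## Riding Hood scenario used in this module; the state values are made
    ## up). An 'eat' option aimed at granny survives the filter only while
    ## the wolf believes both his 'know-granny' and 'indoor' features are 1:
    ##   options = wolf.actions.getOptions()
    ##   feasible = wolf.updateActionChoices(options)
    ##   # [Action({'actor':'wolf','type':'eat','object':'granny'})] is
    ##   # dropped whenever either belief falls below 1.
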
    def updateActionChoices7(self,choices,state=None,includeLocation=False):
        convert = {'unsafesex':1,'safesex':2,'drink':3,'physicaltouch':4}

        if not state:
            state = self.getAllBeliefs()
        actions = []

        for option in choices:

            if option[0]['type'].find('accept') > -1 or option[0]['type'].find('reject') > -1:
                key = StateKey({'entity':self.name,'feature':'topic'})
                topic = state['state'].getMarginal(key).expectation()
                if topic == 0:
                    continue

                # string.split relies on the string module pulled in via the wildcard imports above
                offer,thisTopic = string.split(option[0]['type'],'-')
                if not convert[thisTopic] == topic:
                    continue

            actions.append(option)

        return actions
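    ## Sketch of the scene-7 filter (illustrative): an 'accept-drink' or
    ## 'reject-drink' option is kept only when the agent's 'topic' state
    ## equals convert['drink'] == 3, i.e. when a drink offer is actually
    ## pending; all non-accept/reject options pass through unchanged:
    ##   feasible = agent.updateActionChoices7(agent.actions.getOptions())
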
    ## 07/01/07 mei modified to count fixedActions
    ## fixedActions=[{'red-move2':[Action({'actor':'wolf','type':'wait'})]}]
    ## fixedActions = []
    #def multistep(self,horizon=1,start={},state=None,debug=Debugger()):
    #    """Steps this entity the specified number of steps into the future (where a step is one entity performing its policy-specified action)
    #    @param state: the world state to evaluate the actions in (defaults to current world state)
    #    @type state: L{Distribution}(L{KeyedVector})
    #    @warning: This method still needs to be handed an updated turn vector
    #    """
    #    if state is None:
    #        state = self.getAllBeliefs()
    #    sequence = []
    #    # Lookahead
    #    for t in range(horizon):
    #        debug.message(9,'Time %d' % (t))
    #        if t == 0:
    #            entityDict = start
    #        else:
    #            entityDict = {}
    #        nextGroup = self.entities.next(state['turn'])
    #        for entity in nextGroup:
    #            if isinstance(entity,dict):
    #                try:
    #                    choices = entity['choices']
    #                except KeyError:
    #                    choices = []
    #                entity = entity['name']
    #            else:
    #                raise DeprecationWarning,'Turns should be expressed in dictionary form'
    #
    #            ## give the fixed action specified in $fixedActions$ as the agent's only choice
    #            if len(sequence) > 0:
    #                lastAct = max(sequence)['action']
    #                for lastActor in lastAct:
    #                    tmp = lastAct[lastActor][0]
    #                    lastAct = `tmp`
    #            else:
    #                lastAct = ''
    #
    #            for fixedAction in self.fixedActions:
    #                if fixedAction.has_key(lastAct):
    #                    entityDict[entity] = fixedAction[lastAct]
    #                    break
    #            else:
    #                if len(entityDict) < len(nextGroup) and \
    #                   not entityDict.has_key(entity):
    #                    entityDict[entity] = choices
    #        # Apply these entities' actions
    #        delta = self.step(entityDict,state,debug)
    #        self.updateStateDict(state,delta['effect'])
    #        # Accumulate results
    #        sequence.append(delta)
    #    return sequence

    def forceinitState(self):
        """Instantiates all of the state defaults relevant to this entity,
        without checking whether each feature has already been set"""
        for cls in self.classes:
            try:
                featureList = self.hierarchy[cls].getStateFeatures()
            except KeyError:
                featureList = []
            for feature in featureList:
                value = self.hierarchy[cls].getState(feature)
                self.setState(feature,value)
        self.setModel(None)

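    ## Illustrative use: because class defaults are re-applied
    ## unconditionally, this is suitable for resetting an agent between
    ## simulation runs (assumes the society hierarchy is already loaded):
    ##   agent.forceinitState()   # overwrites any previously set features
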
    def fitAllAction(self,horizon=-1,state=None):
        """Computes, for each possible desired action, a set of constraints on possible goal weights for this agent that, if satisfied, will cause the agent to prefer that action in the given state. Each constraint is a dictionary with the following elements:
           - delta: the total difference that must be made up
           - slope: dictionary of coefficients for each goal weight in the sum that must make up that difference
           - plane: the vector of weights, such that the product of this vector and the goal weight vector must exceed 0 for the desired action to be preferred
        @param horizon: the horizon of lookahead to use (if not provided, the agent's default horizon is used)
        @type horizon: int
        @param state: the current state of this agent's beliefs (if not provided, defaults to the result of L{getAllBeliefs})
        @type state: dict
        @return: a dictionary of constraint lists, indexed by the string representation of each candidate action
        @rtype: C{dict}
        """
        if horizon < 0:
            horizon = self.horizon
        if state is None:
            state = self.getAllBeliefs()
        goals = self.getGoalVector()['total']
        if len(goals.domain()) != 1:
            raise NotImplementedError,\
                  'Unable to handle uncertain goals when fitting'
        goals = goals.domain()[0]
        # Compute projections for all actions
        matrices = {}
        for action in self.actions.getOptions():
            sequence = self.multistep(horizon=horizon,
                                      start={self.name:action},
                                      state=copy.deepcopy(state))
            value = None
            if self.valueType == 'average':
                for t in range(len(sequence)):
                    # For now, assume no uncertainty
                    assert len(sequence[t]['state'].domain()) == 1
                    current = copy.deepcopy(sequence[t]['state'].domain()[0])
                    # Add in current state
                    if value is None:
                        value = current
                    else:
                        current.unfreeze()
                        current.fill(value.keys())
                        current.freeze()
                        value += current
                    # Add in relevant actions
                    for key in filter(lambda k:isinstance(k,ObservationKey),
                                      goals.keys()):
                        if not value.has_key(key):
                            value.unfreeze()
                            value[key] = 0.
                            value.freeze()
                        for act in sum(sequence[t]['action'].values(),[]):
                            if act['type'] == key['type']:
                                value[key] += 1.
            elif self.valueType == 'final':
                # Assume no action goals if we care about only the final state
                value = sequence[-1]['state']
            else:
                raise NotImplementedError,\
                      'I do not know how to fit "%s" expected value' \
                      % (self.valueType)
            matrices[str(action)] = value

        # Compare against desired action
        # added by mei: generate constraints for each possible action
        allConstraints = {}
        for desired in self.actions.getOptions():
            constraints = []
            for action in self.actions.getOptions():
                if action != desired:
                    projection = matrices[str(desired)] - matrices[str(action)]
                    goals.fill(projection.keys())
                    diff = goals*projection
                    constraint = {'delta':diff,
                                  'value':True,
                                  'slope':{},
                                  }
                    for key in goals.keys():
                        constraint['slope'][key] = projection[key]
                    constraints.append(constraint)
            allConstraints[str(desired)] = constraints
        return allConstraints

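    ## Interpreting the result (illustrative sketch): each candidate action
    ## maps to one constraint per alternative, and a constraint already
    ## holds when the current goal weights make up the difference, i.e.
    ## when its 'delta' is positive:
    ##   allConstraints = agent.fitAllAction()
    ##   for desired,constraints in allConstraints.items():
    ##       # 'desired' is preferred under the current goal weights when
    ##       # every constraint's delta exceeds 0
    ##       preferred = min([c['delta'] for c in constraints] + [1.]) > 0.
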
    def applyRealGoals(self,entity=None,world=None,debug=Debugger(),
                       fixedgoals=['sameLocation','actAlive','specialRule']):
        """
        @param entity: the entity whose goals are to be evaluated (default is self)
        @type entity: L{GoalBasedAgent}
        @param world: the context for evaluating the goals (default is the beliefs of I{entity})
        @type world: L{teamwork.multiagent.PsychAgents.PsychAgents}
        @return: expected reward of the I{entity} in the current I{world}
        @rtype: L{Distribution} over C{float}"""
        if not entity:
            entity = self
        if world is None:
            world = self.entities
        state = world.getState()
        goals = entity.getGoalVector()

        ## mei modified
        ## only real goals take effect
        gstate = goals['state']
        for item,prob in gstate.items():
            del gstate[item]
            for goal in fixedgoals:
                key = StateKey({'entity':self.name,'feature':goal})
                try:
                    item.__delitem__(key)
                except:
                    pass
            gstate[item] = prob

        goals['state'].fill(state.domain()[0].keys(),0.)

        return goals['state']*state

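    ## Example use (illustrative): evaluates expected reward under only the
    ## "real" story-level goals, after stripping the bookkeeping goals named
    ## in fixedgoals out of each goal vector:
    ##   reward = agent.applyRealGoals()
    ##   # reward is a Distribution over float, per the docstring above
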
    def getDefault(self,feature):
        """Finds the most specific class defaults for the specified
        feature; raises KeyError exception if no default exists"""
        if self.hierarchy is None:
            # Duh, no defaults
            raise KeyError,'%s has no feature %s' % (self.name,feature)
        result = None
        last = None
        for cls in self.classes:
            # Check object attributes
            try:
                result = self.hierarchy[cls].__dict__[feature]
            except KeyError:
                # Check special attributes
                try:
                    result = self.hierarchy[cls].attributes[feature]
                except KeyError:
                    # Nothing here
                    continue
            if feature in ['models','dynamics']:
                # Empty values don't count
                if len(result) > 0:
                    break
            elif feature == 'imageName':
                if result is not None:
                    break
            elif feature == 'actions':
                if result.branchType:
                    # Can't really merge AND/OR decision spaces
                    break
                elif last is None:
                    last = result
                else:
                    # Merge in any extras
                    last = copy.deepcopy(last)
                    for option in result.extras:
                        last.directAdd(option)
                    result = last
            # mei commented out to let the depth specified in the classHierarchy take effect
            #elif feature == 'depth':
            #    # Belief depth is MINIMUM across all default values
            #    if last is None:
            #        last = result
            #    else:
            #        last = min(last,result)
            #    result = last
            else:
                # For everything else, take the first value found
                break
        if result is None:
            if feature == 'actions':
                pass
            else:
                raise KeyError,'%s has no feature %s' % (self.name,feature)
        return result

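    ## Illustrative lookup: self.classes is ordered from most to least
    ## specific, so the first class defining the feature wins (apart from
    ## the merged cases handled above). initEntities below relies on this:
    ##   horizon = agent.getDefault('horizon')   # KeyError if no class defines it
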
    def initGoals(self,entities=[]):
        """Sets the goal weights of this entity based on class
        defaults. The resulting goals depend on the group of entities
        passed in as the optional argument."""
        ## added by mei
        if self.getGoals():
            return

        goals = []
        # First, figure out the relevant goals and their total weight
        # (for normalization)
        keys = []
        for cls in self.classes:
            for goal in self.hierarchy[cls].getGoals():
                goalList = self.instantiateGoal(goal,entities)
                for subGoal in goalList:
                    key = str(subGoal)
                    try:
                        index = keys.index(key)
                        goals[index].weight += subGoal.weight
                    except ValueError:
                        keys.append(key)
                        goals.append(subGoal)
        if len(goals) > 0:
            # Then, add the goals with normalized weights
            self.setGoals(goals)

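    ## Sketch (illustrative; scenarioAgents is a hypothetical list of
    ## instantiated agents): duplicate goals arising from multiple classes
    ## are merged by summing their weights, and the mei-added guard makes
    ## repeat calls no-ops once any goals exist:
    ##   agent.initGoals(entities=scenarioAgents)
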
    def initEntities(self,entityList,depth=1,maxDepth=-1):
        """Sets the entities known to be the list provided, and makes
        the appropriate updates to goals, policy depth, etc."""
        ## added maxDepth param by mei
        # Fill out recursive beliefs
        if maxDepth == -1:
            ## I am the top-level agent, so use my own belief depth
            maxDepth = self.getDefault('depth')

        #print self.ancestry(),depth,maxDepth

        if depth <= maxDepth:
            newList = []
            # First, generate entity objects for my beliefs
            for entity in entityList:
                newEntity = copy.copy(entity)
                newEntity.dynamics = copy.deepcopy(entity.dynamics)
                # Stick this entity object into my beliefs
                self.setEntity(newEntity)
                newList.append(newEntity)
                # Assume correct beliefs about states
                for feature in entity.getStateFeatures():
                    try:
                        value = entity.getState(feature)
                        newEntity.setState(feature,value)
                    except KeyError:
                        pass
            # Finally, fill in specific beliefs according to my class
            # defaults, and go to the next recursive level
            for entity in newList:
                self.initBeliefs(entity)
                if maxDepth-depth > entity.getDefault('depth'):
                    ## the depth needed by the top-level agent exceeds the
                    ## entity's maximum allowed level of beliefs
                    newMaxDepth = depth + entity.getDefault('depth')
                else:
                    newMaxDepth = maxDepth

                entity.initEntities(newList,depth+1,newMaxDepth)
        # Add any goals related to the new entities
        self.initGoals(entityList)
        # Set the depth of lookahead
        agents = []
        for agent in entityList:
            if len(agent.actions.getOptions()) > 0:
                agents.append(agent)
        self.horizon = self.getDefault('horizon')*len(agents)
        if not self.policy:
##            print self.name
##            self.policy = PWLPolicy(self,self.actions,
##                                    len(entityList),self.horizon)
            self.policy = PolicyTable(self,self.actions,self.horizon)
        self.entities.initializeOrder()
        self.entities.state.freeze()

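    ## Typical top-level call (illustrative; entityList would hold the
    ## instantiated scenario agents). With the default maxDepth=-1 the agent
    ## uses its own class-default belief depth, and each recursive level is
    ## capped by the nested entity's own default depth:
    ##   agent.initEntities(entityList)
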
    def getDynamics(self,act,feature=None):
        """Returns this entity's dynamics model for the given action
        @param act: the action whose effect we are interested in
        @type act: L{Action}
        @param feature: if the optional feature argument is provided, then this method returns the dynamics for only the given feature; otherwise, returns the effect over all state features (but this latter capability is now deprecated)
        @type feature: C{str}
        @rtype: L{PWLDynamics}
        """
        if feature:
            try:
                # Try to find dynamics specific to this particular action
                dynFun = self.dynamics[feature][act]
            except KeyError:
                # If not, find a more general dynamics and then instantiate
                try:
                    ## added by mei
                    dynFun = self.dynamics[feature][`act`]
                except KeyError:
                    try:
                        dynFun = self.dynamics[feature][act['type']]
                    except KeyError:
                        try:
                            dynFun = self.dynamics[feature][None]
                        except KeyError:
                            # It's OK for an action to have no dynamics
                            # (e.g., the "wait" action)
                            if not self.dynamics.has_key(feature):
                                self.dynamics[feature] = {}
                            dynFun = IdentityDynamics(feature)

                dynFun = dynFun.instantiate(self,act)
                # Check whether dynamics is well formed
                vector = self.state.domain()[0]
                for leaf in dynFun.getTree().leaves():
                    for key in leaf.rowKeys():
                        if not vector.has_key(key):
                            pass
##                            print 'Dynamics of %s\'s %s in response to %s has extraneous key, %s' % (self.ancestry(),feature,str(act),str(key))
                    for key in leaf.colKeys():
                        if not vector.has_key(key):
                            pass
##                            print 'Dynamics of %s\'s %s in response to %s has extraneous key, %s' % (self.ancestry(),feature,str(act),str(key))
                for branch in dynFun.getTree().branches().values():
                    if not isinstance(branch,Distribution):
                        for key in branch.weights.keys():
                            if not vector.has_key(key):
                                pass
##                                print 'Dynamics of %s\'s %s in response to %s has extraneous key, %s' % (self.ancestry(),feature,str(act),str(key))
                self.dynamics[feature][act] = dynFun
        else:
            raise DeprecationWarning,'Do not compute dynamics over an entire action at the individual agent level'
        return dynFun

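    ## Lookup order sketch (illustrative): the exact Action object first,
    ## then its repr string (the mei-added case), then the action type, then
    ## the catch-all None entry, and finally a no-op IdentityDynamics; the
    ## instantiated result is cached back into self.dynamics:
    ##   dyn = agent.getDynamics(Action({'actor':'wolf','type':'wait'}),
    ##                           feature='alive')
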
    def hypotheticalPostCom(self,beliefs,msgs,epoch=-1,debug=Debugger()):
        """
        @return: the potential change in the agent's beliefs based on received messages"""
        explanation = {}
        for sender in msgs.keys():
            explanation[sender] = {'effect':None}
            for msg in msgs[sender]:
                # Iterate through all messages sent by this sender
                acceptance = None
                label = msg.pretty()
                # Create a sub-explanation for this individual message
                subExp = {}
                # Do we want to return the explanation in the delta?
                explanation[label] = subExp
                # Determine whether receiver believes message
                try:
                    entity = self.getEntity(sender)
                except KeyError:
                    # What to do here?
                    continue
                # mei edited for testing, exception in acceptMessage
                #acceptance,accExp = self.acceptMessage(entity,msg,debug)
                acceptance = True
                subExp['decision'] = acceptance
                subExp['breakdown'] = {}
                #subExp['breakdown'] = accExp
                if acceptance:
                    # Update beliefs if accepting message
                    debug.message(4,'%s accepts %s' % (self.ancestry(),
                                                       label))
                    delta,subExp = self.incorporateMessage(msg)
                    if explanation[sender]['effect'] is None:
                        explanation[sender]['effect'] = delta
                    else:
                        raise NotImplementedError,'Currently unable to incorporate multiple messages from the same sender'

                    ## mei added
                    ## update all my beliefs recursively
                    for entity in self.entities:
                        previous = copy.copy(msg['force'])
                        msg.forceAccept()
                        self.entities[entity].postComStateEstimator(beliefs[entity],{sender:[msg]},
                                                                    epoch,debug)
                        msg['force'] = previous

                    self.updateTrust(sender,explanation[sender]['effect'],
                                     True)
                    # Update any beliefs about myself
                    try:
                        entity = self.getEntity(self.name)
                    except KeyError:
                        entity = None
                    if entity:
                        previous = copy.copy(msg['force'])
                        msg.forceAccept()
                        subExp = entity.hypotheticalPostCom(beliefs[sender],
                                                            {sender:[msg]},
                                                            epoch,debug)
                        explanation[sender]['effect'][sender] = {self.name:subExp[sender]['effect']}
                        msg['force'] = previous
                elif entity:
                    debug.message(4,'%s rejects %s' % \
                                  (self.ancestry(),label))
                    explanation[sender]['effect'] = {}
                    self.updateTrust(sender,explanation[sender]['effect'],
                                     False)
                    # Update sender's beliefs about entities' beliefs
                    try:
                        entity = entity.getEntity(self.name)
                    except KeyError:
                        entity = None
                    if entity:
                        # This entity has a belief about me, so I need to
                        # update it
                        entity.postComStateEstimator(entity,{sender:[msg]},
                                                     epoch,debug)
        return explanation

    ## def applyDefaults(self,className=None,hierarchy=None):
    ##     """Applies the generic model in the society of the given class name"""
    ##
    ##     PsychEntity.applyDefaults(self,className,hierarchy)
    ##     if self.option_messages:
    ##         for msg in self.getDefault('option_messages'):
    ##             msg1 = ThespianMessage(msg)
    ##             ## self.option_messages.append(msg1)
    ##             ## directly add messages as options
    ##             self.actions.directAdd([msg1])
    ##             ## print self.name
    ##             ## print self.actions.getOptions()
    ##             ## print
    ##
    ##
    ## def __repr__(self):
    ##     """Returns a string representation of this entity"""
    ##     content = PsychEntity.__repr__(self)
    ##     if self.option_messages:
    ##         content += '\n\tOption_messages:\n'
    ##         content += '\t\t'+`self.option_messages`
    ##     return content