from teamwork.agent.DefaultBased import *
from teamwork.agent.Entities import *
from teamwork.multiagent.sequential import *
from teamwork.multiagent.GenericSociety import *
from teamwork.action.PsychActions import *

from teamwork.dynamics.pwlDynamics import *
from teamwork.policy.policyTable import PolicyTable

sceneID = 3


def observe(self,actionDict={}):
    """
    @param actionDict: the performed actions, indexed by actor name
    @type actionDict: C{dict:strS{->}L{Action}[]}
    @return: observations this entity would make of the given actions, in the same format as the provided action dictionary
    @rtype: C{dict}
    """
    observations = {}
    for actor,action in actionDict.items():
        if actor == self.name:
            # An agent always observes its own actions
            observations[actor] = action
        else:
            observation = []
            for subAct in action:
                if subAct.has_key('_observed'):
                    # Action explicitly flagged as observed by specific agents
                    if self.name in subAct['_observed']:
                        observation.append(subAct)
                        continue
                if subAct.has_key('_unobserved'):
                    # Action explicitly flagged as hidden from specific agents
                    if self.name in subAct['_unobserved']:
                        continue
                if subAct['object'] == self.name:
                    # Actions directed at this agent are always observed
                    observation.append(subAct)
                else:
                    # Otherwise, consult this agent's observation function
                    for omega,entries in self.observations.items():
                        if str(subAct) == omega:
                            for entry in entries.keys():
                                if subAct.matchTemplate(entry):
                                    if entry['_observable']:
                                        observation.append(subAct)
                                    # else: matched an unobservable template
                                    break
                            else:
                                # No template matched this action
                                break
                            # A template decided this action; stop searching
                            break
                    else:
                        # No observation entry at all: observable by default
                        observation.append(subAct)
            if len(observation) > 0:
                # Record only non-empty observations
                observations[actor] = observation
    return observations
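
# Illustration (hypothetical objects, using entity names from this scenario):
# an action flagged '_unobserved' for red is dropped from red's observations,
# while an actor always sees its own actions.
#
#   act = Action({'actor':'wolf','type':'eat','object':'granny',
#                 '_unobserved':['red']})
#   red.observe({'wolf':[act]})   # => {}
#   wolf.observe({'wolf':[act]})  # => {'wolf':[act]}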


def applyPolicy(self,state=None,actions=[],history=None,debug=None,
                explain=False):
    """Generates a decision chosen according to the agent's current policy
    @param state: the current state vector
    @type state: L{Distribution}(L{KeyedVector})
    @param actions: the possible actions the agent can consider (defaults to all available actions)
    @type actions: L{Action}[]
    @param history: a dictionary of actions that have already been performed (and which should not be performed again if they are labeled as not repeatable)
    @type history: L{Action}[]:bool
    @param explain: flag indicating whether an explanation should be generated
    @type explain: bool
    @return: a list of actions and an explanation, the latter provided by L{execute<PolicyTable.execute>}
    @rtype: C{(L{Action}[],Element)}
    """
    if state is None:
        state = self.getAllBeliefs()
    if len(actions) != 1:
        # No forced choice, so consider all available options,
        # filtered by the scenario-specific constraints
        actions = self.actions.getOptions()
        actions = self.updateActionChoices(actions,state)
    return self.policy.execute(state=state,choices=actions,
                               history=history,debug=debug,explain=explain)
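
# Minimal usage sketch (assuming red is an agent object from this scenario):
#
#   decision,explanation = red.applyPolicy(explain=True)
#
# Passing a single-element actions list bypasses the scenario filter and
# forces that choice through to the policy unchanged.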


def updateActionChoices(self,choices,state=None):
    """Filters the available options down to those that are legal in the current scenario state."""
    if sceneID == 7:
        return self.updateActionChoices7(choices,state)

    if not state:
        state = self.getAllBeliefs()

    actions = []

    # An agent with no power left can only wait
    key = StateKey({'entity':self.name,'feature':'power'})
    selfPower = state['state'].getMarginal(key).expectation()
    if selfPower == 0:
        actions.append([Action({'actor':self.name,'type':'wait'})])
        return actions

    if self.name in ['red','granny']:
        # Once eaten, the only options are escaping (if the wolf is dead) or waiting
        key = StateKey({'entity':self.name,'feature':'eaten'})
        selfEaten = state['state'].getMarginal(key).expectation()
        if selfEaten >= 1:
            try:
                key = StateKey({'entity':'wolf','feature':'alive'})
                wolfAlive = state['state'].getMarginal(key).expectation()
                if wolfAlive <= 0:
                    actions.append([Action({'actor':self.name,'type':'escape','object':'wolf'})])
            except KeyError:
                # The wolf may be absent from this agent's beliefs
                pass
            actions.append([Action({'actor':self.name,'type':'wait'})])
            return actions

    if self.name in ['wolf','red']:
        # A dead agent can only wait
        key = StateKey({'entity':self.name,'feature':'alive'})
        selfAlive = state['state'].getMarginal(key).expectation()
        if selfAlive <= 0:
            actions.append([Action({'actor':self.name,'type':'wait'})])
            return actions

    for option in choices:
        # 'escape' is only generated by the special cases above
        if option[0]['type'] == 'escape':
            continue

        # Cake actions require actually holding the cake
        if option[0]['type'] in ['give-cake','eat-cake']:
            key = StateKey({'entity':self.name,'feature':'has-cake'})
            selfHasCake = state['state'].getMarginal(key).expectation()
            if selfHasCake < 1:
                continue

        if not option[0].has_key('object'):
            # Objectless actions: movement options depend on being indoors or outdoors
            actType = option[0]['type']
            if actType.find('move') > -1 or actType.find('enter') > -1:
                key = StateKey({'entity':self.name,'feature':'indoor'})
                selfIndoor = state['state'].getMarginal(key).expectation()
                if selfIndoor > .5:
                    continue
            if actType.find('exit') > -1:
                key = StateKey({'entity':self.name,'feature':'indoor'})
                selfIndoor = state['state'].getMarginal(key).expectation()
                if selfIndoor < .5:
                    continue
            actions.append(option)
            # The remaining tests apply only to actions with an object
            continue

        # Actions with an object require actor and object to be co-located
        key = StateKey({'entity':self.name,'feature':'location'})
        selfLocation = state['state'].getMarginal(key).expectation()
        key = StateKey({'entity':option[0]['object'],'feature':'location'})
        objectLocation = state['state'].getMarginal(key).expectation()
        if not abs(selfLocation - objectLocation) < 0.0001:
            continue

        # The wolf and red can only be acted upon while alive
        if option[0]['object'] in ['wolf','red']:
            key = StateKey({'entity':option[0]['object'],'feature':'alive'})
            objectAlive = state['state'].getMarginal(key).expectation()
            if objectAlive <= 0:
                continue

        # Red and granny can only be acted upon before being eaten
        if option[0]['object'] in ['red','granny']:
            key = StateKey({'entity':option[0]['object'],'feature':'eaten'})
            objectEaten = state['state'].getMarginal(key).expectation()
            if objectEaten > 0:
                continue

        if option[0]['type'] == 'inform':
            # Can only answer once asked, and only about the entity asked about
            key = StateKey({'entity':self.name,'feature':'being-enquired'})
            selfEnquired = state['state'].getMarginal(key).expectation()
            if selfEnquired < 1:
                continue
            obj = option[0]['object']
            key = StateKey({'entity':obj,'feature':'enquired'})
            objEnquired = state['state'].getMarginal(key).expectation()
            if objEnquired <= .5:
                continue

        if option[0]['type'] == 'help' and self.name == 'wolf':
            # The wolf only helps once
            key = StateKey({'entity':self.name,'feature':'helped'})
            selfHelped = state['state'].getMarginal(key).expectation()
            if selfHelped > .5:
                continue

        if option[0]['type'] in ['eat'] and option[0]['object'] == 'granny':
            # Eating granny requires knowing about her and being inside her house
            key = StateKey({'entity':self.name,'feature':'know-granny'})
            selfKnowGranny = state['state'].getMarginal(key).expectation()
            if selfKnowGranny < 1:
                continue
            key = StateKey({'entity':self.name,'feature':'indoor'})
            selfIndoor = state['state'].getMarginal(key).expectation()
            if selfIndoor < 1:
                continue

        if option[0]['type'] in ['talkabout-granny']:
            # No point telling the wolf about granny twice
            key = StateKey({'entity':'wolf','feature':'know-granny'})
            wolfKnowGranny = state['state'].getMarginal(key).expectation()
            if wolfKnowGranny > .5:
                continue

        actions.append(option)

    return actions
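
# Illustration (hypothetical state): if red and the wolf are at different
# 'location' values, an option such as
#   [Action({'actor':'red','type':'talkabout-granny','object':'wolf'})]
# fails the co-location test above, while an objectless option such as
#   [Action({'actor':'red','type':'wait'})]
# passes straight through.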


def updateActionChoices7(self,choices,state=None):
    """Filters options for scene 7: an offer can only be accepted or rejected when one is pending, and only on the offered topic."""
    # Map each negotiation topic to its code in the 'topic' state feature
    convert = {'unsafesex':1,'safesex':2,'drink':3,'physicaltouch':4}

    if not state:
        state = self.getAllBeliefs()
    actions = []

    for option in choices:
        if option[0]['type'].find('accept') > -1 or option[0]['type'].find('reject') > -1:
            # An accept/reject is legal only if some offer is on the table...
            key = StateKey({'entity':self.name,'feature':'topic'})
            topic = state['state'].getMarginal(key).expectation()
            if topic == 0:
                continue

            # ...and only for the topic currently being offered
            offer,thisTopic = option[0]['type'].split('-')
            if convert[thisTopic] != topic:
                continue

        actions.append(option)

    return actions
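
# Illustration: with the 'topic' feature at 2 (safesex), only accept-safesex
# and reject-safesex survive the filter; accept-drink is dropped because
# convert['drink'] is 3, not 2, and all accept/reject options are dropped
# when topic is 0 (no offer pending).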


def initState(self):
    """Instantiates all of the state defaults relevant to this entity.
    Unlike the inherited version, this does not check whether a feature
    has already been set before overwriting it."""
    for cls in self.classes:
        try:
            featureList = self.hierarchy[cls].getStateFeatures()
        except KeyError:
            featureList = []
        for feature in featureList:
            # Apply the class default, overwriting any existing value
            value = self.hierarchy[cls].getState(feature)
            self.setState(feature,value)
    self.setModel(None)


def generateConstraints(self,desired=None,horizon=-1,state=None):
    """Computes a set of constraints on possible goal weights for this agent that, if satisfied, will cause the agent to prefer the desired action in the given state. Each constraint is a dictionary with the following elements:
      - delta: the total difference that must be made up
      - slope: dictionary of coefficients for each goal weight in the sum that must make up that difference
      - plane: the vector of weights, such that the product of this vector and the goal weight vector must exceed 0 for the desired action to be preferred
    @param desired: ignored in this variant, which generates constraints for every available action
    @type desired: L{Action}[]
    @param horizon: the horizon of lookahead to use (if not provided, the agent's default horizon is used)
    @type horizon: int
    @param state: the current state of this agent's beliefs (if not provided, defaults to the result of L{getAllBeliefs})
    @type state: dict
    @return: a dictionary of constraint lists, indexed by the string form of each available action
    @rtype: C{dict:strS{->}dict[]}
    """
    if horizon < 0:
        horizon = self.horizon
    if state is None:
        state = self.getAllBeliefs()
    goals = self.getGoalVector()['total']
    if len(goals.domain()) != 1:
        raise NotImplementedError,\
              'Unable to handle uncertain goals when fitting'
    goals = goals.domain()[0]
    # Project the effect of each available action over the given horizon
    matrices = {}
    for action in self.actions.getOptions():
        sequence = self.multistep(horizon=horizon,
                                  start={self.name:action},
                                  state=copy.deepcopy(state))
        value = None
        if self.valueType == 'average':
            # Accumulate the state vector over the whole sequence
            for t in range(len(sequence)):
                assert len(sequence[t]['state'].domain()) == 1
                current = copy.deepcopy(sequence[t]['state'].domain()[0])
                if value is None:
                    value = current
                else:
                    current.unfreeze()
                    current.fill(value.keys())
                    current.freeze()
                    value += current
                # Count performed actions toward any observation-based goals
                for key in filter(lambda k:isinstance(k,ObservationKey),
                                  goals.keys()):
                    if not value.has_key(key):
                        value.unfreeze()
                        value[key] = 0.
                        value.freeze()
                    for act in sum(sequence[t]['action'].values(),[]):
                        if act['type'] == key['type']:
                            value[key] += 1.
        elif self.valueType == 'final':
            # Use only the final state of the projection
            value = sequence[-1]['state']
        else:
            raise NotImplementedError,\
                  'I do not know how to fit "%s" expected value' \
                  % (self.valueType)
        matrices[str(action)] = value

    # For each candidate action, constrain the goal weights so that it is
    # preferred over every alternative
    allConstraints = {}
    for desired in self.actions.getOptions():
        constraints = []
        for action in self.actions.getOptions():
            if action != desired:
                projection = matrices[str(desired)] - matrices[str(action)]
                goals.fill(projection.keys())
                diff = goals*projection
                constraint = {'delta':diff,
                              'value':True,
                              'slope':{},
                              }
                for key in goals.keys():
                    constraint['slope'][key] = projection[key]
                constraints.append(constraint)
        allConstraints[str(desired)] = constraints
    return allConstraints
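
# A minimal sketch (hypothetical helper, not part of the original API) of how
# the returned constraints can be tested: a candidate goal-weight assignment
# makes the agent prefer an action iff every one of that action's constraints
# has a positive weighted slope sum.
#
#   def satisfiesConstraints(constraints,weights):
#       for constraint in constraints:
#           total = 0.
#           for key,slope in constraint['slope'].items():
#               total += slope*weights[key]
#           if total <= 0.:
#               return False
#       return True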


def applyRealGoals(self,entity=None,world=None,debug=Debugger(),
                   fixedgoals=['sameLocation','actAlive','specialRule']):
    """
    @param entity: the entity whose goals are to be evaluated (default is self)
    @type entity: L{GoalBasedAgent}
    @param world: the context for evaluating the goals (default is the beliefs of I{entity})
    @type world: L{teamwork.multiagent.PsychAgents.PsychAgents}
    @return: expected reward of the I{entity} in the current I{world}
    @rtype: L{Distribution} over C{float}"""
    if not entity:
        entity = self
    if world is None:
        world = self.entities
    state = world.getState()
    goals = entity.getGoalVector()

    # Strip the fixed bookkeeping goals out of the goal vector before
    # computing the expected reward
    gstate = goals['state']
    for item,prob in gstate.items():
        del gstate[item]
        for goal in fixedgoals:
            key = StateKey({'entity':self.name,'feature':goal})
            try:
                del item[key]
            except KeyError:
                pass
        gstate[item] = prob

    goals['state'].fill(state.domain()[0].keys(),0.)

    return goals['state']*state
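
# The return value is the usual linear reward: the dot product of the goal
# weight vector and the state distribution, computed by the overloaded '*'
# operator on the PWL vector classes.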


def getDefault(self,feature):
    """Finds the most specific class defaults for the specified
    feature; raises KeyError exception if no default exists"""
    if self.hierarchy is None:
        # No class hierarchy to fall back on
        raise KeyError,'%s has no feature %s' % (self.name,feature)
    result = None
    last = None
    for cls in self.classes:
        # Look up the feature on this class, first as an attribute of the
        # class object, then in its attributes dictionary
        try:
            result = self.hierarchy[cls].__dict__[feature]
        except KeyError:
            try:
                result = self.hierarchy[cls].attributes[feature]
            except KeyError:
                continue
        if feature in ['models','dynamics']:
            # Accept the first non-empty table
            if len(result) > 0:
                break
        elif feature == 'imageName':
            if result is not None:
                break
        elif feature == 'actions':
            if result.branchType:
                # A structured action space overrides any accumulation
                break
            elif last is None:
                last = result
            else:
                # Merge the action options accumulated so far
                last = copy.deepcopy(last)
                for option in result.extras:
                    last.directAdd(option)
                result = last
        else:
            # For any other feature, the first match is the most specific
            break
    if result is None:
        if feature == 'actions':
            pass
        else:
            raise KeyError,'%s has no feature %s' % (self.name,feature)
    return result
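
# Example (hypothetical class names): for an agent whose classes list is
# ['Red','Entity'], getDefault('horizon') returns Red's value if defined
# and Entity's otherwise; for 'actions' without a branchType, the options
# accumulated from both classes are merged via directAdd before returning.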


def applyGoals(self,entities=[]):
    """Sets the goal weights of this entity based on class
    defaults. The resulting goals depend on the group of entities
    passed in as the optional argument."""
    if self.getGoals():
        # Goals have already been set
        return

    goals = []
    # Accumulate instantiated goals from every class, merging duplicates
    # by summing their weights
    keys = []
    for cls in self.classes:
        for goal in self.hierarchy[cls].getGoals():
            goalList = self.instantiateGoal(goal,entities)
            for subGoal in goalList:
                key = str(subGoal)
                try:
                    index = keys.index(key)
                    goals[index].weight += subGoal.weight
                except ValueError:
                    keys.append(key)
                    goals.append(subGoal)
    if len(goals) > 0:
        self.setGoals(goals)


def getDynamics(self,act,feature=None):
    """Returns this entity's dynamics model for the given action
    @param act: the action whose effect we are interested in
    @type act: L{Action}
    @param feature: if the optional feature argument is provided, then this method returns the dynamics for only the given feature; otherwise, returns the effect over all state features (but this latter capability is now deprecated)
    @type feature: C{str}
    @rtype: L{PWLDynamics}
    """
    if feature:
        try:
            # Use the cached dynamics for this exact action if available
            dynFun = self.dynamics[feature][act]
        except KeyError:
            # Find the most specific matching entry: the action's string
            # form, then its type, then the catch-all default
            try:
                dynFun = self.dynamics[feature][`act`]
            except KeyError:
                try:
                    dynFun = self.dynamics[feature][act['type']]
                except KeyError:
                    try:
                        dynFun = self.dynamics[feature][None]
                    except KeyError:
                        # No dynamics at all: the action leaves this feature unchanged
                        if not self.dynamics.has_key(feature):
                            self.dynamics[feature] = {}
                        dynFun = IdentityDynamics(feature)
            # Instantiate the generic dynamics for this agent and action
            dynFun = dynFun.instantiate(self,act)
            # Verify the keys used by the instantiated tree against the
            # state vector (these checks are currently no-ops)
            vector = self.state.domain()[0]
            for leaf in dynFun.getTree().leaves():
                for key in leaf.rowKeys():
                    if not vector.has_key(key):
                        pass
                for key in leaf.colKeys():
                    if not vector.has_key(key):
                        pass
            for branch in dynFun.getTree().branches().values():
                if not isinstance(branch,Distribution):
                    for key in branch.weights.keys():
                        if not vector.has_key(key):
                            pass
            # Cache the instantiated dynamics for reuse
            self.dynamics[feature][act] = dynFun
    else:
        raise DeprecationWarning,'Do not compute dynamics over an entire action at the individual agent level'
    return dynFun
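
# Usage sketch (entity names from this scenario; granny here is assumed to be
# an agent object): fetch the effect of the wolf eating granny on her 'eaten'
# feature, as a piecewise-linear tree that can be applied to a state vector.
#
#   act = Action({'actor':'wolf','type':'eat','object':'granny'})
#   dyn = granny.getDynamics(act,'eaten')
#   tree = dyn.getTree()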
677
678
680 """
681 @return: the potential change in the agent's beliefs based on received messages"""
682 explanation = {}
683 for sender in msgs.keys():
684 explanation[sender] = {'effect':None}
685 for msg in msgs[sender]:
686
687 acceptance = None
688 label = msg.pretty()
689
690 subExp = {}
691
692 explanation[label] = subExp
693
694 try:
695 entity = self.getEntity(sender)
696 except KeyError:
697
698 continue
699
700
701 acceptance = True
702 subExp['decision'] = acceptance
703 subExp['breakdown'] = {}
704
705 if acceptance:
706
707 debug.message(4,'%s accepts %s' % (self.ancestry(),
708 label))
709 delta,subExp = self.incorporateMessage(msg)
710 if explanation[sender]['effect'] is None:
711 explanation[sender]['effect'] = delta
712 else:
713 raise NotImplementedError,'Currently unable to incorporate multiple messages from the same sender'
714
715
716
717
718 for entity in self.entities:
719 previous = copy.copy(msg['force'])
720 msg.forceAccept()
721 self.entities[entity].postComStateEstimator(beliefs[entity],{sender:[msg]},
722 epoch,debug)
723 msg['force'] = previous
724
725
726
727 self.updateTrust(sender,explanation[sender]['effect'],
728 True)
729
730 try:
731 entity = self.getEntity(self.name)
732 except KeyError:
733 entity = None
734 if entity:
735 previous = copy.copy(msg['force'])
736 msg.forceAccept()
737 subExp = entity.hypotheticalPostCom(beliefs[sender],
738 {sender:[msg]},
739 epoch,debug)
740 explanation[sender]['effect'][sender] = {self.name:subExp[sender]['effect']}
741 msg['force'] = previous
742 elif entity:
743 debug.message(4,'%s rejects %s' % \
744 (self.ancestry(),label))
745 explanation[sender]['effect'] = {}
746 self.updateTrust(sender,explanation[sender]['effect'],
747 False)
748
749 try:
750 entity = entity.getEntity(self.name)
751 except KeyError:
752 entity = None
753 if entity:
754
755
756 entity.postComStateEstimator(entity,{sender:[msg]},
757 epoch,debug)
758 return explanation
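
# The returned explanation is indexed by sender and by each message's pretty()
# label, roughly of the form:
#
#   {'wolf': {'effect': <belief delta from incorporateMessage>},
#    '<message label>': {'decision': True, 'breakdown': {}}}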