1 import copy
2 import random
3 import sys
4
5 from teamwork.multiagent.sequential import *
6 from teamwork.agent.Entities import *
7 from teamwork.agent.AgentClasses import *
8 from teamwork.dynamics.pwlDynamics import *
9 from teamwork.math.Interval import *
10 from teamwork.math.KeyedMatrix import *
11 from teamwork.math.KeyedTree import *
12 from teamwork.action.PsychActions import *
13 from teamwork.policy.StochasticPolicy import *
14 from teamwork.shell.TerminalShell import *
15
# Game-variant flags: __GOOD__ enables an explicit public good that the
# Donee can buy (turned on by the -g command-line flag); __PUNISH__
# enables the punishment stage of each round.
__GOOD__ = None
__PUNISH__ = 1

# Number of agents of each Donor subclass, filled in from the command
# line (--OO/--OP/--PO/--PP options)
population = {'OptOptDonor':0,
              'OptPesDonor':0,
              'PesOptDonor':0,
              'PesPesDonor':0
              }
# Candidate donation amounts considered when adjusting punishment thresholds
donations = [.001,.003,.005,.007,.01]


# Candidate punishment amounts available to Donors
fines = [.01]
# Agents are named '<namePrefix><index>', e.g. 'agent0'
namePrefix = 'agent'
# Index offset between an agent and its designated neighbor
neighborSpacing = 1
30
31
# Root class of the game's agent hierarchy: every entity has a 'wealth'
# state feature and believes that every other entity has one as well.
# The 'wealth' dynamics dictionary starts empty and is populated by
# initialize() from the command-line arguments.
classHierarchy['PublicGood'] = {
    'parent': [],
    'state':{'wealth':0.5},
    'beliefs':{None:{'wealth':None}},
    'dynamics':{'wealth':{}},
    'depth':1
    }
39
# The Donee pools the Donors' contributions and disburses them (or, when
# __GOOD__ is set, buys the public good).  It starts with no wealth,
# follows its single 'normal' model, and believes Donors to be using an
# 'altruistic' model.
classHierarchy['Donee'] = {
    'parent':['PublicGood'],
    'state':{'wealth':0.,
             'goodExists':0.},
    'goals':[],
    'models':{'normal':{'goals':[]}},
    'model':'normal',
    'beliefs':{'Donor':{'model':'altruistic'}}
    }
49
# A Donor donates to the Donee and may punish its neighbors.  The four
# two-letter models (AA/AE/EA/EE) are fixed reactive policies filled in
# by initialize(); they correspond to the Opt/Pes Donor subclasses below.
# The 'deliberate' model switches to lookahead after observing a
# disbursement.
classHierarchy['Donor'] = {
    'parent':['PublicGood'],
    'relationships': {'donee':'Donee',
                      'neighbor':'Donor'},
    'actions': [],
    'state':{'wealth':0.9},


    'models':{'AA': {}, 'AE': {}, 'EA':{}, 'EE': {},
              'deliberate':{'policy':['observation depth 1 type disburse -> '\
                                      +'{"type":"lookahead"}']}},
    'beliefs':{'Donee':{'model':'normal'}},
    'model':'deliberate'
    }
64
# The four Donor subclasses differ only in which fixed model they ascribe
# to other Donors (see initialize(): the first letter selects the
# donation style, the second the punishment style; 'A' is the more
# generous variant).
classHierarchy['OptOptDonor'] = {
    'parent':['Donor'],
    'beliefs':{'Donor':{'model':'AA'}}
    }

classHierarchy['OptPesDonor'] = {
    'parent':['Donor'],
    'beliefs':{'Donor':{'model':'AE'}}
    }

classHierarchy['PesOptDonor'] = {
    'parent':['Donor'],
    'beliefs':{'Donor':{'model':'EA'}}
    }

classHierarchy['PesPesDonor'] = {
    'parent':['Donor'],
    'beliefs':{'Donor':{'model':'EE'}}
    }

# Module-level counter; presumably consumed by an agent-counting/naming
# helper (e.g. agentCount()) not visible in this chunk -- TODO confirm
__count__ = -1
96
98 """Domain-specific class that includes an 'amount' field."""
99 format = ['actor','type','object','amount']
100
104
106 for key in self.fields.keys():
107 if not self[key] is None and not other[key] is None \
108 and self[key] != other[key]:
109 return None
110 return 1
111
113 """Domain-specific class that sets up the game stages"""
114
115 fixedNetwork = 1
116
118 if self.fixedNetwork:
119 return None
120
121 for act in actions:
122 if act['object'] and act['object'] != 'Public':
123 act['object'] = act['actor'].neighbors[act['object']]
124 return 1
125
130
132
133 remaining = self.keys()
134 try:
135 remaining.remove('Public')
136 except ValueError:
137
138 return None
139 for entity in self.members():
140 if entity.name == 'Public' or entity.parent:
141 continue
142 if len(remaining) > 1:
143
144 index = int(random.random()*len(remaining))
145 if remaining[index] == entity.name:
146 index += 1
147 if index == len(remaining):
148 index = 0
149 else:
150
151 index = 0
152 if remaining[index] == entity.name:
153
154 flag = 1
155 done = None
156 while not done:
157
158 other = self.members()[int(random.random()*len(self))]
159 if other.name != 'Public' and \
160 other.name != entity.name:
161
162 neighbor = other.neighbors.keys()[0]
163 remaining[index] = other.neighbors[neighbor]
164 other.neighbors[neighbor] = entity.name
165 done = 1
166
167 try:
168 entity.neighbors[entity.neighbors.keys()[0]] = remaining[index]
169 except AttributeError,e:
170 print entity.ancestry()
171 raise AttributeError,e
172 del remaining[index]
173 if debug:
174 for entity in self.members():
175 debug.message(8,'New neighbor of %s: %s' % \
176 (entity.name,entity.neighbors.values()[0]))
177 return 1
178
180 """Orders the entities so that there is a first parallel
181 donation stage and then a second parallel
182 punishment/disbursement stage"""
183
184 donors = []
185 donees = []
186 for name in self.keys():
187 entity = self[name]
188 if entity.instanceof('Donor'):
189 donors.append(name)
190 else:
191 donees.append(name)
192 self.keyOrder = []
193
194 round = []
195 for agent in donors:
196 choices = []
197 for act in self[agent].actions:
198
199 if act['type'] in ['wait','donate']:
200 choices.append(act)
201 round.append({'name':agent,'choices':choices})
202 self.keyOrder.append(round)
203
204 round = []
205 for agent in donees:
206 for act in self[agent].actions:
207 if act['type'] == 'disburse':
208 round.append({'name':agent,'choices':[act]})
209 for agent in donors:
210 choices = []
211 for act in self[agent].actions:
212
213 if act['type'] in ['wait','punish']:
214 choices.append(act)
215 round.append({'name':agent,'choices':choices})
216 self.keyOrder.append(round)
217
218 self.order = copy.deepcopy(self.keyOrder)
219
220 try:
221 self['Public'].freezeModels()
222 except KeyError:
223 pass
224
225
227 beliefClass = PublicGoodAgents
228 actionClass = DonateAction
229
230
231
232 modelChange = 1
233 learningRate = 0.2
234 valueType = 'final'
235 mentalType = 'aggregate'
236
238 """These agents don't really do any backward projection, so
239 let's shortcut through the annoying copying that is required
240 when freezing the initial version of myself"""
241 self.initial = self
242
244 """Updates beliefs in response to observation
245 (Within this model, the agent and its belief state are one and
246 the same)"""
247 self.saveObservations(actions)
248 delta = {}
249 if len(self.getEntities()) == 0:
250 return beliefs,delta
251
252 aggActList = []
253 pubAct = None
254 toDistribute = 0.
255 total = 0.
256 for act in actions:
257
258 if isinstance(act['actor'],str):
259 actor = act['actor']
260 else:
261 actor = act['actor'].name
262 if actor == self.name:
263
264 myAct = act
265 elif actor == 'Public':
266
267 pubAct = act
268 elif act['type'] == 'punish':
269 if act['object'] == self.name:
270
271 if actor in self.relationships['neighbor']:
272 aggActList.append(copy.copy(act))
273 total += act['amount']
274 else:
275
276 toDistribute += act['amount']
277 elif actor in self.relationships['neighbor']:
278 aggActList.append(self.actionClass({'type':'wait',
279 'actor':actor}))
280 elif act['type'] == 'donate':
281 if actor in self.relationships['neighbor']:
282 aggActList.append(copy.copy(act))
283 total += act['amount']
284 else:
285
286 toDistribute += act['amount']
287 elif actor in self.relationships['neighbor']:
288
289 aggActList.append(copy.copy(act))
290 else:
291
292 pass
293
294 if toDistribute > 0.:
295 for act in aggActList:
296 if total > 0.:
297 if act['type'] != 'wait':
298 act['amount'] += toDistribute*act['amount']/total
299 else:
300 if act['type'] == 'wait':
301 if pubAct:
302 act['object'] = self.name
303 act['type'] = 'punish'
304 else:
305 act['object'] = 'Public'
306 act['type'] = 'donate'
307 elif act['type'] == 'punish':
308 act['object'] = self.name
309 act['amount'] = toDistribute/float(len(aggActList))
310 if act['amount'] > 1.:
311 print obs
312 raise ValueError,'Illegal amount: %s' % `act`
313 aggActList.append(myAct)
314 if pubAct:
315 aggActList.append(pubAct)
316
317 for act in aggActList:
318 delta[`act`] = beliefs.updateBeliefs(act,debug)
319
320 result = Stereotyper.preComStateEstimator(self,beliefs,aggActList,
321 epoch,debug)
322 for key,value in delta.items():
323 try:
324 value.update(result[key])
325 except KeyError:
326
327 pass
328
329 for name in beliefs.getEntities():
330 entity = beliefs.getEntity(name)
331 entity,changes = entity.preComStateEstimator(entity,aggActList,
332 epoch,debug)
333
334 for obsType in changes.keys():
335 if not delta[obsType].has_key(name):
336 delta[obsType][name] = {}
337 for key in changes[obsType].keys():
338 delta[obsType][name][key] = changes[obsType][key]
339 if len(delta[obsType][name].keys()) == 0:
340 del delta[obsType][name]
341 self.invalidateCache()
342 return beliefs,delta
343
345 """Sets the entities known to be the list provided, and makes
346 the appropriate updates to goals, policy depth, etc."""
347
348 maxDepth = self.getDefault('depth')
349 if depth <= maxDepth:
350 newList = []
351
352 for entity in entityList:
353
354 if 'Donee' in self.classes or \
355 ('Donor' in entity.classes and \
356 entity.name != self.name and \
357 not entity.name in self.relationships['neighbor']):
358 continue
359
360 newEntity = copy.copy(entity)
361 self.setEntity(newEntity)
362 newList.append(newEntity)
363
364 if self.mentalType == 'aggregate' and \
365 'Donor' in newEntity.classes and \
366 newEntity.name != self.name:
367 newEntity.relationships = {'donee':newEntity.relationships['donee'],
368 'neighbor': [self.name]}
369
370 for feature in entity.getStateFeatures():
371 try:
372 value = entity.getState(feature)
373 newEntity.setState(feature,value)
374 except KeyError:
375 pass
376
377
378 for entity in newList:
379 entity.models = copyModels(entity.getDefault('models'))
380 entity.initModels(newList)
381 self.initBeliefs(entity)
382 if entity.name != self.name:
383 for entry in entity.policy.entries[:]:
384 if not entry['action']['object'] in \
385 ['Public',self.name,None]:
386 entity.policy.entries.remove(entry)
387 elif entry.has_key('actor') and \
388 entry['actor'] != self.name:
389 entity.policy.entries.remove(entry)
390 elif entry['class'] == 'conjunction':
391 clause = entry['clauses'][0]
392 if clause['label'] == 'ifWait' and \
393 clause['actor'] != self.name:
394 entity.policy.entries.remove(entry)
395 entity.initEntities(newList,depth+1)
396
397 self.initGoals(entityList)
398
399 horizon = self.getDefault('horizon')
400 if not self.policy:
401 self.policy = self.policyClass(entity=self.name,
402 actions=self.actions,
403 relationships=self.relationships,
404 size=1,
405 depth=horizon)
406
407 if not self.parent:
408 try:
409 self.setModel(self.getDefault('model'))
410 except KeyError:
411 pass
412
413 if not self.model:
414 raise UserWarning , 'No model for %s' % (self.ancestry())
415 self.entities.initializeOrder()
416
418 """Instantiates the relationships of this entity regarding the
419 provided list of entities"""
420 GenericEntity.initRelationships(self,entityList)
421 if self.mentalType != 'individual':
422
423 try:
424 myIndex = int(self.name[len(namePrefix):])
425 except ValueError:
426
427 return
428 neighbor = myIndex + neighborSpacing
429 while neighbor >= agentCount():
430 neighbor -= agentCount()
431 neighbor = '%s%d' % (namePrefix,neighbor)
432 try:
433 for name in self.relationships['neighbor'][:]:
434 if name != neighbor:
435 self.relationships['neighbor'].remove(name)
436 except KeyError:
437 pass
438
439 self.neighbors = {}
440 if self.name != 'Public':
441 for neighbor in self.relationships['neighbor']:
442 self.neighbors[neighbor] = neighbor
443
445 delta = {}
446 stage = 'donate'
447 total = 0.
448 count = 0
449 actDict = {}
450 debug.message(8,'Updating mental models held by %s' % self.ancestry())
451 for act in actions:
452 if act['type'] in ['disburse','punish']:
453 stage = 'punish'
454 try:
455 actDict[act['actor'].name] = act
456 except AttributeError:
457 actDict[act['actor']] = act
458
459 try:
460 obsList = self.getObservations()[1]['content']
461 myLast = None
462 except IndexError:
463 myLast = self.actionClass({'type':'wait',
464 'actor':self.name})
465 if not myLast:
466 for act in obsList:
467 if act['actor'] == self.name:
468 if stage == 'punish' and act['type'] == 'wait':
469
470 myLast = self.actionClass({'type':'donate',
471 'amount':0.,
472 'object':'Public',
473 'actor':self.name})
474 else:
475 myLast = act
476 break
477 if stage == 'donate':
478 lastDonation = {}
479
480 try:
481 obsList = self.getObservations()[2]['content']
482 except IndexError:
483 obsList = []
484 for name in self.neighbors.values():
485 lastDonation[name] = 0.
486 for act in obsList:
487 if act['actor'] in self.neighbors.values():
488 if act['type'] == 'wait':
489 lastDonation[act['actor'].name] = 0.
490 else:
491 lastDonation[act['actor'].name] = act['amount']
492 break
493 models = self.extractModels()
494
495 for entity in self.getEntityBeliefs():
496 if not entity.name in [self.name,'Public']:
497 curModel = models[entity.name]
498 debug.message(8,'Examining model of %s' % entity.name)
499 debug.message(7,'My current model is %s' % `curModel`)
500 debug.message(7,'My last action was %s' % `myLast`)
501 debug.message(7,'I observed %s' % `actDict[entity.name]`)
502 relevant = []
503
504 if stage == 'punish':
505 relevant = entity.policy.entries[:3]
506 interval = str2Interval(relevant[0]['amount'])
507 if myLast['amount'] in interval:
508 size = 'Big'
509 else:
510 size = 'Small'
511 else:
512 entry = entity.policy.entries[5]
513 interval = str2Interval(entry['amount'])
514 if lastDonation[self.neighbors[entity.name]] in interval:
515 size = 'Big'
516 relevant = [entity.policy.entries[3]]
517 relevant.append(entity.policy.entries[5])
518 else:
519 size = 'Small'
520 relevant = [entity.policy.entries[4]]
521 relevant.append(entity.policy.entries[6])
522
523 act = copy.copy(actDict[entity.name])
524 act['actor'] = None
525 if stage == 'donate':
526 debug.message(7,'The last donation was %s (%6.4f)' %
527 (size,lastDonation[self.neighbors[entity.name]]))
528 for entry in relevant:
529 debug.message(4,'Examining policy entry: %s' % \
530 `entry`)
531
532 if myLast['type'] == 'punish':
533 key = 'donateIfPun'+size
534 index = 0
535 else:
536 key = 'donateIfNotPun'+size
537 index = 1
538 debug.message(4,'Modifying entry: %s' \
539 % `relevant[index]`)
540
541 if act != relevant[index]['action']:
542 if act['amount']:
543 donation = act['amount']
544 else:
545 donation = 0.
546 amt = (1.-self.learningRate)*curModel[key]\
547 +self.learningRate*donation
548 debug.message(7,'New amount = %6.4f' % (amt))
549 relevant[index]['action'] = copy.copy(act)
550 if amt > 0.:
551 relevant[index]['action']['type'] = stage
552 relevant[index]['action']['amount'] = amt
553 relevant[index]['action']['object'] = 'Public'
554 else:
555 relevant[index]['action']['type'] = 'wait'
556 relevant[index]['action']['object'] = None
557 relevant[index]['action']['amount'] = None
558 delta[entity.name] = {key:amt}
559 debug.message(8,"New model: %s" % `relevant`)
560 else:
561
562 for entry in relevant:
563 debug.message(4,'Examining policy entry: %s' % \
564 `entry`)
565 hiInterval = str2Interval(relevant[0]['amount'])
566 loInterval = str2Interval(relevant[1]['amount'])
567
568 if myLast['amount'] in hiInterval:
569 myIndex = 0
570 else:
571 myIndex = 1
572 try:
573 donIndex = donations.index(myLast['amount'])
574 except ValueError:
575 donIndex = -1
576
577 if act['type'] != relevant[myIndex]['action']['type']:
578 if act['type'] == 'punish':
579
580 try:
581 threshold = donations[donIndex+1]
582 except IndexError:
583 threshold = 2.*myLast['amount']
584 threshold -= Interval.QUANTUM
585 else:
586
587 if donIndex > 0:
588 threshold = donations[donIndex-1]
589 else:
590 threshold = -Interval.QUANTUM
591 threshold += Interval.QUANTUM
592 else:
593 if act['type'] == 'wait':
594
595 if donIndex > 2:
596 threshold = donations[donIndex-2] \
597 + Interval.QUANTUM
598 else:
599 threshold = -Interval.QUANTUM
600 else:
601 threshold = curModel['punishIfDon<']
602
603 threshold = (1.-self.learningRate)\
604 *curModel['punishIfDon<']\
605 +self.learningRate*threshold
606 hiInterval['lo'] = threshold
607 loInterval['hi'] = threshold
608 relevant[0]['amount'] = `hiInterval`
609 relevant[1]['amount'] = `loInterval`
610 if 0. in hiInterval:
611 relevant[2]['action'] = self.actionClass({'type':
612 'wait'})
613 else:
614 relevant[2]['action'] = copy.deepcopy(relevant[1]['action'])
615 delta[entity.name] = {'punishIfDon<':threshold}
616 debug.message(8,"New model: %s" % `relevant`)
617 return delta
618
620 models = {}
621 for belief in self.getEntityBeliefs():
622 if not belief.name in [self.name,'Public']:
623
624 model = {}
625
626 entry = belief.policy.entries[1]
627 interval = str2Interval(entry['amount'])
628 model['punishIfDon<'] = interval['hi']
629
630 entry = belief.policy.entries[3]
631 RHS = entry['action']
632 model['donateIfPunBig'] = RHS['amount']
633 if not RHS['amount']:
634 model['donateIfPunBig'] = 0.
635
636 entry = belief.policy.entries[4]
637 RHS = entry['action']
638 model['donateIfPunSmall'] = RHS['amount']
639 if not RHS['amount']:
640 model['donateIfPunSmall'] = 0.
641
642 entry = belief.policy.entries[5]
643 RHS = entry['action']
644 model['donateIfNotPunBig'] = RHS['amount']
645 if not RHS['amount']:
646 model['donateIfNotPunBig'] = 0.
647
648 entry = belief.policy.entries[6]
649 RHS = entry['action']
650 model['donateIfNotPunSmall'] = RHS['amount']
651 if not RHS['amount']:
652 model['donateIfNotPunSmall'] = 0.
653 if len(model.values()) < 5:
654 raise ValueError,'%s has incomplete model' \
655 % (belief.ancestry())
656 models[belief.name] = model
657 return models
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
709
712
714 if amountLost < 0.:
715
716 actorGets = '-amount'
717 threshold = 'amount'
718 objectGets = 'amount'
719 else:
720
721 threshold = amountLost
722 actorGets = -amountLost
723 objectGets = '-amount'
724
725 keyStruct = {'amObject':
726 makeIdentityKey('object'),
727 'amActor':
728 makeIdentityKey('actor'),
729 'actorValue':
730 makeStateKey('actor','wealth'),
731 'myValue':
732 makeStateKey('self','wealth')
733 }
734
735
736 unchangedTree = createNodeTree(KeyedMatrix())
737
738
739 weights = {keyConstant: objectGets}
740 objTree = createDynamicNode(keyStruct['myValue'],weights)
741
742 weights = {keyConstant: actorGets}
743 actorTree = createDynamicNode(keyStruct['myValue'],weights)
744
745
746
747
748
749
750
751
752
753
754
755 weights = {`keyStruct['amActor']`: 1.}
756 donorTree = createBranchTree(KeyedPlane(KeyedRow(weights),0.5),
757 unchangedTree,actorTree)
758
759
760
761 weights = {`keyStruct['amObject']`: 1.}
762 doneeTree = createBranchTree(KeyedPlane(KeyedRow(weights),0.5),
763 donorTree,objTree)
764
765
766 return {'tree':doneeTree}
767
779
784
786 if not __GOOD__:
787 uWealth = 1.
788 goalList = [{'entity':'self',
789 'direction':'max',
790 'type':'state',
791 'feature':'wealth',
792 'weight':uWealth}]
793 if __GOOD__:
794 goalList.append({'entity':'Donee',
795 'direction':'max',
796 'type':'state',
797 'feature':'goodExists',
798 'weight':1.-uWealth})
799 return goalList
800
def makeDonateEntry(lo,hi,label='',depth=1):
    """Builds a policy-entry LHS matching an observed neighbor donation.

    Returns a string of the form
    'observation depth D actor neighbor type donate amount [lo,hi]'
    that matches a 'donate' action by a neighbor whose amount lies in
    the interval [lo,hi], looking depth steps back in the observation
    history.  If label is non-empty, ' label <label>' is appended so
    that later code can identify the entry.
    """
    # NOTE: the original line carried a stray leading '-' before 'def'
    # (diff residue), which was a syntax error; removed here.
    entry = 'observation depth %d actor neighbor type donate amount [%f,%f]'\
            % (depth,lo,hi)
    if label:
        entry += ' label %s' % label
    return entry
807
834
836 policy = []
837 punishEntry = 'observation depth 1 type punish object self label ifPun'
838 if len(actions) == 2:
839
840 policy.append('%s -> %s' % (punishEntry,actions['ifPun']))
841
842 policy.append('default -> %s' % (actions['default']))
843 else:
844 ifBig = makeDonateEntry(threshold-Interval.QUANTUM,
845 Interval.CEILING,'ifBig',2)
846 ifSmall = makeDonateEntry(Interval.FLOOR,
847 threshold-Interval.QUANTUM,'ifBig',2)
848
849 policy.append('conjunction %s & %s -> %s' % \
850 (punishEntry,ifBig,actions['ifPunBig']))
851
852
853 policy.append('%s -> %s' % (punishEntry,actions['ifPunSmall']))
854
855
856 policy.append('%s -> %s' % (ifBig,actions['ifNotPunBig']))
857
858
859 policy.append('default -> %s' % (actions['ifNotPunSmall']))
860
861 return policy
862
864 """Creates a policy for the pooler/disburser, with the threshold the cost
865 of the public good (ignored if there is no explicit public good"""
866 policy = []
867 if __GOOD__:
868
869 policy.append('belief entities self state goodExists 0.5 1. -> '\
870 +'{"type":"wait"}')
871
872 policy.append('belief entities self state wealth %4.2f 1.0 -> '\
873 +'{"type":"buyGood"}' % (threshold-0.01))
874
875 policy.append('default -> {"type":"wait"}')
876 else:
877
878 policy.append('default -> {"type":"disburse"}')
879 return policy
880
881
883 """Set the dynamic parameters of the class hierarchy."""
884 default = classHierarchy['PublicGood']
885 donor = classHierarchy['Donor']
886 donee = classHierarchy['Donee']
887 default['horizon'] = args['horizon']
888
889 donor['state']['wealth'] = args['wealth']/float(agentCount())
890
891 donor['goals'] = createGoals(0.2)
892
893 actList = [{'type':'donate','object':['Donee']}]
894 donor['actions'] += actList
895
896 dynamics = default['dynamics']['wealth']
897 dynamics['donate'] = {'class':PWLDynamics,'args':genWealthDyn()}
898 if __PUNISH__:
899
900 actList = [{'type':'punish','object':['neighbor'],
901 'amount':fines}]
902 donor['actions'] += actList
903
904 if args['punish'] != 'lookahead':
905 args['cost'] = 0.
906 dynFun = {'class':PWLDynamics,'args':genWealthDyn(args['cost'])}
907 dynamics['punish'] = dynFun
908
909 if __GOOD__:
910
911 default['dynamics']['goodExists'] = {'buyGood':{'class':PWLDynamics,
912 'args':genGoodDyn()}}
913 default['beliefs'][None]['goodExists'] = None
914 donee['actions'] = [{'type':'buyGood','object':[]}]
915 else:
916
917 donee['actions'] = [{'type':'disburse','object':[],'amount':[]}]
918 dynamics['disburse'] = {'class':PWLDynamics,
919 'args':genDisburseDyn(args['scale'])}
920
921 if args['aggregate']:
922 ideal = {'type':'donate','object':'donee',
923 'amount':args['ideal']*(agentCount()-1)}
924 else:
925 ideal = {'type':'donate','object':'donee',
926 'amount':args['ideal']}
927 wait = {'type':'wait'}
928 for name,model in donor['models'].items():
929 if name == 'deliberate':
930 model['goals'] = createGoals(1.)
931 if args['punish'] == 'lookahead':
932
933
934
935
936 model['policy'].append('default -> {"type":"lookahead",'\
937 +'"amount":3}')
938 else:
939 model['policy'] += makePunishPolicy(args['punishmentThreshold'])
940 else:
941 if name[1] == 'A':
942
943 policy = makePunishPolicy(args['punishmentThreshold'])
944 else:
945
946 policy = makePunishPolicy(0.)
947 if name[0] == 'A':
948
949 punished = copy.copy(ideal)
950 punished['amount'] /= 2.
951 punished['amount'] = min(punished['amount'],Interval.CEILING)
952 policy += makeDonatePolicy({'ifPunBig':wait,
953 'ifPunSmall':ideal,
954 'ifNotPunSmall':punished,
955 'ifNotPunBig':ideal},
956 args['ideal'])
957 else:
958
959 policy += makeDonatePolicy({'ifPunBig':wait,
960 'ifPunSmall':ideal,
961 'ifNotPunSmall':wait,
962 'ifNotPunBig':ideal},
963 args['ideal'])
964 model['policy'] = policy
965
966 model['goals'] = createGoals(1.)
967
968 donee['models']['normal']['policy'] = createPolicy(0.8)
969
970 if args['beta'] != 'deterministic':
971 StochasticLookupAhead.beta = float(args['beta'])
972 PublicGoodAgent.policyClass = StochasticLookupAhead
973
974 if args['network'] == 'random':
975 PublicGoodAgents.fixedNetwork = None
976
977 if args['aggregate']:
978 PublicGoodAgent.mentalType = 'aggregate'
979 else:
980 PublicGoodAgent.mentalType = 'individual'
981
983 sys.stderr.write('Supported arguments:\n')
984 sys.stderr.write('-c|--cost <amt>\t\tPunishing another agent costs <amt>\n')
985 sys.stderr.write('--horizon <T>\t\tAgents compute expected values over <T> games\n')
986 sys.stderr.write('-l|--length <n>\t\tThe agents play <n> iterations of the game\n')
987 sys.stderr.write('-p|--punish <policy>\tPunishment policy is either "fixed" or "lookahead"\n')
988 sys.stderr.write('-s|--scale <num>\tDonations are scaled by <num> before disbursement\n')
989 sys.stderr.write('-w|--wealth <amt>\tThe total wealth among agents is <amt> at start\n')
990 sys.stderr.write('\n')
991 sys.stderr.write('--beta <b>\t\tAgents use a stochastic lokahead policy with beta=<b>\n')
992 sys.stderr.write('--beta deterministic\tAgents use a deterministic lokahead policy\n')
993 sys.stderr.write('\n')
994 sys.stderr.write('--network fixed\t\tUse a static assignment of neighbors\n')
995 sys.stderr.write('--network random\tUse a dynamic, random assignment of neighbors\n')
996 sys.stderr.write('\n')
997 sys.stderr.write('--directory <name>\tSaves the data files in the directory <name>\n')
998 sys.stderr.write('--save <name>\t\tSaves the *initial* game in file <name>\n')
999 sys.stderr.write('\n')
1000 sys.stderr.write('-d|--debug <level>\tSets the output level of detail\n')
1001 sys.stderr.write('-h|--help\t\tPrints this message\n')
1002 sys.exit(value)
1003
if __name__ == '__main__':
    import getopt
    import os
    import profile

    # Default simulation parameters; each may be overridden from the
    # command line (see usage() for the flag meanings)
    args = {'cost': 0.001,
            'debug': 1,
            'filename': '',
            'horizon':2,
            'punishmentThreshold': .003,
            'ideal':.005,
            'scale' : 2.,
            'steps': 1,
            'punish':'lookahead',
            'wealth':.5,
            'beta':'deterministic',
            'network':'fixed',
            'aggregate':1,
            'profile':None,
            'directory':os.environ['HOME']+'/python/teamwork/examples/games/'
            }

    # Parse the command line; unknown options fall through to usage()
    try:
        optlist,cmdargs = getopt.getopt(sys.argv[1:],'gd:l:s:w:h:p:c:ai',
                                        ['debug=','OO=','PP=','OP=','PO=',
                                         'steps=','length=','wealth=',
                                         'horizon=','help','save=',
                                         'punish=','scale=','directory=',
                                         'beta=','network=','cost=',
                                         'aggregate','individual','profile'])
    except getopt.error:
        usage()
    for option in optlist:
        if option[0] == '-g':
            # Enable the explicit public good that the Donee can buy
            __GOOD__ = 1
        elif option[0] == '-c' or option[0] == '--cost':
            args['cost'] = float(option[1])
        elif option[0] == '-d' or option[0] == '--debug':
            args['debug'] = int(option[1])
        elif option[0] == '-p' or option[0] == '--punish':
            # NOTE(review): elsewhere args['punish'] is compared against the
            # string 'lookahead', so this int() conversion looks wrong --
            # presumably option[1] should be stored as-is; confirm
            args['punish'] = int(option[1])
        elif option[0] == '--OO':
            population['OptOptDonor'] = int(option[1])
        elif option[0] == '--OP':
            population['OptPesDonor'] = int(option[1])
        elif option[0] == '--PO':
            population['PesOptDonor'] = int(option[1])
        elif option[0] == '--PP':
            population['PesPesDonor'] = int(option[1])
        elif option[0] == '-l' or option[0] == '--length':
            args['steps'] = int(option[1])
        elif option[0] == '-s' or option[0] == '--scale':
            args['scale'] = float(option[1])
        elif option[0] == '-h' or option[0] == '--help':
            usage(0)
        elif option[0] == '--horizon':
            args['horizon'] = int(option[1])
        elif option[0] == '--save':
            args['filename'] = option[1]
        elif option[0] == '--directory':
            args['directory'] = option[1]
        elif option[0] == '-w' or option[0] == '--wealth':
            args['wealth'] = float(option[1])
        elif option[0] == '--beta':
            args['beta'] = option[1]
        elif option[0] == '--network':
            args['network'] = option[1]
        elif option[0] == '-a' or option[0] == '--aggregate':
            args['aggregate'] = 1
        elif option[0] == '-i' or option[0] == '--individual':
            args['aggregate'] = None
        elif option[0] == '--profile':
            args['profile'] = 1
        else:
            usage()
    # Configure the class hierarchy from the parsed arguments, then build
    # the interactive shell (optionally under the hotshot profiler)
    initialize(args)
    dynamics = classHierarchy['PublicGood']['dynamics']
    if args['profile']:
        import hotshot
        prof = hotshot.Profile('intialization.prof')
        prof.runcall(PublicGoodTerminal,entities=None,
                     classes=classHierarchy,dynamics=dynamics,
                     file=None,debug=args['debug'])
        prof.close()
    else:
        shell = PublicGoodTerminal(None,classHierarchy,dynamics,
                                   None,args['debug'])
    sys.stderr.write('Initialization complete.\n')
    script = []
    results = []
    if len(args['filename']) > 0:
        # Save the *initial* game before any steps are run (--save)
        shell.save(args['filename'])

    # Data files are named after the population mix, e.g. OO2OP0PO0PP2
    fileroot = args['directory']+'/OO%dOP%dPO%dPP%d' \
               % (population['OptOptDonor'],population['OptPesDonor'],
                  population['PesOptDonor'],population['PesPesDonor'])
    fields = ['amount','amount','mean','wealth','punishIfDon<',
              'donateIfPunBig','donateIfNotPunBig',
              'donateIfPunSmall','donateIfNotPunSmall',
              'donateIfSmallDiff','donateIfBigDiff']

    # Run the game: each iteration is two steps (donation, then
    # punishment/disbursement); step 0 just records the initial state
    output = []
    for t in range(2*args['steps']+1):
        results = []
        if t > 0:
            sys.stderr.write('Iteration %d' % ((t+1)/2))
            if t%2 == 1:
                sys.stderr.write(': Donate\n')
            else:
                sys.stderr.write(': Punish\n')
            if args['profile']:
                prof = hotshot.Profile('step.prof')
                prof.run('data = shell.step(1,results)[0]')
                prof.close()
            else:
                data = shell.step(1,results)[0]
            del data['delta']
            # Reduce each agent's step result to a single number: 0 for
            # 'wait', otherwise the amount donated/punished
            for result in data.values():
                for key in result.keys():
                    if key == 'decision':
                        if result[key]['type'] == 'wait':
                            result[key] = 0.
                        else:
                            result[key] = result[key]['amount']
                    else:
                        del result[key]
        else:
            data = {}

        # Append each agent's current wealth and extracted mental-model
        # parameters to this step's record
        for agent in shell.entities.members():
            if agent.name != 'Public':
                if not data.has_key(agent.name):
                    data[agent.name] = {}
                data[agent.name]['wealth'] = agent.getState('wealth').mean()
                models = agent.extractModels().values()
                data[agent.name].update(models[0])
        output.append(data)

    shell.executeCommand('quit')

    sys.stderr.write('Saving data...')
    shell.mainloop()
    # Write one data file per field of interest
    for field in fields:
        if field == 'amount':
            # Raw decisions: one row per agent, donation amounts to
            # <fileroot>D and punishment amounts to <fileroot>P
            donFile = open(fileroot+'D','w')
            punFile = open(fileroot+'P','w')
            for index in range(agentCount()):
                name = '%s%d' % (namePrefix,index)
                stage = None
                for step in output[1:]:
                    content = '\t%6.4f' % (step[name]['decision'])
                    if stage:
                        punFile.write(content)
                    else:
                        donFile.write(content)
                    # Donation and punishment stages alternate
                    stage = not stage
                donFile.write('\n')
                punFile.write('\n')
            donFile.close()
            punFile.close()
        elif field == 'mean':
            # Population means per stage, one row per step
            donFile = open(fileroot+'DMean','w')
            punFile = open(fileroot+'PMean','w')
            stage = None
            for step in output[1:]:
                totalP = 0.
                totalD = 0.
                for index in range(agentCount()):
                    # NOTE(review): 'name' here is left over from the
                    # previous field's loop; presumably
                    # '%s%d' % (namePrefix,index) was intended -- confirm
                    if stage:
                        totalP += step[name]['decision']
                    else:
                        totalD += step[name]['decision']
                if stage:
                    punFile.write('%6.4f\n' % (totalP/float(agentCount())))
                else:
                    donFile.write('%6.4f\n' % (totalD/float(agentCount())))
                stage = not stage
            donFile.close()
            punFile.close()
        else:
            # Model-parameter fields, one row per agent
            donFile = open(fileroot+field,'w')
            for index in range(agentCount()):
                name = '%s%d' % (namePrefix,index)
                for step in output:
                    if field[-4:] == 'Diff':
                        # Derived field: difference between the 'Pun' and
                        # 'NotPun' variants of the same model parameter
                        root = field[:-4]
                        if root[-3:] == 'Big':
                            donation = root[-3:]
                            root = root[:-3]
                        else:
                            donation = root[-5:]
                            root = root[:-5]
                        value = step[name][root+'Pun'+donation]\
                                -step[name][root+'NotPun'+donation]
                    else:
                        value = step[name][field]
                    try:
                        content = '\t%6.4f' % (value)
                    except TypeError:
                        # NOTE(review): '%s = %s' expects two values but is
                        # given only one, so this re-raise itself fails with
                        # a TypeError -- confirm the intended message
                        raise TypeError,'%s = %s' % (value)
                    donFile.write(content)
                donFile.write('\n')
            donFile.close()

    sys.stderr.write('Done.\n')
    if args['profile']:
        stats = hotshot.stats.load('step.prof')
        stats.print_stats()
1219