|
__init__(self,
states,
team,
name='Generic COM-MTDP') |
source code
|
|
|
|
|
|
|
|
|
|
|
evaluatePolicy(self,
domPolicy,
comPolicy,
state,
horizon=100,
debug=0)
Computes expected reward of following the domain- and
communication-level policies over the finite horizon specified, from
the initial state specified |
source code
|
|
|
__evaluatePolicy(self,
stateList,
domPolicy,
comPolicy,
horizon,
debug) |
source code
|
|
|
__generateTeamMessages(self,
state,
agents,
msgList)
Generates all possible messages over all agents |
source code
|
|
|
__generateTeamObservations(self,
state,
actions,
agents,
obsList,
debug=0)
Generates all possible observations over all agents |
source code
|
|
|
__generateAgentObservations(self,
state,
actions,
agent,
features,
observations,
debug=0)
Generates all possible observations for an individual agent |
source code
|
|
|
computeBeliefState(self,
states,
agent,
policy,
history)
Partially implemented |
source code
|
|
|
bestAction(self,
agent,
history,
horizon,
policyOthers) |
source code
|
|
|
valueHistory(self,
agent,
history,
horizon,
action,
policyOthers) |
source code
|
|
|
ProjectObservations(self,
states,
agent=None,
beliefs=[],
epoch=-1,
debug=0) |
source code
|
|
|
ExecuteCommunication(self,
states,
comPolicy,
debug=0) |
source code
|
|
|
ExecuteActions(self,
states,
domPolicy,
debug=0) |
source code
|
|
|
ProjectWorldDynamics(self,
states,
debug=0) |
source code
|
|
|
updateBeliefs(self,
world,
beliefs,
agent)
Generates all possible new belief states for an agent |
source code
|
|