tbp.monty.frameworks.config_utils#

tbp.monty.frameworks.config_utils.cmd_parser#

create_cmd_parser(all_configs)[source]#

Create the Monty command line argument parser from all available configs.

Parameters:

all_configs – Dict containing all available experiment configurations. Usually, each project has its own set of experiment configurations.

Returns:

Command line argument parser
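
A minimal sketch of wiring this parser into a project's run script. The registry contents are an illustrative assumption, and parse_args() assumes the returned parser is a standard argparse.ArgumentParser:

    from tbp.monty.frameworks.config_utils.cmd_parser import create_cmd_parser

    # Hypothetical project-level registry mapping experiment names to configs.
    all_configs = dict(my_experiment=dict())

    cmd_parser = create_cmd_parser(all_configs=all_configs)
    cmd_args = cmd_parser.parse_args()  # standard argparse entry point

The same pattern applies to create_cmd_parser_parallel and create_rerun_parser below, which take the same all_configs dict.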

create_cmd_parser_parallel(all_configs)[source]#

Create the Monty command line argument parser for running episodes in parallel.

This parser is designed for running the episodes of an experiment in parallel and is used by run_parallel.py.

Parameters:

all_configs – Dict containing all available experiment configurations. Usually, each project has its own set of experiment configurations.

Returns:

Command line argument parser

create_rerun_parser(all_configs)[source]#

Create command line argument parser for rerunning experiments.

Parameters:

all_configs – Dict containing all available experiment configurations. Usually, each project has its own set of experiment configurations.

Returns:

Command line argument parser

tbp.monty.frameworks.config_utils.config_args#

tbp.monty.frameworks.config_utils.make_dataset_configs#

tbp.monty.frameworks.config_utils.policy_setup_utils#

class BasePolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[ActionSampler], agent_id: str, file_name: str | None = None, switch_frequency: float = 0.05)[source]#

Bases: object

Config for BasePolicy.

action_sampler_args: Dict#
action_sampler_class: Type[ActionSampler]#
agent_id: str#
file_name: str | None = None#
switch_frequency: float = 0.05#
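
A minimal sketch of constructing this dataclass directly, assuming the ConstantSampler that serves as the default sampler elsewhere in this module and empty sampler arguments:

    from tbp.monty.frameworks.actions.action_samplers import ConstantSampler
    from tbp.monty.frameworks.config_utils.policy_setup_utils import BasePolicyConfig

    # Empty sampler args fall back to the sampler's own defaults.
    policy_config = BasePolicyConfig(
        action_sampler_args={},
        action_sampler_class=ConstantSampler,
        agent_id="agent_id_0",
    )

In practice, the make_base_policy_config helper below builds this config from an action space name.
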
class InformedPolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[ActionSampler], agent_id: str, file_name: str | None = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.03, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25)[source]#

Bases: object

action_sampler_args: Dict#
action_sampler_class: Type[ActionSampler]#
agent_id: str#
desired_object_distance: float = 0.03#
file_name: str | None = None#
good_view_percentage: float = 0.5#
min_perc_on_obj: float = 0.25#
switch_frequency: float = 1.0#
use_goal_state_driven_actions: bool = False#
class NaiveScanPolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[ActionSampler], agent_id: str, file_name: str | None = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.03, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25, fixed_amount: float = 3.0)[source]#

Bases: InformedPolicyConfig

fixed_amount: float = 3.0#
use_goal_state_driven_actions: bool = False#
class SurfaceCurveInformedPolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[ActionSampler], agent_id: str, file_name: str | None = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.025, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25, alpha: float = 0.1, pc_alpha: float = 0.5, max_pc_bias_steps: int = 32, min_general_steps: int = 8, min_heading_steps: int = 12)[source]#

Bases: SurfacePolicyConfig

desired_object_distance: float = 0.025#
max_pc_bias_steps: int = 32#
min_general_steps: int = 8#
min_heading_steps: int = 12#
pc_alpha: float = 0.5#
class SurfacePolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[ActionSampler], agent_id: str, file_name: str | None = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.025, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25, alpha: float = 0.1)[source]#

Bases: InformedPolicyConfig

alpha: float = 0.1#
desired_object_distance: float = 0.025#
generate_action_list(action_space_type) → List[Action][source]#

Generate an action list based on a given action space type.

Parameters:

action_space_type – name of action space, one of “distant_agent”, “distant_agent_no_translation”, “absolute_only”, or “surface_agent”

Returns:

Action list to use for the given action space type
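
A short usage sketch, assuming generate_action_list is importable at module level as the listing above suggests:

    from tbp.monty.frameworks.config_utils.policy_setup_utils import generate_action_list

    # Actions available to a surface agent.
    surface_actions = generate_action_list("surface_agent")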

make_base_policy_config(action_space_type: str, action_sampler_class: Type[ActionSampler], agent_id: str = 'agent_id_0')[source]#

Generates a config for the BasePolicy class.

Parameters:
  • action_space_type – name of action space, one of “distant_agent”, “distant_agent_no_translation”, “absolute_only”, or “surface_agent”

  • action_sampler_class – ActionSampler class to use

  • agent_id – Agent name. Defaults to “agent_id_0”.

Returns:

BasePolicyConfig instance
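
A minimal usage sketch pairing the "distant_agent" action space with the ConstantSampler named elsewhere in this module:

    from tbp.monty.frameworks.actions.action_samplers import ConstantSampler
    from tbp.monty.frameworks.config_utils.policy_setup_utils import make_base_policy_config

    # agent_id keeps its default of "agent_id_0".
    base_policy_config = make_base_policy_config(
        action_space_type="distant_agent",
        action_sampler_class=ConstantSampler,
    )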

make_curv_surface_policy_config(desired_object_distance, alpha, pc_alpha, max_pc_bias_steps, min_general_steps, min_heading_steps, use_goal_state_driven_actions=False, action_sampler_class: Type[ActionSampler] = ConstantSampler, action_space_type='surface_agent', file_name=None, agent_id='agent_id_0', **kwargs)[source]#

Generates a config for the SurfacePolicyCurvatureInformed policy.

Parameters:
  • desired_object_distance – Desired distance between the sensor and the object surface.

  • alpha – ?

  • pc_alpha – ?

  • max_pc_bias_steps – ?

  • min_general_steps – ?

  • min_heading_steps – ?

  • use_goal_state_driven_actions – Defaults to False

  • action_sampler_class – Defaults to ConstantSampler

  • action_space_type – Defaults to “surface_agent”

  • file_name – Defaults to None

  • agent_id – Agent name. Defaults to “agent_id_0”.

  • **kwargs

    Any additional keyword arguments. These may include parameters for ActionSampler configuration:

absolute_degrees, max_absolute_degrees, min_absolute_degrees, direction, location, rotation_degrees, rotation_quat, max_rotation_degrees, min_rotation_degrees, translation_distance, max_translation, min_translation.

Returns:

SurfaceCurveInformedPolicyConfig instance
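
A usage sketch whose values mirror the SurfaceCurveInformedPolicyConfig defaults shown above; in a real experiment these would be tuned to the task:

    from tbp.monty.frameworks.config_utils.policy_setup_utils import (
        make_curv_surface_policy_config,
    )

    # Sampler, action space, file_name, and agent_id keep their defaults.
    curv_policy_config = make_curv_surface_policy_config(
        desired_object_distance=0.025,
        alpha=0.1,
        pc_alpha=0.5,
        max_pc_bias_steps=32,
        min_general_steps=8,
        min_heading_steps=12,
    )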

make_informed_policy_config(action_space_type: str, action_sampler_class: Type[ActionSampler], good_view_percentage: float = 0.5, use_goal_state_driven_actions: bool = False, file_name: str | None = None, agent_id: str = 'agent_id_0', switch_frequency: float = 1.0, **kwargs)[source]#

Similar to make_base_policy_config, but for the InformedPolicy class.

Parameters:
  • action_space_type – name of action space, one of “distant_agent”, “distant_agent_no_translation”, “absolute_only”, or “surface_agent”

  • action_sampler_class – ActionSampler class to use

  • good_view_percentage – Defaults to 0.5

  • use_goal_state_driven_actions – Defaults to False

  • file_name – Defaults to None

  • agent_id – Agent name. Defaults to “agent_id_0”.

  • switch_frequency – Defaults to 1.0

  • **kwargs

    Any additional keyword arguments. These may include parameters for ActionSampler configuration:

absolute_degrees, max_absolute_degrees, min_absolute_degrees, direction, location, rotation_degrees, rotation_quat, max_rotation_degrees, min_rotation_degrees, translation_distance, max_translation, min_translation.

Returns:

InformedPolicyConfig instance
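
A minimal usage sketch; the argument values simply restate the documented defaults:

    from tbp.monty.frameworks.actions.action_samplers import ConstantSampler
    from tbp.monty.frameworks.config_utils.policy_setup_utils import make_informed_policy_config

    informed_policy_config = make_informed_policy_config(
        action_space_type="distant_agent",
        action_sampler_class=ConstantSampler,
        good_view_percentage=0.5,
        use_goal_state_driven_actions=False,
    )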

make_naive_scan_policy_config(step_size: float, agent_id='agent_id_0')[source]#

Similar to make_informed_policy_config, but for NaiveScanPolicyConfig.

It is currently less flexible than the other config generators above because it is only used with one set of parameters.

Parameters:
  • step_size – Fixed amount to move the agent

  • agent_id – Agent name. Defaults to “agent_id_0”.

Returns:

NaiveScanPolicyConfig instance
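
A one-line usage sketch; step_size=3.0 mirrors the fixed_amount default of NaiveScanPolicyConfig above:

    from tbp.monty.frameworks.config_utils.policy_setup_utils import make_naive_scan_policy_config

    naive_scan_config = make_naive_scan_policy_config(step_size=3.0)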

make_surface_policy_config(desired_object_distance: float, alpha: float, use_goal_state_driven_actions: bool = False, action_sampler_class: Type[ActionSampler] = ConstantSampler, action_space_type: str = 'surface_agent', file_name: str | None = None, agent_id: str = 'agent_id_0', **kwargs)[source]#

Similar to make_base_policy_config, but for the SurfacePolicy class.

Parameters:
  • desired_object_distance – Desired distance between the sensor and the object surface.

  • alpha – ?

  • use_goal_state_driven_actions – Defaults to False

  • action_sampler_class – Defaults to ConstantSampler

  • action_space_type – Defaults to “surface_agent”

  • file_name – Defaults to None

  • agent_id – Agent name. Defaults to “agent_id_0”.

  • **kwargs

    Any additional keyword arguments. These may include parameters for ActionSampler configuration:

absolute_degrees, max_absolute_degrees, min_absolute_degrees, direction, location, rotation_degrees, rotation_quat, max_rotation_degrees, min_rotation_degrees, translation_distance, max_translation, min_translation.

Returns:

SurfacePolicyConfig instance
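
A minimal usage sketch with values matching the SurfacePolicyConfig defaults shown above; ConstantSampler and "surface_agent" are already the defaults here:

    from tbp.monty.frameworks.config_utils.policy_setup_utils import make_surface_policy_config

    surface_policy_config = make_surface_policy_config(
        desired_object_distance=0.025,
        alpha=0.1,
    )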