tbp.monty.frameworks.config_utils#

tbp.monty.frameworks.config_utils.cmd_parser#

create_cmd_parser(experiments: list[str])[source]#

Create monty command line argument parser from all available configs.

Parameters:

experiments – List of experiment names available to choose from.

Returns:

Command line argument parser

create_cmd_parser_parallel(experiments: list[str])[source]#

Create monty command line argument parser for running episodes in parallel.

This one is designed to run episodes of an experiment in parallel and is used by run_parallel.py.

Parameters:

experiments – List of experiment names available to choose from.

Returns:

Command line argument parser

create_rerun_parser(experiments: list[str])[source]#

Create command line argument parser for rerunning experiments.

Parameters:

experiments – List of experiment names available to choose from.

Returns:

Command line argument parser

tbp.monty.frameworks.config_utils.config_args#

tbp.monty.frameworks.config_utils.make_dataset_configs#

class DebugExperimentArgs(do_train: bool = True, do_eval: bool = True, show_sensor_output: bool = False, max_train_steps: int = 50, max_eval_steps: int = 50, max_total_steps: int = 400, n_train_epochs: int = 1, n_eval_epochs: int = 1, model_name_or_path: str = '', min_lms_match: int = 1, seed: int = 42)[source]#

Bases: ExperimentArgs

do_eval: bool = True#
do_train: bool = True#
max_eval_steps: int = 50#
max_total_steps: int = 400#
max_train_steps: int = 50#
n_eval_epochs: int = 1#
n_train_epochs: int = 1#
class DefaultEvalObjectList(objects: List[str] = <factory>)[source]#

Bases: object

objects: List[str]#
class DefaultObjectInitializer[source]#

Bases: object

post_episode()[source]#
post_epoch()[source]#
class DefaultTrainObjectList(objects: List[str] = <factory>)[source]#

Bases: object

objects: List[str]#
class EnvInitArgsMontyWorldBrightScenes(data_path: str = '/home/runner/tbp/data/worldimages/bright_scenes/')[source]#

Bases: object

data_path: str = '/home/runner/tbp/data/worldimages/bright_scenes/'#
class EnvInitArgsMontyWorldDarkScenes(data_path: str = '/home/runner/tbp/data/worldimages/dark_scenes/')[source]#

Bases: object

data_path: str = '/home/runner/tbp/data/worldimages/dark_scenes/'#
class EnvInitArgsMontyWorldHandIntrusionScenes(data_path: str = '/home/runner/tbp/data/worldimages/hand_intrusion_scenes/')[source]#

Bases: object

data_path: str = '/home/runner/tbp/data/worldimages/hand_intrusion_scenes/'#
class EnvInitArgsMontyWorldMultiObjectScenes(data_path: str = '/home/runner/tbp/data/worldimages/multi_object_scenes/')[source]#

Bases: object

data_path: str = '/home/runner/tbp/data/worldimages/multi_object_scenes/'#
class EnvInitArgsMontyWorldStandardScenes(data_path: str = '/home/runner/tbp/data/worldimages/standard_scenes/')[source]#

Bases: object

data_path: str = '/home/runner/tbp/data/worldimages/standard_scenes/'#
class EnvironmentDataLoaderPerObjectEvalArgs(object_names: List = <factory>, object_init_sampler: Callable = <factory>)[source]#

Bases: EnvironmentDataloaderPerObjectArgs

object_init_sampler: Callable#
object_names: List#
class EnvironmentDataLoaderPerObjectTrainArgs(object_names: List = <factory>, object_init_sampler: Callable = <factory>)[source]#

Bases: EnvironmentDataloaderPerObjectArgs

object_init_sampler: Callable#
object_names: List#
class EnvironmentDataloaderMultiObjectArgs(object_names: Dict, object_init_sampler: Callable)[source]#

Bases: object

object_init_sampler: Callable#
object_names: Dict#
class EnvironmentDataloaderPerObjectArgs(object_names: List, object_init_sampler: Callable)[source]#

Bases: object

object_init_sampler: Callable#
object_names: List#
class EvalExperimentArgs(do_train: bool = False, do_eval: bool = True, show_sensor_output: bool = False, max_train_steps: int = 1000, max_eval_steps: int = 500, max_total_steps: int = 6000, n_train_epochs: int = 3, n_eval_epochs: int = 1, model_name_or_path: str = '', min_lms_match: int = 1, seed: int = 42, python_log_level: str = 'DEBUG')[source]#

Bases: ExperimentArgs

do_train: bool = False#
n_eval_epochs: int = 1#
python_log_level: str = 'DEBUG'#
class ExperimentArgs(do_train: bool = True, do_eval: bool = True, show_sensor_output: bool = False, max_train_steps: int = 1000, max_eval_steps: int = 500, max_total_steps: int = 6000, n_train_epochs: int = 3, n_eval_epochs: int = 3, model_name_or_path: str = '', min_lms_match: int = 1, seed: int = 42)[source]#

Bases: object

do_eval: bool = True#
do_train: bool = True#
max_eval_steps: int = 500#
max_total_steps: int = 6000#
max_train_steps: int = 1000#
min_lms_match: int = 1#
model_name_or_path: str = ''#
n_eval_epochs: int = 3#
n_train_epochs: int = 3#
seed: int = 42#
show_sensor_output: bool = False#
class FiveLMMountConfig(agent_id: Union[str, NoneType] = 'agent_id_0', sensor_ids: Union[List[str], NoneType] = <factory>, height: Union[float, NoneType] = 0.0, position: List[Union[float, int]] = <factory>, resolutions: List[List[Union[float, int]]] = <factory>, positions: List[List[Union[float, int]]] = <factory>, rotations: List[List[Union[float, int]]] = <factory>, semantics: List[List[Union[float, int]]] = <factory>, zooms: List[float] = <factory>)[source]#

Bases: object

agent_id: str | None = 'agent_id_0'#
height: float | None = 0.0#
position: List[float | int]#
positions: List[List[float | int]]#
resolutions: List[List[float | int]]#
rotations: List[List[float | int]]#
semantics: List[List[float | int]]#
sensor_ids: List[str] | None#
zooms: List[float]#
class FixedRotationEnvironmentDataLoaderPerObjectEvalArgs(object_names: List = <factory>, object_init_sampler: Callable = <factory>)[source]#

Bases: EnvironmentDataloaderPerObjectArgs

object_init_sampler: Callable#
object_names: List#
class FixedRotationEnvironmentDataLoaderPerObjectTrainArgs(object_names: List = <factory>, object_init_sampler: Callable = <factory>)[source]#

Bases: EnvironmentDataloaderPerObjectArgs

object_init_sampler: Callable#
object_names: List#
class InformedEnvironmentDataLoaderEvalArgs(object_names: List = <factory>, object_init_sampler: Callable = <factory>, use_get_good_view_positioning_procedure: bool = False)[source]#

Bases: EnvironmentDataLoaderPerObjectEvalArgs

use_get_good_view_positioning_procedure: bool = False#
class InformedEnvironmentDataLoaderTrainArgs(object_names: List = <factory>, object_init_sampler: Callable = <factory>, use_get_good_view_positioning_procedure: bool = False)[source]#

Bases: EnvironmentDataLoaderPerObjectTrainArgs

use_get_good_view_positioning_procedure: bool = False#
class InformedEnvironmentDataloaderMultiObjectArgs(object_names: Dict, object_init_sampler: Callable, use_get_good_view_positioning_procedure: bool = False)[source]#

Bases: EnvironmentDataloaderMultiObjectArgs

use_get_good_view_positioning_procedure: bool = False#
class MultiLMMountConfig(agent_id: Union[str, NoneType] = 'agent_id_0', sensor_ids: Union[List[str], NoneType] = <factory>, height: Union[float, NoneType] = 0.0, position: List[Union[float, int]] = <factory>, resolutions: List[List[Union[float, int]]] = <factory>, positions: List[List[Union[float, int]]] = <factory>, rotations: List[List[Union[float, int]]] = <factory>, semantics: List[List[Union[float, int]]] = <factory>, zooms: List[float] = <factory>)[source]#

Bases: object

agent_id: str | None = 'agent_id_0'#
height: float | None = 0.0#
position: List[float | int]#
positions: List[List[float | int]]#
resolutions: List[List[float | int]]#
rotations: List[List[float | int]]#
semantics: List[List[float | int]]#
sensor_ids: List[str] | None#
zooms: List[float]#
class NotYCBEvalObjectList(objects: List[str] = <factory>)[source]#

Bases: object

objects: List[str]#
class NotYCBTrainObjectList(objects: List[str] = <factory>)[source]#

Bases: object

objects: List[str]#
class OmniglotDataloaderArgs(alphabets: ~typing.List = <factory>, characters: ~typing.List = <factory>, versions: ~typing.List = <factory>)[source]#

Bases: object

Set basic debug args to load 3 characters of 2 alphabets in 1 version.

alphabets: List#
characters: List#
versions: List#
class OmniglotDatasetArgs(env_init_func: Callable = <class 'tbp.monty.frameworks.environments.two_d_data.OmniglotEnvironment'>, env_init_args: Dict = <factory>, transform: Union[Callable, list, NoneType] = None)[source]#

Bases: object

env_init_func#

alias of OmniglotEnvironment

env_init_args: Dict#
transform: Callable | list | None = None#
class PatchAndViewFinderMountConfig(agent_id: str | None = 'agent_id_0', sensor_ids: ~typing.List[str] | None = <factory>, height: float | None = 0.0, position: ~typing.List[float | int] = <factory>, resolutions: ~typing.List[~typing.List[float | int]] = <factory>, positions: ~typing.List[~typing.List[float | int]] = <factory>, rotations: ~typing.List[~typing.List[float | int]] = <factory>, semantics: ~typing.List[~typing.List[float | int]] = <factory>, zooms: ~typing.List[float] = <factory>)[source]#

Bases: object

Config using view finder to find the object before starting the experiment.

A common default for Viviane’s experiments that use the view finder to navigate so the object is in view before the real experiment happens.

agent_id: str | None = 'agent_id_0'#
height: float | None = 0.0#
position: List[float | int]#
positions: List[List[float | int]]#
resolutions: List[List[float | int]]#
rotations: List[List[float | int]]#
semantics: List[List[float | int]]#
sensor_ids: List[str] | None#
zooms: List[float]#
class PatchAndViewFinderMountLowResConfig(agent_id: Union[str, NoneType] = 'agent_id_0', sensor_ids: Union[List[str], NoneType] = <factory>, height: Union[float, NoneType] = 0.0, position: List[Union[float, int]] = <factory>, resolutions: List[List[Union[float, int]]] = <factory>, positions: List[List[Union[float, int]]] = <factory>, rotations: List[List[Union[float, int]]] = <factory>, semantics: List[List[Union[float, int]]] = <factory>, zooms: List[float] = <factory>)[source]#

Bases: PatchAndViewFinderMountConfig

resolutions: List[List[float | int]]#
class PatchAndViewFinderMultiObjectMountConfig(agent_id: Union[str, NoneType] = 'agent_id_0', sensor_ids: Union[List[str], NoneType] = <factory>, height: Union[float, NoneType] = 0.0, position: List[Union[float, int]] = <factory>, resolutions: List[List[Union[float, int]]] = <factory>, positions: List[List[Union[float, int]]] = <factory>, rotations: List[List[Union[float, int]]] = <factory>, semantics: List[List[Union[float, int]]] = <factory>, zooms: List[float] = <factory>)[source]#

Bases: PatchAndViewFinderMountConfig

semantics: List[List[float | int]]#
class PatchAndViewSensorAgentMapping(agent_ids: List[str] = <factory>, sensor_ids: List[str] = <factory>, sensor_to_agent: Dict = <factory>)[source]#

Bases: SensorAgentMapping

agent_ids: List[str]#
sensor_ids: List[str]#
sensor_to_agent: Dict#
class PredefinedObjectInitializer(positions=None, rotations=None, scales=None, change_every_episode=None)[source]#

Bases: DefaultObjectInitializer

all_combinations_of_params()[source]#
post_episode()[source]#
post_epoch()[source]#
class RandomRotationObjectInitializer(position=None, scale=None)[source]#

Bases: DefaultObjectInitializer

class SensorAgentMapping(agent_ids: List[str], sensor_ids: List[str], sensor_to_agent: Dict)[source]#

Bases: object

agent_ids: List[str]#
sensor_ids: List[str]#
sensor_to_agent: Dict#
class SimpleMountSensorAgentMapping(agent_ids: ~typing.List[str] = <factory>, sensor_ids: ~typing.List[str] = <factory>, sensor_to_agent: ~typing.Dict = <factory>)[source]#

Bases: SensorAgentMapping

Mapping for a sim with a single mount agent with two sensors.

agent_ids: List[str]#
sensor_ids: List[str]#
sensor_to_agent: Dict#
class SingleSensorAgentMapping(agent_ids: ~typing.List[str] = <factory>, sensor_ids: ~typing.List[str] = <factory>, sensor_to_agent: ~typing.Dict = <factory>)[source]#

Bases: SensorAgentMapping

Mapping for a sim with a single agent and single sensor.

agent_ids: List[str]#
sensor_ids: List[str]#
sensor_to_agent: Dict#
class SurfaceAndViewFinderMountConfig(agent_id: str | None = 'agent_id_0', sensor_ids: ~typing.List[str] | None = <factory>, height: float | None = 0.0, position: ~typing.List[float | int] = <factory>, resolutions: ~typing.List[~typing.List[float | int]] = <factory>, positions: ~typing.List[~typing.List[float | int]] = <factory>, rotations: ~typing.List[~typing.List[float | int]] = <factory>, semantics: ~typing.List[~typing.List[float | int]] = <factory>, zooms: ~typing.List[float] = <factory>, action_space_type: str = 'surface_agent')[source]#

Bases: PatchAndViewFinderMountConfig

Use surface agent and view finder to find the object before experiment start.

Adaptation of Viviane’s code that uses the view finder to navigate so the object is in view before the real experiment happens, plus a surface-agent sensor.

action_space_type: str = 'surface_agent'#
height: float | None = 0.0#
position: List[float | int]#
positions: List[List[float | int]]#
resolutions: List[List[float | int]]#
zooms: List[float]#
class TwoCameraMountConfig(agent_id: Union[str, NoneType] = None, sensor_ids: Union[List[str], NoneType] = None, resolutions: List[List[Union[float, int]]] = <factory>, positions: List[List[Union[float, int]]] = <factory>, rotations: List[List[Union[float, int]]] = <factory>, semantics: List[List[Union[float, int]]] = <factory>, zooms: List[float] = <factory>)[source]#

Bases: object

agent_id: str | None = None#
positions: List[List[float | int]]#
resolutions: List[List[float | int]]#
rotations: List[List[float | int]]#
semantics: List[List[float | int]]#
sensor_ids: List[str] | None = None#
zooms: List[float]#
class TwoLMStackedDistantMountConfig(agent_id: Union[str, NoneType] = 'agent_id_0', sensor_ids: Union[List[str], NoneType] = <factory>, height: Union[float, NoneType] = 0.0, position: List[Union[float, int]] = <factory>, resolutions: List[List[Union[float, int]]] = <factory>, positions: List[List[Union[float, int]]] = <factory>, rotations: List[List[Union[float, int]]] = <factory>, semantics: List[List[Union[float, int]]] = <factory>, zooms: List[float] = <factory>)[source]#

Bases: object

agent_id: str | None = 'agent_id_0'#
height: float | None = 0.0#
position: List[float | int]#
positions: List[List[float | int]]#
resolutions: List[List[float | int]]#
rotations: List[List[float | int]]#
semantics: List[List[float | int]]#
sensor_ids: List[str] | None#
zooms: List[float]#
class TwoLMStackedSurfaceMountConfig(agent_id: Union[str, NoneType] = 'agent_id_0', sensor_ids: Union[List[str], NoneType] = <factory>, height: Union[float, NoneType] = 0.0, position: List[Union[float, int]] = <factory>, resolutions: List[List[Union[float, int]]] = <factory>, positions: List[List[Union[float, int]]] = <factory>, rotations: List[List[Union[float, int]]] = <factory>, semantics: List[List[Union[float, int]]] = <factory>, zooms: List[float] = <factory>, action_space_type: str = 'surface_agent')[source]#

Bases: TwoLMStackedDistantMountConfig

action_space_type: str = 'surface_agent'#
class WorldImageDataloaderArgs(scenes: ~typing.List = <factory>, versions: ~typing.List = <factory>)[source]#

Bases: object

Set basic debug args to load 1 scene (Numenta mug) in 4 versions.

scenes: List#
versions: List#
class WorldImageDatasetArgs(env_init_func: Callable = <class 'tbp.monty.frameworks.environments.two_d_data.SaccadeOnImageEnvironment'>, env_init_args: Dict = <factory>, transform: Union[Callable, list, NoneType] = None)[source]#

Bases: object

env_init_func#

alias of SaccadeOnImageEnvironment

env_init_args: Dict#
transform: Callable | list | None = None#
class WorldImageFromStreamDatasetArgs(env_init_func: Callable = <class 'tbp.monty.frameworks.environments.two_d_data.SaccadeOnImageFromStreamEnvironment'>, env_init_args: Dict = <factory>, transform: Union[Callable, list, NoneType] = None)[source]#

Bases: object

env_init_func#

alias of SaccadeOnImageFromStreamEnvironment

env_init_args: Dict#
transform: Callable | list | None = None#
get_env_dataloader_per_object_by_idx(start, stop, list_of_indices=None)[source]#
get_object_names_by_idx(start, stop, list_of_indices=None, object_list=['mug', 'bowl', 'potted_meat_can', 'master_chef_can', 'i_cups', 'spoon', 'b_cups', 'pitcher_base', 'knife', 'b_marbles', 'h_cups', 'strawberry', 'power_drill', 'padlock', 'golf_ball', 'hammer', 'softball', 'orange', 'c_lego_duplo', 'c_toy_airplane', 'b_lego_duplo', 'banana', 'nine_hole_peg_test', 'tomato_soup_can', 'baseball', 'g_cups', 'gelatin_box', 'lemon', 'plum', 'racquetball', 'plate', 'pudding_box', 'e_cups', 'apple', 'j_cups', 'foam_brick', 'large_marker', 'peach', 'phillips_screwdriver', 'a_toy_airplane', 'e_lego_duplo', 'sugar_box', 'a_colored_wood_blocks', 'c_cups', 'pear', 'f_cups', 'wood_block', 'd_lego_duplo', 'b_toy_airplane', 'b_colored_wood_blocks', 'g_lego_duplo', 'a_lego_duplo', 'mini_soccer_ball', 'medium_clamp', 'a_marbles', 'extra_large_clamp', 'd_cups', 'e_toy_airplane', 'adjustable_wrench', 'rubiks_cube', 'f_lego_duplo', 'a_cups', 'skillet_lid', 'sponge', 'tennis_ball', 'spatula', 'd_toy_airplane', 'chain', 'scissors', 'mustard_bottle', 'bleach_cleanser', 'tuna_fish_can', 'cracker_box', 'fork', 'large_clamp', 'dice', 'flat_screwdriver'])[source]#
get_omniglot_eval_dataloader(start_at_version, alphabet_ids, num_versions=None, data_path=None)[source]#

Generate OmniglotDataloaderArgs automatically for evaluation.

Parameters:
  • start_at_version – Version number of character to start at. Then shows all the remaining versions.

  • alphabet_ids – IDs of alphabets to test. Tests all characters within the alphabet.

  • num_versions – Number of versions of each character to test. If None, all versions are shown.

  • data_path – Path to the Omniglot dataset. If None, it is set to ~/tbp/data/omniglot/python/.

Returns:

OmniglotDataloaderArgs for evaluation.

get_omniglot_train_dataloader(num_versions, alphabet_ids, data_path=None)[source]#

Generate OmniglotDataloaderArgs automatically for training.

Parameters:
  • num_versions – Number of versions to show for each character (starting at 1).

  • alphabet_ids – IDs of alphabets to show. All characters within an alphabet will be presented which may be a variable amount.

  • data_path – Path to the Omniglot dataset. If None, it is set to ~/tbp/data/omniglot/python/.

Returns:

OmniglotDataloaderArgs for training.

make_multi_sensor_mount_config(n_sensors: int, agent_id: str = 'agent_id_0', sensor_ids: Sequence[str] | None = None, height: Number = 0.0, position: numpy.typing.ArrayLike = (0, 1.5, 0.2), resolutions: numpy.typing.ArrayLike | None = None, positions: numpy.typing.ArrayLike | None = None, rotations: numpy.typing.ArrayLike | None = None, semantics: numpy.typing.ArrayLike | None = None, zooms: numpy.typing.ArrayLike | None = None) Mapping[str, Any][source]#

Generate a multi-sensor mount configuration.

Creates a multi-sensor, single-agent mount config. Its primary use is in generating a MultiLMMountHabitatDatasetArgs config. Defaults are reasonable and reflect current common practices.

Note

n_sensors indicates the number of non-view-finder sensors. However, the arrays generated for sensor_ids, resolutions, positions, rotations, semantics, and zooms will have length n_sensors + 1 to accommodate a view finder. As such, arguments supplied for these arrays must also have length n_sensors + 1, where the view finder’s values come last.

Parameters:
  • n_sensors – Number of sensors, not including the view finder.

  • agent_id – ID of the agent. Defaults to “agent_id_0”.

  • sensor_ids – IDs of the sensor modules. Defaults to [“patch_0”, “patch_1”, … “patch_{n_sensors - 1}”, “view_finder”].

  • height – Height of the agent. Defaults to 0.0.

  • position – Position of the agent. Defaults to [0, 1.5, 0.2].

  • resolutions – Resolutions of the sensors. Defaults to (64, 64) for all sensors.

  • positions – Positions of the sensors. If not provided, calls make_sensor_positions_on_grid with its default arguments.

  • rotations – Rotations of the sensors. Defaults to [1, 0, 0, 0] for all sensors.

  • semantics – Defaults to False for all sensors.

  • zooms – Zooms of the sensors. Defaults to 10.0 for all sensors except for the view finder (which has a zoom of 1.0).

Returns:

A dictionary representing a complete multi-sensor mount config. Arrays

are converted to lists.

Return type:

dict

make_sensor_positions_on_grid(n_sensors: int, delta: Number = 0.01, order_by: str = 'distance', add_view_finder: bool = True) numpy.ndarray[source]#

Generate sensor positions on a 2D grid.

Create mounting positions for an arbitrary number of sensors, where the sensors lie on an imaginary grid on the xy plane (and z = 0). Sensor position 0 is always centered at (0, 0, 0), and all other sensors are clustered around it. The method for selecting which grid points around the center to assign each sensor is determined by the order_by argument (see below).

By default, n_sensors + 1 positions are returned; the first n_sensors positions are for regular sensors, and an additional position is appended by default to accommodate a view finder. The view finder position (if used) is the same as sensor position 0 (i.e., (0, 0, 0)).

Parameters:
  • n_sensors (int) – Number of sensors. Count should not include a view finder.

  • delta (number) – The grid spacing length. By default, sensors will be placed every centimeter (units are in meters).

  • order_by (str, optional) –

    How to select points on the grid that will contain sensors.

    • ”spiral”: sensors are numbered along a counter-clockwise spiral

      spreading outwards from the center.

    • ”distance”: sensors are ordered by their distance from the center.

      This can result in a more jagged pattern along the edges but results in sensors generally more packed towards the center. Positions that are equidistant from the center are ordered counterclockwise starting at 3 o’clock.

  • add_view_finder (bool, optional) – Whether to include an extra position module at the origin to serve as a view finder. Defaults to True.

Returns:

A 2D array of sensor positions where each row is an array of

(x, y, z) positions. If add_view_finder is True, the array has n_sensors + 1 rows, where the last row corresponds to the view finder’s position and is identical to row 0. Otherwise, the array has n_sensors rows. Row 0 is always centered at (0, 0, 0), and all other rows are offset relative to it.

Return type:

np.ndarray

tbp.monty.frameworks.config_utils.policy_setup_utils#

class BasePolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[ActionSampler], agent_id: str, file_name: str | None = None, switch_frequency: float = 0.05)[source]#

Bases: object

Config for BasePolicy.

action_sampler_args: Dict#
action_sampler_class: Type[ActionSampler]#
agent_id: str#
file_name: str | None = None#
switch_frequency: float = 0.05#
class InformedPolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[tbp.monty.frameworks.actions.action_samplers.ActionSampler], agent_id: str, file_name: str | NoneType = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.03, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25)[source]#

Bases: object

action_sampler_args: Dict#
action_sampler_class: Type[ActionSampler]#
agent_id: str#
desired_object_distance: float = 0.03#
file_name: str | None = None#
good_view_percentage: float = 0.5#
min_perc_on_obj: float = 0.25#
switch_frequency: float = 1.0#
use_goal_state_driven_actions: bool = False#
class NaiveScanPolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[tbp.monty.frameworks.actions.action_samplers.ActionSampler], agent_id: str, file_name: str | NoneType = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.03, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25, fixed_amount: float = 3.0)[source]#

Bases: InformedPolicyConfig

fixed_amount: float = 3.0#
use_goal_state_driven_actions: bool = False#
class SurfaceCurveInformedPolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[tbp.monty.frameworks.actions.action_samplers.ActionSampler], agent_id: str, file_name: str | NoneType = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.025, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25, alpha: float = 0.1, pc_alpha: float = 0.5, max_pc_bias_steps: int = 32, min_general_steps: int = 8, min_heading_steps: int = 12)[source]#

Bases: SurfacePolicyConfig

desired_object_distance: float = 0.025#
max_pc_bias_steps: int = 32#
min_general_steps: int = 8#
min_heading_steps: int = 12#
pc_alpha: float = 0.5#
class SurfacePolicyConfig(action_sampler_args: Dict, action_sampler_class: Type[tbp.monty.frameworks.actions.action_samplers.ActionSampler], agent_id: str, file_name: str | NoneType = None, good_view_percentage: float = 0.5, desired_object_distance: float = 0.025, use_goal_state_driven_actions: bool = False, switch_frequency: float = 1.0, min_perc_on_obj: float = 0.25, alpha: float = 0.1)[source]#

Bases: InformedPolicyConfig

alpha: float = 0.1#
desired_object_distance: float = 0.025#
generate_action_list(action_space_type) List[Action][source]#

Generate an action list based on a given action space type.

Parameters:

action_space_type – name of action space, one of “distant_agent”, “distant_agent_no_translation”, “absolute_only”, or “surface_agent”

Returns:

Action list to use for the given action space type

make_base_policy_config(action_space_type: str, action_sampler_class: Type[ActionSampler], agent_id: str = 'agent_id_0')[source]#

Generates a config that will apply for the BasePolicy class.

Parameters:
  • action_space_type – name of action space, one of “distant_agent”, “distant_agent_no_translation”, “absolute_only”, or “surface_agent”

  • action_sampler_class – ActionSampler class to use

  • agent_id – Agent name. Defaults to “agent_id_0”.

Returns:

BasePolicyConfig instance

make_curv_surface_policy_config(desired_object_distance, alpha, pc_alpha, max_pc_bias_steps, min_general_steps, min_heading_steps, use_goal_state_driven_actions=False, action_sampler_class: ~typing.Type[~tbp.monty.frameworks.actions.action_samplers.ActionSampler] = <class 'tbp.monty.frameworks.actions.action_samplers.ConstantSampler'>, action_space_type='surface_agent', file_name=None, agent_id='agent_id_0', **kwargs)[source]#

For the SurfacePolicyCurvatureInformed policy.

Parameters:
  • desired_object_distance

    ?

  • alpha

    ?

  • pc_alpha

    ?

  • max_pc_bias_steps

    ?

  • min_general_steps

    ?

  • min_heading_steps

    ?

  • use_goal_state_driven_actions – Defaults to False

  • action_sampler_class – Defaults to ConstantSampler

  • action_space_type – Defaults to “surface_agent”

  • file_name – Defaults to None

  • agent_id – Agent name. Defaults to “agent_id_0”.

  • **kwargs

    Any additional keyword arguments. These may include parameters for ActionSampler configuration:

    absolute_degrees, max_absolute_degrees, min_absolute_degrees, direction, location, rotation_degrees, rotation_quat, max_rotation_degrees, min_rotation_degrees, translation_distance, max_translation, min_translation.

Returns:

SurfaceCurveInformedPolicyConfig instance

make_informed_policy_config(action_space_type: str, action_sampler_class: Type[ActionSampler], good_view_percentage: float = 0.5, use_goal_state_driven_actions: bool = False, file_name: str | None = None, agent_id: str = 'agent_id_0', switch_frequency: float = 1.0, **kwargs)[source]#

Similar to BasePolicyConfigGenerator, but for InformedPolicy class.

Parameters:
  • action_space_type – name of action space, one of “distant_agent”, “distant_agent_no_translation”, “absolute_only”, or “surface_agent”

  • action_sampler_class – ActionSampler class to use

  • good_view_percentage – Defaults to 0.5

  • use_goal_state_driven_actions – Defaults to False

  • file_name – Defaults to None

  • agent_id – Agent name. Defaults to “agent_id_0”.

  • switch_frequency – Defaults to 1.0

  • **kwargs

    Any additional keyword arguments. These may include parameters for ActionSampler configuration:

    absolute_degrees, max_absolute_degrees, min_absolute_degrees, direction, location, rotation_degrees, rotation_quat, max_rotation_degrees, min_rotation_degrees, translation_distance, max_translation, min_translation.

Returns:

InformedPolicyConfig instance

make_naive_scan_policy_config(step_size: float, agent_id='agent_id_0')[source]#

Similar to InformedPolicyConfigGenerator, but for NaiveScanPolicyConfig.

Currently less flexible than the other two functions above, because it is only used with one set of parameters.

Parameters:
  • step_size – Fixed amount to move the agent

  • agent_id – Agent name. Defaults to “agent_id_0”.

Returns:

NaiveScanPolicyConfig instance

make_surface_policy_config(desired_object_distance: float, alpha: float, use_goal_state_driven_actions: bool = False, action_sampler_class: ~typing.Type[~tbp.monty.frameworks.actions.action_samplers.ActionSampler] = <class 'tbp.monty.frameworks.actions.action_samplers.ConstantSampler'>, action_space_type: str = 'surface_agent', file_name: str | None = None, agent_id: str = 'agent_id_0', **kwargs)[source]#

Similar to BasePolicyConfigGenerator, but for the SurfacePolicy class.

Parameters:
  • desired_object_distance

    ?

  • alpha

    ?

  • use_goal_state_driven_actions – Defaults to False

  • action_sampler_class – Defaults to ConstantSampler

  • action_space_type – Defaults to “surface_agent”

  • file_name – Defaults to None

  • agent_id – Agent name. Defaults to “agent_id_0”.

  • **kwargs

    Any additional keyword arguments. These may include parameters for ActionSampler configuration:

    absolute_degrees, max_absolute_degrees, min_absolute_degrees, direction, location, rotation_degrees, rotation_quat, max_rotation_degrees, min_rotation_degrees, translation_distance, max_translation, min_translation.

Returns:

SurfacePolicyConfig instance