Upload 9 files
- .gitattributes +2 -0
- assets/metadata.textproto +3 -0
- checkpoint +2 -0
- ckpt-400120.data-00000-of-00001 +3 -0
- ckpt-400120.index +0 -0
- fingerprint.pb +3 -0
- policy_specs.pbtxt +359 -0
- saved_model.pb +3 -0
- variables/variables.data-00000-of-00001 +3 -0
- variables/variables.index +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ckpt-400120.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
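The two new rules route the large checkpoint and variables shards through Git LFS, so a plain clone without LFS installed leaves three-line pointer stubs in place of the real payloads (the stubs are exactly what the `ADDED` sections below show). A minimal Python sketch for telling a stub from a fetched payload; the filename is just an example from this commit:

```python
# Minimal sketch: detect whether a local file is a Git LFS pointer stub
# rather than the real payload. Assumes the standard pointer format, whose
# first line is "version https://git-lfs.github.com/spec/v1".
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        head = f.read(100)
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

if __name__ == "__main__":
    # If this prints True, run `git lfs pull` to fetch the real file.
    print(is_lfs_pointer("ckpt-400120.data-00000-of-00001"))
```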
assets/metadata.textproto
ADDED
@@ -0,0 +1,3 @@
+xmanager_id: 77467904
+xmanager_wid: 1
+gin_config: "from __gin__ import dynamic_registration\nfrom gin import config\nimport google3.robotics.datasets.specs\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.datasets.datasets as google32\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.filters as google33\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.fractal_rds_input_registry as google34\nfrom google3.robotics.learning.task_explore.tasks.fractal.data.rds import model_spec\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.rds_input_builder as google35\nimport google3.robotics.learning.task_explore.tasks.fractal.data.rds.transforms as google36\nimport google3.robotics.learning.task_explore.tasks.fractal.env_options as google37\nimport google3.robotics.learning.task_explore.tasks.fractal.t2r_models.fractal_critic_model_builder as google38\nimport google3.robotics.learning.task_explore.tasks.fractal.t2r_models.moma_critic_model_builder as google39\nfrom google3.robotics.learning.task_explore.tasks.fractal.tf_agents import action_tokenizer\nimport google3.robotics.learning.task_explore.tasks.fractal.tf_agents.launch.gin_helper as google310\nfrom google3.robotics.learning.task_explore.tasks.fractal.tf_agents import observation_tokenizer\nimport google3.robotics.learning.task_explore.tasks.fractal.tf_agents.sample_clipper as google311\nfrom google3.robotics.learning.task_explore.tasks.mobile_grasping.t2r_models import grasping_44_critic_fat_model_builder\nimport google3.robotics.learning.task_explore.tf_agents.bc0.bc0_actor_net as google312\nfrom google3.robotics.learning.task_explore.tf_agents.bc0 import pretrained_efficientnet_encoder\nimport google3.robotics.learning.task_explore.tf_agents.episode_statistics_visualizations as google313\nimport google3.robotics.learning.task_explore.tf_agents.kuka_e2e_grasping.grasping_net as google314\nimport google3.robotics.learning.task_explore.tf_agents.kuka_e2e_grasping.workspace_clip_policy as google315\nfrom google3.robotics.learning.task_explore.tf_agents.policies import greedy_policy\nimport google3.robotics.learning.task_explore.tf_agents.preprocessors as google316\nfrom google3.robotics.learning.task_explore.tf_agents.sequence import sequence_agent\nfrom google3.robotics.learning.task_explore.tf_agents.sequence import transformer_network\nfrom google3.robotics.learning.task_explore.tf_agents import tf_agents_trainer\nimport google3.robotics.learning.task_explore.utils.gin_string_functions as google317\nfrom robotics_transformer.film_efficientnet import preprocessors\nfrom tensor2robot.preprocessors import image_transformations\nimport tensor2robot.utils.tensorspec_utils\nimport tensorflow as tf\nfrom tf_agents.google.xm import environment_specs\nfrom tf_agents.policies import actor_policy\nimport tf_agents.policies.samplers.qtopt_cem_actions_sampler_continuous_and_one_hot\nfrom tf_agents.train import learner\n\n# Macros:\n# ==============================================================================\nACTION_ORDER = @model_spec.get_action_spec_order()\nACTOR_NETWORK = @transformer_network.TransformerNetwork\nACTOR_OPTIMIZER = @actor_optimizer/tf.keras.optimizers.Adam()\nBASE_TRANSFORM_NAMES = [\'rt_1_arm_depth_without_filters\']\nBATCH_SIZE = 16\nCROP_SIZE = 236\nDATASET_NAMES = [\'fractal.mk1_500tasks_te_real\']\nDATASET_WEIGHTS = [1.0]\nDEBUG_SUMMARIES = True\nLEARNING_RATE_ACTOR = 0.0001\nLOG_SUMMARY_INTERVAL = 280\nMAX_TRAINING_STEPS = 45000\nNUM_SHARDS_REVERB = 40\nPOLICY_CHECKPOINT_INTERVAL = 280\nREPLAY_BUFFER_BACKEND = None\nSEQUENCE_LENGTH = 6\nSPLIT_SLICES = [\'\']\nSPLITS = [\'train\']\nTF_AGENT_CLASS = @sequence_agent.SequenceAgent\nTRAIN_CHECKPOINT_INTERVAL = 280\nTRAIN_DIR = \\\n \'/cns/tp-d/home/tedxiao-brain/tedxiao/rs=6.3:sl=48M/ttl=12w:gc=0/tot_rds_test_112923_1v1_experiment_77467904/1/train\'\nTRAIN_LOG_DIR = \\\n \'/cns/tp-d/home/tedxiao-brain/tedxiao/rs=6.3:sl=48M/ttl=12w:gc=0/tot_rds_test_112923_1v1_experiment_77467904/1/train\'\nTRAIN_TABLE_NAME = None\nUSE_TCL = False\nVOCAB_SIZE = 255\n\n# Parameters for google316._distort_proxy_images:\n# ==============================================================================\ngoogle316._distort_proxy_images.all_augs = False\ngoogle316._distort_proxy_images.use_cutout = False\n\n# Parameters for environment_specs.action_spec:\n# ==============================================================================\n# None.\n\n# Parameters for actor_policy.ActorPolicy:\n# ==============================================================================\nactor_policy.ActorPolicy.clip = True\nactor_policy.ActorPolicy.name = None\nactor_policy.ActorPolicy.observation_and_action_constraint_splitter = None\nactor_policy.ActorPolicy.observation_normalizer = None\nactor_policy.ActorPolicy.policy_state_spec = ()\n\n# Parameters for actor_optimizer/tf.keras.optimizers.Adam:\n# ==============================================================================\nactor_optimizer/tf.keras.optimizers.Adam.adaptive_epsilon = False\nactor_optimizer/tf.keras.optimizers.Adam.amsgrad = False\nactor_optimizer/tf.keras.optimizers.Adam.beta_1 = 0.9\nactor_optimizer/tf.keras.optimizers.Adam.beta_2 = 0.999\nactor_optimizer/tf.keras.optimizers.Adam.clipnorm = None\nactor_optimizer/tf.keras.optimizers.Adam.clipvalue = None\nactor_optimizer/tf.keras.optimizers.Adam.ema_momentum = 0.99\nactor_optimizer/tf.keras.optimizers.Adam.ema_overwrite_frequency = None\nactor_optimizer/tf.keras.optimizers.Adam.epsilon = 1e-07\nactor_optimizer/tf.keras.optimizers.Adam.global_clipnorm = None\nactor_optimizer/tf.keras.optimizers.Adam.jit_compile = True\nactor_optimizer/tf.keras.optimizers.Adam.learning_rate = %LEARNING_RATE_ACTOR\nactor_optimizer/tf.keras.optimizers.Adam.name = \'Adam\'\nactor_optimizer/tf.keras.optimizers.Adam.use_ema = False\nactor_optimizer/tf.keras.optimizers.Adam.weight_decay = None\n\n# Parameters for image_transformations.ApplyPhotometricImageDistortions:\n# ==============================================================================\nimage_transformations.ApplyPhotometricImageDistortions.lower_contrast = 0.5\nimage_transformations.ApplyPhotometricImageDistortions.lower_saturation = 0.5\nimage_transformations.ApplyPhotometricImageDistortions.max_delta_brightness = 0.125\nimage_transformations.ApplyPhotometricImageDistortions.max_delta_hue = 0.2\nimage_transformations.ApplyPhotometricImageDistortions.random_noise_apply_probability = \\\n 0.5\nimage_transformations.ApplyPhotometricImageDistortions.upper_contrast = 1.5\nimage_transformations.ApplyPhotometricImageDistortions.upper_saturation = 1.5\n\n# Parameters for preprocessors.convert_dtype_and_crop_images:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.create_agent_and_specs:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.create_and_mix_rds_datasets:\n# ==============================================================================\ntf_agents_trainer.create_and_mix_rds_datasets.fewer_datasets_than_weights = False\ntf_agents_trainer.create_and_mix_rds_datasets.prefetch = True\ntf_agents_trainer.create_and_mix_rds_datasets.sample_from_datasets = False\n\n# Parameters for model_spec.create_model_spec:\n# ==============================================================================\n# None.\n\n# Parameters for google35.create_rds_episode_input_pipelines:\n# ==============================================================================\ngoogle35.create_rds_episode_input_pipelines.rds_dataset_names = %DATASET_NAMES\ngoogle35.create_rds_episode_input_pipelines.split_slices = %SPLIT_SLICES\ngoogle35.create_rds_episode_input_pipelines.splits = %SPLITS\ngoogle35.create_rds_episode_input_pipelines.traj_transforms_names = \\\n @google34.infer_trajectory_transform_names()\n\n# Parameters for google35.create_rds_input_pipeline:\n# ==============================================================================\ngoogle35.create_rds_input_pipeline.episode_ds_pipeline_fn = None\ngoogle35.create_rds_input_pipeline.repeat = True\ngoogle35.create_rds_input_pipeline.traj_shuffle_buffer_size = 4096\ngoogle35.create_rds_input_pipeline.use_replicated = True\n\n# Parameters for google35.create_rds_input_pipeline_for_registered_trajectory_transform:\n# ==============================================================================\ngoogle35.create_rds_input_pipeline_for_registered_trajectory_transform.allow_read_cached = \\\n True\n\n# Parameters for google35.create_rds_input_pipelines_for_registered_trajectory_transforms:\n# ==============================================================================\ngoogle35.create_rds_input_pipelines_for_registered_trajectory_transforms.episode_shuffle_buffer_size = \\\n 1\ngoogle35.create_rds_input_pipelines_for_registered_trajectory_transforms.rds_dataset_names = \\\n %DATASET_NAMES\ngoogle35.create_rds_input_pipelines_for_registered_trajectory_transforms.split_slices = \\\n %SPLIT_SLICES\ngoogle35.create_rds_input_pipelines_for_registered_trajectory_transforms.splits = \\\n %SPLITS\ngoogle35.create_rds_input_pipelines_for_registered_trajectory_transforms.traj_transforms_names = \\\n @google34.infer_trajectory_transform_names()\n\n# Parameters for google36.create_traj_transform_bc_transformer_builder:\n# ==============================================================================\ngoogle36.create_traj_transform_bc_transformer_builder.use_half_transition = True\ngoogle36.create_traj_transform_bc_transformer_builder.wrapper = None\n\n# Parameters for google36.create_traj_transform_bc_transformer_with_filters_builder:\n# ==============================================================================\ngoogle36.create_traj_transform_bc_transformer_with_filters_builder.step_filters = \\\n None\ngoogle36.create_traj_transform_bc_transformer_with_filters_builder.use_half_transition = \\\n True\n\n# Parameters for pretrained_efficientnet_encoder.EfficientNetEncoder:\n# ==============================================================================\npretrained_efficientnet_encoder.EfficientNetEncoder.freeze = False\npretrained_efficientnet_encoder.EfficientNetEncoder.include_top = False\npretrained_efficientnet_encoder.EfficientNetEncoder.model_variant = \'b3\'\npretrained_efficientnet_encoder.EfficientNetEncoder.weights = \'imagenet\'\n\n# Parameters for add_split_and_split_slice_to_dataset/google317.elementwise_string_join:\n# ==============================================================================\nadd_split_and_split_slice_to_dataset/google317.elementwise_string_join.left = \\\n %DATASET_NAMES\nadd_split_and_split_slice_to_dataset/google317.elementwise_string_join.right = \\\n @add_split_slice_to_split/google317.elementwise_string_join()\nadd_split_and_split_slice_to_dataset/google317.elementwise_string_join.separator = \\\n \'_\'\n\n# Parameters for add_split_slice_to_split/google317.elementwise_string_join:\n# ==============================================================================\nadd_split_slice_to_split/google317.elementwise_string_join.left = \\\n @add_split_slice_to_split/google35.infer_split()\nadd_split_slice_to_split/google317.elementwise_string_join.right = \\\n @add_split_slice_to_split/google35.infer_split_slice()\nadd_split_slice_to_split/google317.elementwise_string_join.separator = \'\'\n\n# Parameters for add_traj_transform_to_dataset/google317.elementwise_string_join:\n# ==============================================================================\nadd_traj_transform_to_dataset/google317.elementwise_string_join.left = \\\n @add_split_and_split_slice_to_dataset/google317.elementwise_string_join()\nadd_traj_transform_to_dataset/google317.elementwise_string_join.right = \\\n @google34.infer_trajectory_transform_names()\nadd_traj_transform_to_dataset/google317.elementwise_string_join.separator = \'_\'\n\n# Parameters for google36.episode_to_steps:\n# ==============================================================================\ngoogle36.episode_to_steps.discount_rate = 0.98\ngoogle36.episode_to_steps.keep_only_initial = False\ngoogle36.episode_to_steps.keep_only_initial_final = False\ngoogle36.episode_to_steps.num_padding_steps = None\ngoogle36.episode_to_steps.propagate_metadata_to_step = False\ngoogle36.episode_to_steps.use_goal_image = False\n\n# Parameters for action_tokenizer.FractalActionTokenizer:\n# ==============================================================================\naction_tokenizer.FractalActionTokenizer.action_token_shift = None\n\n# Parameters for observation_tokenizer.FractalObservationTokenizer:\n# ==============================================================================\nobservation_tokenizer.FractalObservationTokenizer.num_context_tokens = 1\nobservation_tokenizer.FractalObservationTokenizer.num_token_per_image = 8\nobservation_tokenizer.FractalObservationTokenizer.prepend_context_to_image = False\n\n# Parameters for model_spec.get_action_spec:\n# ==============================================================================\nmodel_spec.get_action_spec.base_displacement_vector_limit = 1.0\nmodel_spec.get_action_spec.base_displacement_vertical_rotation_limit = \\\n 3.141592653589793\nmodel_spec.get_action_spec.world_vector_limit = 1.0\n\n# Parameters for ACTION_ORDER/model_spec.get_action_spec:\n# ==============================================================================\nACTION_ORDER/model_spec.get_action_spec.base_displacement_vector_limit = 1.0\nACTION_ORDER/model_spec.get_action_spec.base_displacement_vertical_rotation_limit = \\\n 3.141592653589793\nACTION_ORDER/model_spec.get_action_spec.world_vector_limit = 1.0\n\n# Parameters for model_spec.get_action_spec_order:\n# ==============================================================================\n# None.\n\n# Parameters for ACTION_ORDER/model_spec.get_action_spec_order:\n# ==============================================================================\nACTION_ORDER/model_spec.get_action_spec_order.action_spec_option = \\\n %google3.robotics.learning.task_explore.tasks.fractal.env_options.ActionSpaceOptions.ARM\n\n# Parameters for model_spec.get_additional_state_images:\n# ==============================================================================\n# None.\n\n# Parameters for model_spec.get_spec:\n# ==============================================================================\nmodel_spec.get_spec.action_spec_option = \\\n %google3.robotics.learning.task_explore.tasks.fractal.env_options.ActionSpaceOptions.ARM\nmodel_spec.get_spec.observation_spec_option = \\\n %google3.robotics.learning.task_explore.tasks.fractal.data.rds.model_spec.ObservationSpecOptions.RT_1\n\n# Parameters for greedy_policy.GreedyPolicy:\n# ==============================================================================\ngreedy_policy.GreedyPolicy.name = None\n\n# Parameters for google35.infer_split:\n# ==============================================================================\ngoogle35.infer_split.default_split = \'train\'\n\n# Parameters for add_split_slice_to_split/google35.infer_split:\n# ==============================================================================\nadd_split_slice_to_split/google35.infer_split.default_split = \'train\'\nadd_split_slice_to_split/google35.infer_split.rds_dataset_names = %DATASET_NAMES\nadd_split_slice_to_split/google35.infer_split.splits = %SPLITS\n\n# Parameters for google35.infer_split_slice:\n# ==============================================================================\n# None.\n\n# Parameters for add_split_slice_to_split/google35.infer_split_slice:\n# ==============================================================================\nadd_split_slice_to_split/google35.infer_split_slice.rds_dataset_names = \\\n %DATASET_NAMES\nadd_split_slice_to_split/google35.infer_split_slice.split_slices = %SPLIT_SLICES\n\n# Parameters for google34.infer_trajectory_transform_names:\n# ==============================================================================\ngoogle34.infer_trajectory_transform_names.base_transform_names = \\\n %BASE_TRANSFORM_NAMES\ngoogle34.infer_trajectory_transform_names.experience_sequence_length = \\\n %SEQUENCE_LENGTH\n\n# Parameters for add_traj_transform_to_dataset/google34.infer_trajectory_transform_names:\n# ==============================================================================\nadd_traj_transform_to_dataset/google34.infer_trajectory_transform_names.base_transform_names = \\\n %BASE_TRANSFORM_NAMES\nadd_traj_transform_to_dataset/google34.infer_trajectory_transform_names.experience_sequence_length = \\\n %SEQUENCE_LENGTH\n\n# Parameters for learner.Learner:\n# ==============================================================================\nlearner.Learner.after_train_strategy_step_fn = None\nlearner.Learner.experience_dataset_options = None\nlearner.Learner.max_checkpoints_to_keep = 3\nlearner.Learner.strategy_run_options = None\nlearner.Learner.summary_root_dir = None\nlearner.Learner.triggers = None\nlearner.Learner.use_kwargs_in_agent_train = False\n\n# Parameters for google316.ProxyPreProcessor:\n# ==============================================================================\ngoogle316.ProxyPreProcessor.image_history_len = None\n\n# Parameters for google33.RDSFilterBuilder:\n# ==============================================================================\ngoogle33.RDSFilterBuilder.episode_threshold_fn = None\ngoogle33.RDSFilterBuilder.filter_negative_rewards = False\ngoogle33.RDSFilterBuilder.filter_small_action_threshold = None\ngoogle33.RDSFilterBuilder.fractal_dataset_registry_legacy_dataset_name = None\ngoogle33.RDSFilterBuilder.fractal_project_name_pattern = None\ngoogle33.RDSFilterBuilder.positive_filter = False\n\n# Parameters for sequence_agent.SequenceAgent:\n# ==============================================================================\nsequence_agent.SequenceAgent.action_spec = @environment_specs.action_spec()\nsequence_agent.SequenceAgent.actor_network = %ACTOR_NETWORK\nsequence_agent.SequenceAgent.actor_optimizer = %ACTOR_OPTIMIZER\nsequence_agent.SequenceAgent.debug_summaries = %DEBUG_SUMMARIES\nsequence_agent.SequenceAgent.time_sequence_length = %SEQUENCE_LENGTH\nsequence_agent.SequenceAgent.time_step_spec = @environment_specs.time_step_spec()\nsequence_agent.SequenceAgent.use_tcl = %USE_TCL\n\n# Parameters for add_split_and_split_slice_to_dataset/google317.string_join:\n# ==============================================================================\n# None.\n\n# Parameters for add_split_slice_to_split/google317.string_join:\n# ==============================================================================\n# None.\n\n# Parameters for add_traj_transform_to_dataset/google317.string_join:\n# ==============================================================================\n# None.\n\n# Parameters for google36.tfa_transition_builder:\n# ==============================================================================\n# None.\n\n# Parameters for environment_specs.time_step_spec:\n# ==============================================================================\n# None.\n\n# Parameters for tf_agents_trainer.train:\n# ==============================================================================\ntf_agents_trainer.train.batch_size = %BATCH_SIZE\ntf_agents_trainer.train.data_spec_create_fn = @model_spec.create_model_spec\ntf_agents_trainer.train.data_spec_path = \\\n \'/cns/tp-d/home/tedxiao-brain/tedxiao/rs=6.3:sl=48M/ttl=12w:gc=0/tot_rds_test_112923_1v1_experiment_77467904/1/train/collect_policy/environment_specs.textproto\'\ntf_agents_trainer.train.enable_xla = False\ntf_agents_trainer.train.experience_sequence_length = %SEQUENCE_LENGTH\ntf_agents_trainer.train.flywheel_tick_counter_ctor = None\ntf_agents_trainer.train.in_graph_bellman_update = False\ntf_agents_trainer.train.log_interval = %LOG_SUMMARY_INTERVAL\ntf_agents_trainer.train.num_shards = %NUM_SHARDS_REVERB\ntf_agents_trainer.train.number_training_steps = %MAX_TRAINING_STEPS\ntf_agents_trainer.train.policy_checkpoint_interval = %POLICY_CHECKPOINT_INTERVAL\ntf_agents_trainer.train.pull_buffer_names = \\\n @add_traj_transform_to_dataset/google317.elementwise_string_join()\ntf_agents_trainer.train.pull_buffer_weights = %DATASET_WEIGHTS\ntf_agents_trainer.train.rds_datasets_create_fn = \\\n @google35.create_rds_input_pipelines_for_registered_trajectory_transforms()\ntf_agents_trainer.train.rds_episode_datasets_create_fn = \\\n @google35.create_rds_episode_input_pipelines()\ntf_agents_trainer.train.replay_buffer_backend = %REPLAY_BUFFER_BACKEND\ntf_agents_trainer.train.replay_buffer_name = %TRAIN_TABLE_NAME\ntf_agents_trainer.train.saved_model_policy_wrapper = None\ntf_agents_trainer.train.summary_episode_replay_buffer_names = \\\n @add_traj_transform_to_dataset/google317.elementwise_string_join()\ntf_agents_trainer.train.summary_interval = %LOG_SUMMARY_INTERVAL\ntf_agents_trainer.train.tf_agent_class = %TF_AGENT_CLASS\ntf_agents_trainer.train.train_checkpoint_interval = %TRAIN_CHECKPOINT_INTERVAL\ntf_agents_trainer.train.train_dir = %TRAIN_DIR\ntf_agents_trainer.train.train_log_dir = %TRAIN_LOG_DIR\ntf_agents_trainer.train.train_summary = False\ntf_agents_trainer.train.train_summary_episode = False\ntf_agents_trainer.train.train_summary_load_latest_ckpt = True\ntf_agents_trainer.train.warm_start_dir = None\n\n# Parameters for transformer_network.TransformerNetwork:\n# ==============================================================================\ntransformer_network.TransformerNetwork.action_order = %ACTION_ORDER\ntransformer_network.TransformerNetwork.action_scales = [%VOCAB_SIZE]\ntransformer_network.TransformerNetwork.continuous_robot_state_features = ()\ntransformer_network.TransformerNetwork.crop_size = %CROP_SIZE\ntransformer_network.TransformerNetwork.dropout_rate = 0.1\ntransformer_network.TransformerNetwork.feed_forward_hidden_size = 512\ntransformer_network.TransformerNetwork.feed_forward_output_size = 512\ntransformer_network.TransformerNetwork.ffn_option = \'swiglu\'\ntransformer_network.TransformerNetwork.image_patch_size = 16\ntransformer_network.TransformerNetwork.image_position_embedding = \\\n %google3.robotics.learning.task_explore.tasks.fractal.tf_agents.observation_tokenizer.PositionEmbeddingType.NONE\ntransformer_network.TransformerNetwork.image_token_encoder = \\\n %google3.robotics.learning.task_explore.tasks.fractal.tf_agents.observation_tokenizer.EncoderType.FILM_PRETRAINED_EFFICIENT_NET\ntransformer_network.TransformerNetwork.images_to_use = (\'image\',)\ntransformer_network.TransformerNetwork.include_prev_timesteps_actions = False\ntransformer_network.TransformerNetwork.include_same_timestep_prev_action_dimensions = \\\n False\ntransformer_network.TransformerNetwork.inference_time_return_discount_factor = 0.98\ntransformer_network.TransformerNetwork.layer_size = 128\ntransformer_network.TransformerNetwork.num_heads = 8\ntransformer_network.TransformerNetwork.num_layers = 8\ntransformer_network.TransformerNetwork.output_tokens_per_frame = None\ntransformer_network.TransformerNetwork.return_attention_scores = False\ntransformer_network.TransformerNetwork.return_optimality_weight = 0.1\ntransformer_network.TransformerNetwork.return_top_percentile = 85\ntransformer_network.TransformerNetwork.return_vocab_size = 128\ntransformer_network.TransformerNetwork.should_log_image = False\ntransformer_network.TransformerNetwork.stack_images = False\ntransformer_network.TransformerNetwork.state_features = None\ntransformer_network.TransformerNetwork.tcl_weight = 0.05\ntransformer_network.TransformerNetwork.token_embedding_size = 512\ntransformer_network.TransformerNetwork.use_token_learner = True\ntransformer_network.TransformerNetwork.vocab_size = %VOCAB_SIZE\n"
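The metadata records the XManager experiment id and the full operative gin configuration of the training run: an RT-1-style TransformerNetwork (num_layers = 8, num_heads = 8, token_embedding_size = 512, vocab_size = 255) trained with Adam at learning rate 0.0001, batch size 16, sequence length 6. A minimal sketch for pulling the scalar fields back out without a compiled proto schema, assuming the one-field-per-line layout shown above (the gin_config string would additionally need textproto unescaping):

```python
import re

# Minimal sketch: read the scalar metadata fields from the textproto.
# Assumes simple `key: value` lines as in the file above.
text = open("assets/metadata.textproto").read()
xmanager_id = int(re.search(r"^xmanager_id:\s*(\d+)", text, re.M).group(1))
xmanager_wid = int(re.search(r"^xmanager_wid:\s*(\d+)", text, re.M).group(1))
print(xmanager_id, xmanager_wid)  # expected: 77467904 1
```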
checkpoint
ADDED
@@ -0,0 +1,2 @@
+model_checkpoint_path: "/cns/tp-d/home/tedxiao-brain/tedxiao/rs=6.3:sl=48M/ttl=12w:gc=0/tot_rds_test_112923_1v1_experiment_77467904/1/train/policy/000400120/ckpt-400120"
+all_model_checkpoint_paths: "/cns/tp-d/home/tedxiao-brain/tedxiao/rs=6.3:sl=48M/ttl=12w:gc=0/tot_rds_test_112923_1v1_experiment_77467904/1/train/policy/000400120/ckpt-400120"
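The checkpoint state file records absolute CNS paths from the training job, which won't resolve outside that filesystem; the portable part is the ckpt-400120.index / ckpt-400120.data-00000-of-00001 pair added below. A minimal sketch for inspecting that pair locally with TensorFlow's checkpoint reader; the local prefix path is an assumption:

```python
import tensorflow as tf

# Minimal sketch: list the variable names and shapes stored in the
# checkpoint. Assumes ckpt-400120.index and ckpt-400120.data-00000-of-00001
# sit in the current directory; the prefix (no suffix) names the checkpoint.
reader = tf.train.load_checkpoint("./ckpt-400120")
for name, shape in sorted(reader.get_variable_to_shape_map().items()):
    print(name, shape)
```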
ckpt-400120.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69952f3a6a8f7ee2feb6fdd6b6d191fe7b4d77509aebf68033cba0c97de68aef
+size 473890853
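Each LFS pointer carries the SHA-256 digest and byte size of the payload it stands in for, so a downloaded file can be checked against the pointer. A minimal sketch, assuming the payload has already been fetched to the pointer's path:

```python
import hashlib
import os

# Minimal sketch: verify a fetched LFS payload against the pointer's
# recorded digest and size. Both expected values come from the pointer
# shown above; the local filename is an assumption.
EXPECTED_OID = "69952f3a6a8f7ee2feb6fdd6b6d191fe7b4d77509aebf68033cba0c97de68aef"
EXPECTED_SIZE = 473890853

path = "ckpt-400120.data-00000-of-00001"
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert os.path.getsize(path) == EXPECTED_SIZE
assert h.hexdigest() == EXPECTED_OID
print("payload matches the LFS pointer")
```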
ckpt-400120.index
ADDED
Binary file (34.3 kB).
fingerprint.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ad4d0dda0fb4f9f2866c8244537262ec23677d7db21343a2d3aaa10a788afa2
+size 56
policy_specs.pbtxt
ADDED
@@ -0,0 +1,359 @@
+dict_value {
+  fields {
+    key: "collect_data_spec"
+    value {
+      named_tuple_value {
+        name: "Trajectory"
+        values {
+          key: "step_type"
+          value {
+            tensor_spec_value {
+              name: "step_type"
+              shape {
+              }
+              dtype: DT_INT32
+            }
+          }
+        }
+        values {
+          key: "observation"
+          value {
+            dict_value {
+              fields {
+                key: "image"
+                value {
+                  bounded_tensor_spec_value {
+                    name: "image"
+                    shape {
+                      dim {
+                        size: 256
+                      }
+                      dim {
+                        size: 320
+                      }
+                      dim {
+                        size: 3
+                      }
+                    }
+                    dtype: DT_UINT8
+                    minimum {
+                      dtype: DT_UINT8
+                      tensor_shape {
+                      }
+                      int_val: 0
+                    }
+                    maximum {
+                      dtype: DT_UINT8
+                      tensor_shape {
+                      }
+                      int_val: 255
+                    }
+                  }
+                }
+              }
+              fields {
+                key: "natural_language_embedding"
+                value {
+                  tensor_spec_value {
+                    name: "natural_language_embedding"
+                    shape {
+                      dim {
+                        size: 512
+                      }
+                    }
+                    dtype: DT_FLOAT
+                  }
+                }
+              }
+            }
+          }
+        }
+        values {
+          key: "action"
+          value {
+            dict_value {
+              fields {
+                key: "gripper_closedness_action"
+                value {
+                  bounded_tensor_spec_value {
+                    name: "gripper_closedness_action"
+                    shape {
+                      dim {
+                        size: 1
+                      }
+                    }
+                    dtype: DT_FLOAT
+                    minimum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: -1.0
+                    }
+                    maximum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: 1.0
+                    }
+                  }
+                }
+              }
+              fields {
+                key: "rotation_delta"
+                value {
+                  bounded_tensor_spec_value {
+                    name: "rotation_delta"
+                    shape {
+                      dim {
+                        size: 3
+                      }
+                    }
+                    dtype: DT_FLOAT
+                    minimum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: -1.5707964
+                    }
+                    maximum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: 1.5707964
+                    }
+                  }
+                }
+              }
+              fields {
+                key: "terminate_episode"
+                value {
+                  bounded_tensor_spec_value {
+                    name: "terminate_episode"
+                    shape {
+                      dim {
+                        size: 3
+                      }
+                    }
+                    dtype: DT_INT32
+                    minimum {
+                      dtype: DT_INT32
+                      tensor_shape {
+                      }
+                      int_val: 0
+                    }
+                    maximum {
+                      dtype: DT_INT32
+                      tensor_shape {
+                      }
+                      int_val: 1
+                    }
+                  }
+                }
+              }
+              fields {
+                key: "world_vector"
+                value {
+                  bounded_tensor_spec_value {
+                    name: "world_vector"
+                    shape {
+                      dim {
+                        size: 3
+                      }
+                    }
+                    dtype: DT_FLOAT
+                    minimum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: -1.0
+                    }
+                    maximum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: 1.0
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+        values {
+          key: "policy_info"
+          value {
+            dict_value {
+              fields {
+                key: "discounted_return"
+                value {
+                  bounded_tensor_spec_value {
+                    name: "discounted_return"
+                    shape {
+                    }
+                    dtype: DT_FLOAT
+                    minimum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: 0.0
+                    }
+                    maximum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: 1.0
+                    }
+                  }
+                }
+              }
+              fields {
+                key: "return"
+                value {
+                  bounded_tensor_spec_value {
+                    name: "return"
+                    shape {
+                    }
+                    dtype: DT_FLOAT
+                    minimum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: 0.0
+                    }
+                    maximum {
+                      dtype: DT_FLOAT
+                      tensor_shape {
+                      }
+                      float_val: 1.0
+                    }
+                  }
+                }
+              }
+            }
+          }
+        }
+        values {
+          key: "next_step_type"
+          value {
+            tensor_spec_value {
+              name: "step_type"
+              shape {
+              }
+              dtype: DT_INT32
+            }
+          }
+        }
+        values {
+          key: "reward"
+          value {
+            tensor_spec_value {
+              name: "reward"
+              shape {
+              }
+              dtype: DT_FLOAT
+            }
+          }
+        }
+        values {
+          key: "discount"
+          value {
+            bounded_tensor_spec_value {
+              name: "discount"
+              shape {
+              }
+              dtype: DT_FLOAT
+              minimum {
+                dtype: DT_FLOAT
+                tensor_shape {
+                }
+                float_val: 0.0
+              }
+              maximum {
+                dtype: DT_FLOAT
+                tensor_shape {
+                }
+                float_val: 1.0
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  fields {
+    key: "policy_state_spec"
+    value {
+      dict_value {
+        fields {
+          key: "action_tokens"
+          value {
+            tensor_spec_value {
+              name: "action_tokens"
+              shape {
+                dim {
+                  size: 6
+                }
+                dim {
+                  size: 8
+                }
+                dim {
+                  size: 1
+                }
+                dim {
+                  size: 1
+                }
+              }
+              dtype: DT_INT32
+            }
+          }
+        }
+        fields {
+          key: "context_image_tokens"
+          value {
+            tensor_spec_value {
+              name: "context_image_tokens"
+              shape {
+                dim {
+                  size: 6
+                }
+                dim {
+                  size: 8
+                }
+                dim {
+                  size: 1
+                }
+                dim {
+                  size: 512
+                }
+              }
+              dtype: DT_FLOAT
+            }
+          }
+        }
+        fields {
+          key: "seq_idx"
+          value {
+            tensor_spec_value {
+              name: "seq_idx"
+              shape {
+                dim {
+                  size: 1
+                }
+                dim {
+                  size: 1
+                }
+                dim {
+                  size: 1
+                }
+                dim {
+                  size: 1
+                }
+              }
+              dtype: DT_INT32
+            }
+          }
+        }
+      }
+    }
+  }
+}
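policy_specs.pbtxt is a text-format StructuredValue proto (TensorFlow's struct.proto) describing the policy's collect data spec and recurrent state spec. A minimal sketch for parsing it, with the caveat that `tensorflow.core.protobuf.struct_pb2` is an internal, not a stable public, module, so the import may shift between TF versions:

```python
from google.protobuf import text_format
from tensorflow.core.protobuf import struct_pb2

# Minimal sketch: parse policy_specs.pbtxt into a StructuredValue and list
# the top-level entries ("collect_data_spec", "policy_state_spec") along
# with which oneof variant each one holds.
specs = struct_pb2.StructuredValue()
with open("policy_specs.pbtxt") as f:
    text_format.Parse(f.read(), specs)

for key, value in specs.dict_value.fields.items():
    print(key, "->", value.WhichOneof("kind"))
```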
saved_model.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d06f164c2877f6f1e2178b8c3fa29a04fcd48249f0279ab890684b0e40fcc59d
+size 14824110
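saved_model.pb together with the variables/ directory forms a complete TF SavedModel, so once the LFS payloads are fetched the policy can be restored for inference. A minimal sketch using TF-Agents' saved-model policy wrapper, which can rebuild the time-step and action specs from policy_specs.pbtxt; treating the repo root as the model path is an assumption:

```python
from tf_agents.policies import py_tf_eager_policy

# Minimal sketch: load the exported policy. load_specs_from_pbtxt makes the
# wrapper read the specs from policy_specs.pbtxt next to the SavedModel;
# "." assumes the repo root (with saved_model.pb) is the current directory.
policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
    model_path=".", load_specs_from_pbtxt=True, use_tf_function=True)

print(policy.time_step_spec)
print(policy.action_spec)
```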
variables/variables.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e74387d110cbbf7e1b93bec3ba689df5050dffd93e5912869b21e56779683db6
+size 158202888
variables/variables.index
ADDED
Binary file (13.5 kB).