
ding.entry.serial_entry_td3_vae

serial_pipeline_td3_vae(input_cfg, seed=0, env_setting=None, model=None, max_train_iter=int(1e10), max_env_step=int(1e10))

Overview

Serial pipeline entry for VAE latent action.

Arguments:

- input_cfg (Union[str, Tuple[dict, dict]]): Config in dict type. str type means config file path; Tuple[dict, dict] type means [user_config, create_cfg].
- seed (int): Random seed.
- env_setting (Optional[List[Any]]): A list with 3 elements: BaseEnv subclass, collector env config, and evaluator env config.
- model (Optional[torch.nn.Module]): Instance of torch.nn.Module.
- max_train_iter (Optional[int]): Maximum policy update iterations in training.
- max_env_step (Optional[int]): Maximum collected environment interaction steps.

Returns:

- policy (Policy): Converged policy.
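A minimal invocation sketch is shown below. The config module name and the main_config/create_config objects are assumptions for illustration, not part of this API; substitute the TD3-VAE config you actually use.

# Hypothetical usage sketch: the config module name and its main_config /
# create_config objects are assumptions for illustration, not part of this API.
from ding.entry.serial_entry_td3_vae import serial_pipeline_td3_vae
from my_td3_vae_config import main_config, create_config  # assumed user config module

if __name__ == "__main__":
    # Pass (user_config, create_cfg) as a tuple, or a config file path string.
    policy = serial_pipeline_td3_vae(
        (main_config, create_config),
        seed=0,
        max_env_step=int(1e6),  # stop after 1e6 collected environment steps
    )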

Full Source Code

../ding/entry/serial_entry_td3_vae.py

from typing import Union, Optional, List, Any, Tuple
import os
import torch
from ditk import logging
import copy
from functools import partial
from tensorboardX import SummaryWriter
from copy import deepcopy

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \
    create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy
from ding.utils import set_pkg_seed
from .utils import random_collect, mark_not_expert, mark_warm_up


def serial_pipeline_td3_vae(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        env_setting: Optional[List[Any]] = None,
        model: Optional[torch.nn.Module] = None,
        max_train_iter: Optional[int] = int(1e10),
        max_env_step: Optional[int] = int(1e10),
) -> 'Policy':  # noqa
    """
    Overview:
        Serial pipeline entry for VAE latent action.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \
            ``BaseEnv`` subclass, collector env config, and evaluator env config.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
        - max_train_iter (:obj:`Optional[int]`): Maximum policy update iterations in training.
        - max_env_step (:obj:`Optional[int]`): Maximum collected environment interaction steps.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
    """
    if isinstance(input_cfg, str):
        cfg, create_cfg = read_config(input_cfg)
    else:
        cfg, create_cfg = deepcopy(input_cfg)
    create_cfg.policy.type = create_cfg.policy.type + '_command'
    env_fn = None if env_setting is None else env_setting[0]
    cfg = compile_config(cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True)
    # Create main components: env, policy
    if env_setting is None:
        env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    else:
        env_fn, collector_env_cfg, evaluator_env_cfg = env_setting
    collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    collector_env.seed(cfg.seed)
    evaluator_env.seed(cfg.seed, dynamic_seed=False)
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])

    # Create worker components: learner, collector, evaluator, replay buffer, commander.
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    collector = create_serial_collector(
        cfg.policy.collect.collector,
        env=collector_env,
        policy=policy.collect_mode,
        tb_logger=tb_logger,
        exp_name=cfg.exp_name
    )
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
    )
    replay_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name)
    replay_buffer_recent = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name)

    commander = BaseSerialCommander(
        cfg.policy.other.commander, learner, collector, evaluator, replay_buffer, policy.command_mode
    )
    # ==========
    # Main loop
    # ==========
    # Learner's before_run hook.
    learner.call_hook('before_run')

    # Accumulate plenty of data at the beginning of training.
    if cfg.policy.get('random_collect_size', 0) > 0:
        # backup
        # if cfg.policy.get('transition_with_policy_data', False):
        #     collector.reset_policy(policy.collect_mode)
        # else:
        #     action_space = collector_env.action_space
        #     random_policy = PolicyFactory.get_random_policy(policy.collect_mode, action_space=action_space)
        #     collector.reset_policy(random_policy)
        # collect_kwargs = commander.step()
        # new_data = collector.collect(n_sample=cfg.policy.random_collect_size, policy_kwargs=collect_kwargs)
        # for item in new_data:
        #     item['warm_up'] = True
        # replay_buffer.push(new_data, cur_collector_envstep=0)
        # collector.reset_policy(policy.collect_mode)
        # postprocess_data_fn = lambda x: mark_warm_up(mark_not_expert(x))
        random_collect(
            cfg.policy,
            policy,
            collector,
            collector_env,
            commander,
            replay_buffer,
            postprocess_data_fn=lambda x: mark_warm_up(mark_not_expert(x))  # postprocess_data_fn
        )
        # warm_up
        # Learn policy from collected data
        for i in range(cfg.policy.learn.warm_up_update):
            # Learner will train ``update_per_collect`` times in one iteration.
            train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
            if train_data is None:
                # It is possible that replay buffer's data count is too few to train ``update_per_collect`` times
                logging.warning(
                    "Replay buffer's data can only train for {} steps. ".format(i) +
                    "You can modify data collect config, e.g. increasing n_sample, n_episode."
                )
                break
            learner.train(train_data, collector.envstep)

            if learner.policy.get_attribute('priority'):
                replay_buffer.update(learner.priority_info)
        replay_buffer.clear()  # NOTE

        # NOTE: for the case collector_env_num>1, because after the random collect phase, self._traj_buffer[env_id] may
        # be not empty. Only if the condition "timestep.done or len(self._traj_buffer[env_id]) == self._traj_len" is
        # satisfied, the self._traj_buffer will be clear. For our alg., the data in self._traj_buffer[env_id],
        # latent_action=False, cannot be used in rl_vae phase.
        collector.reset(policy.collect_mode)

    count = 0
    while True:
        collect_kwargs = commander.step()
        # Evaluate policy performance
        if evaluator.should_eval(learner.train_iter):
            stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
            if stop:
                break
        # Collect data by default config n_sample/n_episode
        new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)
        for item in new_data:
            item['warm_up'] = False
        replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)
        replay_buffer_recent.push(copy.deepcopy(new_data), cur_collector_envstep=collector.envstep)

        # rl phase
        if count % cfg.policy.learn.rl_vae_update_circle in range(0, cfg.policy.learn.rl_vae_update_circle):
            # Learn policy from collected data
            for i in range(cfg.policy.learn.update_per_collect_rl):
                # Learner will train ``update_per_collect`` times in one iteration.
                train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
                if train_data is not None:
                    for item in train_data:
                        item['rl_phase'] = True
                        item['vae_phase'] = False
                if train_data is None:
                    # It is possible that replay buffer's data count is too few to train ``update_per_collect`` times
                    logging.warning(
                        "Replay buffer's data can only train for {} steps. ".format(i) +
                        "You can modify data collect config, e.g. increasing n_sample, n_episode."
                    )
                    break
                learner.train(train_data, collector.envstep)
                if learner.policy.get_attribute('priority'):
                    replay_buffer.update(learner.priority_info)

        # vae phase
        if count % cfg.policy.learn.rl_vae_update_circle in range(cfg.policy.learn.rl_vae_update_circle - 1,
                                                                  cfg.policy.learn.rl_vae_update_circle):
            for i in range(cfg.policy.learn.update_per_collect_vae):
                # Learner will train ``update_per_collect`` times in one iteration.
                # TODO(pu): different sample style
                train_data_history = replay_buffer.sample(
                    int(learner.policy.get_attribute('batch_size') / 2), learner.train_iter
                )
                train_data_recent = replay_buffer_recent.sample(
                    int(learner.policy.get_attribute('batch_size') / 2), learner.train_iter
                )
                train_data = train_data_history + train_data_recent

                if train_data is not None:
                    for item in train_data:
                        item['rl_phase'] = False
                        item['vae_phase'] = True
                if train_data is None:
                    # It is possible that replay buffer's data count is too few to train ``update_per_collect`` times
                    logging.warning(
                        "Replay buffer's data can only train for {} steps. ".format(i) +
                        "You can modify data collect config, e.g. increasing n_sample, n_episode."
                    )
                    break
                learner.train(train_data, collector.envstep)
                if learner.policy.get_attribute('priority'):
                    replay_buffer.update(learner.priority_info)
            replay_buffer_recent.clear()  # NOTE
        if collector.envstep >= max_env_step or learner.train_iter >= max_train_iter:
            break
        count += 1

    # Learner's after_run hook.
    learner.call_hook('after_run')
    return policy
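In the main loop above, the interleaving of RL and VAE updates is controlled by cfg.policy.learn.rl_vae_update_circle: the RL branch's membership test is satisfied on every collect iteration, while the VAE branch only matches when count % rl_vae_update_circle == rl_vae_update_circle - 1, i.e. once at the end of each circle. The following standalone sketch reproduces just that scheduling logic; the numeric values are illustrative assumptions, not taken from any shipped config.

# Standalone sketch of the rl/vae update schedule used in the main loop above.
# The concrete numbers are illustrative assumptions, not from a shipped config.
rl_vae_update_circle = 5       # assumed: collect iterations per circle
num_iterations = 12            # assumed: total collect iterations to simulate

for count in range(num_iterations):
    # RL branch: `count % circle in range(0, circle)` is always True,
    # so RL updates run after every collect iteration.
    rl = count % rl_vae_update_circle in range(0, rl_vae_update_circle)
    # VAE branch: only True when count % circle == circle - 1,
    # i.e. once at the end of each circle.
    vae = count % rl_vae_update_circle in range(rl_vae_update_circle - 1, rl_vae_update_circle)
    print(f"iteration {count}: rl_update={rl}, vae_update={vae}")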