from typing import Union, Optional, List, Any, Tuple
import os
import copy
import torch
from ditk import logging
from functools import partial
from tensorboardX import SummaryWriter
from copy import deepcopy

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \
    create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy
from ding.reward_model import create_reward_model
from ding.utils import set_pkg_seed, save_file
from .utils import random_collect


def serial_pipeline_guided_cost(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        env_setting: Optional[List[Any]] = None,
        model: Optional[torch.nn.Module] = None,
        expert_model: Optional[torch.nn.Module] = None,
        max_train_iter: Optional[int] = int(1e10),
        max_env_step: Optional[int] = int(1e10),
) -> 'Policy':  # noqa
    """
    Overview:
        Serial pipeline for guided cost learning (GCL) in DI-engine. For now, the following \
            environments are supported: CartPole, LunarLander, Hopper, HalfCheetah and Walker2d. \
            The demonstration data comes from an expert model: a well-trained model is used to \
            generate demonstration data online.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \
            ``BaseEnv`` subclass, collector env config, and evaluator env config.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module for the policy to be trained.
        - expert_model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module for the expert policy. \
            The default model is ``DQN(**cfg.policy.model)``.
        - max_train_iter (:obj:`Optional[int]`): Maximum policy update iterations in training.
        - max_env_step (:obj:`Optional[int]`): Maximum collected environment interaction steps.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
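    Examples:
        A minimal invocation sketch. ``main_config`` and ``create_config`` below are hypothetical \
            placeholders for a guided-cost-learning [user_config, create_cfg] pair of one of the \
            supported environments; substitute your own config objects.

        >>> policy = serial_pipeline_guided_cost((main_config, create_config), seed=0, max_env_step=int(1e6))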
50 """ 51 if isinstance(input_cfg, str): 52 cfg, create_cfg = read_config(input_cfg) 53 else: 54 cfg, create_cfg = deepcopy(input_cfg) 55 create_cfg.policy.type = create_cfg.policy.type + '_command' 56 env_fn = None if env_setting is None else env_setting[0] 57 cfg = compile_config(cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True) 58 # Create main components: env, policy 59 if env_setting is None: 60 env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env) 61 else: 62 env_fn, collector_env_cfg, evaluator_env_cfg = env_setting 63 collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg]) 64 expert_collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg]) 65 evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg]) 66 expert_collector_env.seed(cfg.seed) 67 collector_env.seed(cfg.seed) 68 evaluator_env.seed(cfg.seed, dynamic_seed=False) 69 expert_policy = create_policy(cfg.policy, model=expert_model, enable_field=['learn', 'collect']) 70 set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda) 71 policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command']) 72 expert_policy.collect_mode.load_state_dict(torch.load(cfg.policy.collect.model_path, map_location='cpu')) 73 # Create worker components: learner, collector, evaluator, replay buffer, commander. 74 tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial')) 75 learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name) 76 collector = create_serial_collector( 77 cfg.policy.collect.collector, 78 env=collector_env, 79 policy=policy.collect_mode, 80 tb_logger=tb_logger, 81 exp_name=cfg.exp_name 82 ) 83 expert_collector = create_serial_collector( 84 cfg.policy.collect.collector, 85 env=expert_collector_env, 86 policy=expert_policy.collect_mode, 87 tb_logger=tb_logger, 88 exp_name=cfg.exp_name 89 ) 90 evaluator = InteractionSerialEvaluator( 91 cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name 92 ) 93 replay_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name) 94 expert_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name) 95 commander = BaseSerialCommander( 96 cfg.policy.other.commander, learner, collector, evaluator, replay_buffer, policy.command_mode 97 ) 98 99 reward_model = create_reward_model(cfg.reward_model, policy.collect_mode.get_attribute('device'), tb_logger) 100 # ========== 101 # Main loop 102 # ========== 103 # Learner's before_run hook. 104 learner.call_hook('before_run') 105 106 # Accumulate plenty of data at the beginning of training. 
    if cfg.policy.get('random_collect_size', 0) > 0:
        random_collect(cfg.policy, policy, collector, collector_env, commander, replay_buffer)
    # Directory for periodically saved reward model checkpoints.
    dirname = cfg.exp_name + '/reward_model'
    if not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except FileExistsError:
            pass
    while True:
        collect_kwargs = commander.step()
        # Evaluate policy performance
        if evaluator.should_eval(learner.train_iter):
            stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
            if stop:
                break
        # Collect data by default config n_sample/n_episode
        new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)
        # NOTE: deepcopy data is very important,
        # otherwise the data in the replay buffer will be incorrectly modified.
        # NOTE: this deepcopy cannot be moved below the ``replay_buffer.push`` call, because the buffered
        # data may later be modified in-place (e.g. when it is sampled for reward model training).
        train_data = copy.deepcopy(new_data)
        expert_data = expert_collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)
        replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)
        expert_buffer.push(expert_data, cur_collector_envstep=expert_collector.envstep)
        # Train the reward model with expert demonstrations and collected samples
        for i in range(cfg.reward_model.update_per_collect):
            expert_demo = expert_buffer.sample(cfg.reward_model.batch_size, learner.train_iter)
            samp = replay_buffer.sample(cfg.reward_model.batch_size, learner.train_iter)
            reward_model.train(expert_demo, samp, learner.train_iter, collector.envstep)
        # Learn policy from collected data, with rewards estimated by the reward model
        for i in range(cfg.policy.learn.update_per_collect):
            # Learner will train ``update_per_collect`` times in one iteration.
            # Estimate rewards for the collected data with the learned reward model.
            _ = reward_model.estimate(train_data)
            if train_data is None:
                # It is possible that replay buffer's data count is too few to train ``update_per_collect`` times
                logging.warning(
                    "Replay buffer's data can only train for {} steps. ".format(i) +
                    "You can modify data collect config, e.g. increasing n_sample, n_episode."
                )
                break
            learner.train(train_data, collector.envstep)
            if learner.policy.get_attribute('priority'):
                replay_buffer.update(learner.priority_info)
        if collector.envstep >= max_env_step or learner.train_iter >= max_train_iter:
            break
        # Save the reward model periodically
        if learner.train_iter % cfg.reward_model.store_model_every_n_train == 0:
            path = os.path.join(dirname, 'iteration_{}.pth.tar'.format(learner.train_iter))
            state_dict = reward_model.state_dict_reward_model()
            save_file(path, state_dict)
    # Save the final reward model
    path = os.path.join(dirname, 'final_model.pth.tar')
    state_dict = reward_model.state_dict_reward_model()
    save_file(path, state_dict)
    # Learner's after_run hook.
    learner.call_hook('after_run')
    return policy