ding.entry.serial_entry_ngu

serial_pipeline_ngu(input_cfg, seed=0, env_setting=None, model=None, max_train_iter=int(1e10), max_env_step=int(1e10))

Overview

Serial pipeline entry for NGU. The corresponding paper is Never Give Up: Learning Directed Exploration Strategies.

Arguments:

- input_cfg (Union[str, Tuple[dict, dict]]): Config in dict type. str type means config file path. Tuple[dict, dict] type means [user_config, create_cfg].
- seed (int): Random seed.
- env_setting (Optional[List[Any]]): A list with 3 elements: BaseEnv subclass, collector env config, and evaluator env config.
- model (Optional[torch.nn.Module]): Instance of torch.nn.Module.
- max_train_iter (Optional[int]): Maximum policy update iterations in training.
- max_env_step (Optional[int]): Maximum collected environment interaction steps.

Returns:

- policy (Policy): Converged policy.
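For orientation, here is a minimal usage sketch. It assumes serial_pipeline_ngu is re-exported from ding.entry (as the module path above suggests); the config file path and the max_env_step budget are illustrative placeholders, not values taken from this page.

# Minimal usage sketch; the config path below is a hypothetical placeholder.
from ding.entry import serial_pipeline_ngu

if __name__ == "__main__":
    # input_cfg may be a config file path (str) or a (user_config, create_cfg) tuple.
    policy = serial_pipeline_ngu(
        './my_ngu_config.py',   # hypothetical NGU config file
        seed=0,
        max_env_step=int(1e6),  # example budget: stop after 1M collected env steps
    )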

Full Source Code

../ding/entry/serial_entry_ngu.py

from typing import Union, Optional, List, Any, Tuple
import os
import torch
from ditk import logging
from functools import partial
from tensorboardX import SummaryWriter
from copy import deepcopy

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \
    create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy
from ding.reward_model import create_reward_model
from ding.utils import set_pkg_seed
from .utils import random_collect


def serial_pipeline_ngu(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        env_setting: Optional[List[Any]] = None,
        model: Optional[torch.nn.Module] = None,
        max_train_iter: Optional[int] = int(1e10),
        max_env_step: Optional[int] = int(1e10),
) -> 'Policy':  # noqa
    """
    Overview:
        Serial pipeline entry for NGU. The corresponding paper is
        `never give up: learning directed exploration strategies`.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \
            ``BaseEnv`` subclass, collector env config, and evaluator env config.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
        - max_train_iter (:obj:`Optional[int]`): Maximum policy update iterations in training.
        - max_env_step (:obj:`Optional[int]`): Maximum collected environment interaction steps.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
    """
    if isinstance(input_cfg, str):
        cfg, create_cfg = read_config(input_cfg)
    else:
        cfg, create_cfg = deepcopy(input_cfg)
    create_cfg.policy.type = create_cfg.policy.type + '_command'
    env_fn = None if env_setting is None else env_setting[0]
    cfg = compile_config(
        cfg,
        seed=seed,
        env=env_fn,
        auto=True,
        create_cfg=create_cfg,
        save_cfg=True,
        renew_dir=not cfg.policy.learn.get('resume_training', False)
    )
    # Create main components: env, policy
    if env_setting is None:
        env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    else:
        env_fn, collector_env_cfg, evaluator_env_cfg = env_setting
    collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    # if you want to save replay, please uncomment this line
    # evaluator_env.enable_save_replay(cfg.env.replay_path)

    collector_env.seed(cfg.seed)
    evaluator_env.seed(cfg.seed, dynamic_seed=False)
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])

    # Create worker components: learner, collector, evaluator, replay buffer, commander.
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    collector = create_serial_collector(
        cfg.policy.collect.collector,
        env=collector_env,
        policy=policy.collect_mode,
        tb_logger=tb_logger,
        exp_name=cfg.exp_name
    )
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
    )
    replay_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name)
    commander = BaseSerialCommander(
        cfg.policy.other.commander, learner, collector, evaluator, replay_buffer, policy.command_mode
    )
    rnd_reward_model = create_reward_model(cfg.rnd_reward_model, policy.collect_mode.get_attribute('device'), tb_logger)
    episodic_reward_model = create_reward_model(
        cfg.episodic_reward_model, policy.collect_mode.get_attribute('device'), tb_logger
    )
    # ==========
    # Main loop
    # ==========
    # Learner's before_run hook.
    learner.call_hook('before_run')
    if cfg.policy.learn.get('resume_training', False):
        collector.envstep = learner.collector_envstep

    # Accumulate plenty of data at the beginning of training.
    if cfg.policy.get('random_collect_size', 0) > 0:
        random_collect(cfg.policy, policy, collector, collector_env, commander, replay_buffer)

    estimate_cnt = 0
    iter_ = 0
    while True:
        """some hyper-parameters used in NGU"""
        # index_to_eps = {i: 0.4 ** (1 + 8 * i / (self._env_num - 1)) for i in range(self._env_num)}
        # index_to_beta = {
        #     i: 0.3 * torch.sigmoid(torch.tensor(10 * (2 * i - (collector_env_num - 2)) / (collector_env_num - 2)))
        #     for i in range(collector_env_num)
        # }
        # index_to_gamma = {
        #     i: 1 - torch.exp(
        #         (
        #             (collector_env_num - 1 - i) * torch.log(torch.tensor(1 - 0.997)) +
        #             i * torch.log(torch.tensor(1 - 0.99))
        #         ) / (collector_env_num - 1)
        #     )
        #     for i in range(collector_env_num)
        # }
        iter_ += 1

        # Evaluate policy performance
        if evaluator.should_eval(learner.train_iter):
            stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
            if stop:
                break
        # Collect data by default config n_sample/n_episode
        new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=None)

        # collect data for reward_model training
        rnd_reward_model.collect_data(new_data)
        episodic_reward_model.collect_data(new_data)
        replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)

        # update reward_model
        rnd_reward_model.train()
        if (iter_ + 1) % cfg.rnd_reward_model.clear_buffer_per_iters == 0:
            rnd_reward_model.clear_data()
        episodic_reward_model.train()
        if (iter_ + 1) % cfg.episodic_reward_model.clear_buffer_per_iters == 0:
            episodic_reward_model.clear_data()

        # Learn policy from collected data
        for i in range(cfg.policy.learn.update_per_collect):
            # Learner will train ``update_per_collect`` times in one iteration.
            train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
            if train_data is None:
                # It is possible that replay buffer's data count is too few to train ``update_per_collect`` times
                logging.warning(
                    "Replay buffer's data can only train for {} steps. ".format(i) +
                    "You can modify data collect config, e.g. increasing n_sample, n_episode."
                )
                break
            # calculate the inter-episodic and episodic intrinsic reward
            rnd_reward = rnd_reward_model.estimate(train_data)
            episodic_reward = episodic_reward_model.estimate(train_data)

            # update train_data reward using the augmented reward
            train_data_augmented, estimate_cnt = episodic_reward_model.fusion_reward(
                train_data,
                rnd_reward,
                episodic_reward,
                nstep=cfg.policy.nstep,
                collector_env_num=cfg.policy.collect.env_num,
                tb_logger=tb_logger,
                estimate_cnt=estimate_cnt
            )
            learner.train(train_data_augmented, collector.envstep)
            if learner.policy.get_attribute('priority'):
                replay_buffer.update(learner.priority_info)
        if collector.envstep >= max_env_step or learner.train_iter >= max_train_iter:
            break

    # Learner's after_run hook.
    learner.call_hook('after_run')
    return policy
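The commented-out block at the top of the main loop records the per-collector-index schedules NGU uses for the exploration rate epsilon, the intrinsic-reward weight beta, and the discount factor gamma. The sketch below simply evaluates those same formulas as a standalone helper so the schedules can be inspected; the function name ngu_hyperparameters and the default collector_env_num=8 are assumptions for illustration, not part of this file.

import torch


def ngu_hyperparameters(collector_env_num: int = 8):
    """Evaluate the per-index schedules from the commented block in the main loop.

    Index i selects one parallel collector environment:
    - eps   decreases with i, from 0.4 down to 0.4 ** 9,
    - beta  increases with i, from ~0 up to ~0.3 (weight of the intrinsic reward),
    - gamma decreases with i, from 0.997 down to 0.99 (discount factor).
    """
    n = collector_env_num
    index_to_eps = {i: 0.4 ** (1 + 8 * i / (n - 1)) for i in range(n)}
    index_to_beta = {
        i: 0.3 * torch.sigmoid(torch.tensor(10 * (2 * i - (n - 2)) / (n - 2))).item()
        for i in range(n)
    }
    index_to_gamma = {
        i: 1 - torch.exp(
            ((n - 1 - i) * torch.log(torch.tensor(1 - 0.997)) + i * torch.log(torch.tensor(1 - 0.99))) / (n - 1)
        ).item()
        for i in range(n)
    }
    return index_to_eps, index_to_beta, index_to_gamma


if __name__ == "__main__":
    eps, beta, gamma = ngu_hyperparameters(8)
    print(eps[0], beta[0], gamma[0])  # index 0: largest eps, smallest beta, largest gamma
    print(eps[7], beta[7], gamma[7])  # index 7: smallest eps, largest beta, smallest gamma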