ding.entry.serial_entry_plr

```python
serial_pipeline_plr(input_cfg, seed=0, env_setting=None, model=None, max_train_iter=int(1e10), max_env_step=int(1e10))
```

Overview

Serial pipeline entry for Prioritized Level Replay (PLR).

Arguments:

- input_cfg (`Union[str, Tuple[dict, dict]]`): Config in dict type. `str` type means config file path. `Tuple[dict, dict]` type means [user_config, create_cfg].
- seed (`int`): Random seed.
- env_setting (`Optional[List[Any]]`): A list with 3 elements: `BaseEnv` subclass, collector env config, and evaluator env config.
- model (`Optional[torch.nn.Module]`): Instance of `torch.nn.Module`.
- max_train_iter (`Optional[int]`): Maximum policy update iterations in training.
- max_env_step (`Optional[int]`): Maximum collected environment interaction steps.

Returns:

- policy (`Policy`): Converged policy.
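
A minimal invocation sketch is shown below. The config module name `my_plr_config` and its contents are placeholders rather than part of this entry's API; any DI-engine config pair whose policy supports the `_command` mode and that defines the `level_replay` field consumed by `LevelSampler` should work.

```python
# Hypothetical usage sketch: `my_plr_config` is a placeholder module; substitute a real
# DI-engine config pair (user config + create config) that defines cfg.level_replay.
from ding.entry.serial_entry_plr import serial_pipeline_plr
from my_plr_config import main_config, create_config  # hypothetical config module

policy = serial_pipeline_plr(
    (main_config, create_config),  # alternatively, pass a config file path as a str
    seed=0,
    max_env_step=int(1e6),  # stop after one million collected environment steps
)
```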

Full Source Code

../ding/entry/serial_entry_plr.py

```python
from typing import Union, Optional, List, Any, Tuple
import os
import torch
import logging
from functools import partial
from tensorboardX import SummaryWriter
from copy import deepcopy

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \
    create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy, PolicyFactory
from ding.reward_model import create_reward_model
from ding.utils import set_pkg_seed
from ding.data.level_replay.level_sampler import LevelSampler
from ding.policy.common_utils import default_preprocess_learn


def generate_seeds(num_seeds=500, base_seed=0):
    return [base_seed + i for i in range(num_seeds)]


def serial_pipeline_plr(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        env_setting: Optional[List[Any]] = None,
        model: Optional[torch.nn.Module] = None,
        max_train_iter: Optional[int] = int(1e10),
        max_env_step: Optional[int] = int(1e10),
) -> 'Policy':  # noqa
    """
    Overview:
        Serial pipeline entry for Priority Level Replay.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \
            ``BaseEnv`` subclass, collector env config, and evaluator env config.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
        - max_train_iter (:obj:`Optional[int]`): Maximum policy update iterations in training.
        - max_env_step (:obj:`Optional[int]`): Maximum collected environment interaction steps.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
    """
    if isinstance(input_cfg, str):
        cfg, create_cfg = read_config(input_cfg)
    else:
        cfg, create_cfg = deepcopy(input_cfg)
    create_cfg.policy.type = create_cfg.policy.type + '_command'
    env_fn = None if env_setting is None else env_setting[0]
    cfg = compile_config(cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True)
    collector_env_num = cfg.env.collector_env_num
    # Create main components: env, policy
    if env_setting is None:
        env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    else:
        env_fn, collector_env_cfg, evaluator_env_cfg = env_setting
    collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    collector_env.seed(cfg.seed, dynamic_seed=False)
    evaluator_env.seed(cfg.seed, dynamic_seed=True)
    train_seeds = generate_seeds()
    level_sampler = LevelSampler(
        train_seeds, cfg.policy.model.obs_shape, cfg.policy.model.action_shape, collector_env_num, cfg.level_replay
    )
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])

    # Create worker components: learner, collector, evaluator, replay buffer, commander.
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    collector = create_serial_collector(
        cfg.policy.collect.collector,
        env=collector_env,
        policy=policy.collect_mode,
        tb_logger=tb_logger,
        exp_name=cfg.exp_name
    )
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.collect_mode, tb_logger, exp_name=cfg.exp_name
    )
    commander = BaseSerialCommander(
        cfg.policy.other.commander, learner, collector, evaluator, None, policy.command_mode
    )

    # ==========
    # Main loop
    # ==========
    # Learner's before_run hook.
    learner.call_hook('before_run')

    seeds = [int(level_sampler.sample('sequential')) for _ in range(collector_env_num)]
    # default_preprocess_learn function can only deal with the Tensor data
    level_seeds = torch.Tensor(seeds)

    collector_env.seed(seeds)
    collector_env.reset()

    while True:
        collect_kwargs = commander.step()
        # Evaluate policy performance
        if evaluator.should_eval(learner.train_iter):
            stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
            if stop:
                break
        # Collect data by default config n_sample/n_episode
        new_data = collector.collect(
            train_iter=learner.train_iter, level_seeds=level_seeds, policy_kwargs=collect_kwargs
        )
        # Learn policy from collected data
        learner.train(new_data, collector.envstep)
        stacked_data = default_preprocess_learn(new_data, ignore_done=cfg.policy.learn.ignore_done, use_nstep=False)
        level_sampler.update_with_rollouts(stacked_data, collector_env_num)
        seeds = [int(level_sampler.sample()) for _ in range(collector_env_num)]
        level_seeds = torch.Tensor(seeds)
        collector_env.seed(seeds)
        collector_env.reset()
        if collector.envstep >= max_env_step or learner.train_iter >= max_train_iter:
            break
    # Learner's after_run hook.
    learner.call_hook('after_run')
    return policy
```
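
The PLR-specific part of the main loop is the level-seed handling: the collector environments are reseeded with sampler-chosen level seeds before every collection round, those seeds are forwarded to `collector.collect` as `level_seeds`, and the finished rollouts are scored back into the sampler so the next round of levels is drawn according to its priority scores. A condensed view of that cycle, using only calls that appear in the source above:

```python
# Initial levels are drawn sequentially so each collector env starts on a distinct seed.
seeds = [int(level_sampler.sample('sequential')) for _ in range(collector_env_num)]
level_seeds = torch.Tensor(seeds)  # tensor form, as required by default_preprocess_learn
collector_env.seed(seeds)
collector_env.reset()

# ... collector.collect(..., level_seeds=level_seeds) and learner.train(...) run here ...

# Score the rollouts, then resample levels by priority for the next round.
stacked_data = default_preprocess_learn(new_data, ignore_done=cfg.policy.learn.ignore_done, use_nstep=False)
level_sampler.update_with_rollouts(stacked_data, collector_env_num)
seeds = [int(level_sampler.sample()) for _ in range(collector_env_num)]
```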