from typing import Union, Optional, List, Any, Tuple
import os
import time
import pickle
import numpy as np
import torch
from functools import partial
from tensorboardX import SummaryWriter
from copy import deepcopy

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy
from ding.utils import set_pkg_seed, get_rank
from .utils import maybe_init_wandb, maybe_finish_wandb


def serial_pipeline_onpolicy(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        env_setting: Optional[List[Any]] = None,
        model: Optional[torch.nn.Module] = None,
        max_train_iter: Optional[int] = int(1e10),
        max_env_step: Optional[int] = int(1e10),
) -> 'Policy':  # noqa
    """
    Overview:
        Serial pipeline entry for on-policy RL.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \
            ``BaseEnv`` subclass, collector env config, and evaluator env config.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
        - max_train_iter (:obj:`Optional[int]`): Maximum policy update iterations in training.
        - max_env_step (:obj:`Optional[int]`): Maximum collected environment interaction steps.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
    """
    if isinstance(input_cfg, str):
        cfg, create_cfg = read_config(input_cfg)
    else:
        cfg, create_cfg = deepcopy(input_cfg)
    # Switch to the command-mode variant of the policy, which exposes the
    # ``command`` interface consumed by the commander below.
    create_cfg.policy.type = create_cfg.policy.type + '_command'
    env_fn = None if env_setting is None else env_setting[0]
    cfg = compile_config(
        cfg,
        seed=seed,
        env=env_fn,
        auto=True,
        create_cfg=create_cfg,
        save_cfg=True,
        renew_dir=not cfg.policy.learn.get('resume_training', False)
    )

    # Create main components: env, policy.
    if env_setting is None:
        env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    else:
        env_fn, collector_env_cfg, evaluator_env_cfg = env_setting
    collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    collector_env.seed(cfg.seed)
    # ``dynamic_seed=False`` keeps evaluator episodes reproducible across evaluations.
    evaluator_env.seed(cfg.seed, dynamic_seed=False)
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])
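
    # A minimal sketch of an explicit ``env_setting``, for the branch above that
    # bypasses ``get_vec_env_setting``. ``MyEnv`` and the env counts are
    # illustrative assumptions, not part of this module; each entry in the two
    # config lists parameterizes one sub-environment of the matching env manager:
    #
    #     from easydict import EasyDict
    #     env_setting = [
    #         MyEnv,                                         # BaseEnv subclass
    #         [EasyDict(is_train=True) for _ in range(8)],   # collector env configs
    #         [EasyDict(is_train=False) for _ in range(5)],  # evaluator env configs
    #     ]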

    # Create worker components: learner, collector, evaluator, commander.
    wandb_run = maybe_init_wandb(cfg) if get_rank() == 0 else None
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial')) if get_rank() == 0 else None
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    collector = create_serial_collector(
        cfg.policy.collect.collector,
        env=collector_env,
        policy=policy.collect_mode,
        tb_logger=tb_logger,
        exp_name=cfg.exp_name
    )
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
    )
    # The replay buffer slot is None: on-policy training keeps no buffer.
    commander = BaseSerialCommander(
        cfg.policy.other.commander, learner, collector, evaluator, None, policy.command_mode
    )

    # ==========
    # Main loop
    # ==========
    # Learner's before_run hook.
    learner.call_hook('before_run')
    if cfg.policy.learn.get('resume_training', False):
        collector.envstep = learner.collector_envstep

    # Defaults for the case where the step/iteration budget is exhausted before
    # the first evaluation runs.
    stop, eval_info = False, None
    while True:
        collect_kwargs = commander.step()
        # Evaluate policy performance.
        if evaluator.should_eval(learner.train_iter):
            stop, eval_info = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
            if stop:
                break
        # Collect data according to the configured n_sample/n_episode.
        new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)

        # Learn policy from collected data.
        learner.train(new_data, collector.envstep)
        if collector.envstep >= max_env_step or learner.train_iter >= max_train_iter:
            break

    # Learner's after_run hook.
    learner.call_hook('after_run')
    # Dump the final evaluation result; skipped if no evaluation ever ran.
    if eval_info is not None:
        with open(os.path.join(cfg.exp_name, 'result.pkl'), 'wb') as f:
            eval_value_raw = eval_info['eval_episode_return']
            final_data = {
                'stop': stop,
                'env_step': collector.envstep,
                'train_iter': learner.train_iter,
                'eval_value': np.mean(eval_value_raw),
                'eval_value_raw': eval_value_raw,
                'finish_time': time.ctime(),
            }
            pickle.dump(final_data, f)
    if get_rank() == 0:
        maybe_finish_wandb(wandb_run)
    return policy
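
# Usage sketch: run the pipeline from a (cfg, create_cfg) tuple. The dizoo
# config module path below is an assumption based on DI-engine's bundled
# examples and may differ across versions:
#
#     from dizoo.classic_control.cartpole.config.cartpole_ppo_config import \
#         cartpole_ppo_config, cartpole_ppo_create_config
#     trained_policy = serial_pipeline_onpolicy(
#         (cartpole_ppo_config, cartpole_ppo_create_config),
#         seed=0,
#         max_env_step=int(1e5),
#     )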