ding.entry.serial_entry_bc

serial_pipeline_bc(input_cfg, seed, data_path, model=None, max_iter=int(1e6))

Overview

Serial pipeline entry point for imitation learning (behavior cloning).

Arguments:

- `input_cfg` (`Union[str, Tuple[dict, dict]]`): Config; a `str` is treated as a config file path, while a `Tuple[dict, dict]` means `[user_config, create_cfg]`.
- `seed` (`int`): Random seed.
- `data_path` (`str`): Path of the training data.
- `model` (`Optional[torch.nn.Module]`): Instance of `torch.nn.Module`.

Returns:

- `policy` (`Policy`): Converged policy.
- `convergence` (`bool`): Whether IL training converged.
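The pipeline can be driven either by a config file path or by an in-memory `(user_config, create_cfg)` pair. A minimal invocation sketch, assuming hypothetical names: the config module `my_bc_config` and the dataset path are placeholders, and the user config is assumed to define `policy.continuous`, which the pipeline reads to choose its validation metric:

```python
from ding.entry.serial_entry_bc import serial_pipeline_bc

# Hypothetical config pair; any DI-engine BC config with a matching
# create_cfg will do, as long as `policy.continuous` is defined.
from my_bc_config import main_config, create_config

policy, converged = serial_pipeline_bc(
    (main_config, create_config),   # or the path of a config file
    seed=0,
    data_path='./expert_data.pkl',  # placeholder path to the offline dataset
)
print('converged: {}'.format(converged))
```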

Full Source Code

../ding/entry/serial_entry_bc.py

```python
from typing import Union, Optional, Tuple
import os
import torch
from functools import partial
from tensorboardX import SummaryWriter
from copy import deepcopy
from torch.utils.data import DataLoader

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator
from ding.config import read_config, compile_config
from ding.policy import create_policy
from ding.utils import set_pkg_seed
from ding.utils.data import NaiveRLDataset


def serial_pipeline_bc(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int,
        data_path: str,
        model: Optional[torch.nn.Module] = None,
        max_iter=int(1e6),
) -> Tuple['Policy', bool]:  # noqa
    r"""
    Overview:
        Serial pipeline entry of imitation learning.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - data_path (:obj:`str`): Path of training data.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
        - convergence (:obj:`bool`): Whether IL training converged.
    """
    if isinstance(input_cfg, str):
        cfg, create_cfg = read_config(input_cfg)
    else:
        cfg, create_cfg = deepcopy(input_cfg)
    cfg = compile_config(cfg, seed=seed, auto=True, create_cfg=create_cfg)
    # Read the action-space flag after the config is loaded, so that str
    # config paths work as well as (user_config, create_cfg) tuples.
    cont = cfg.policy.continuous

    # Env, Policy
    env_fn, _, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    # Random seed
    evaluator_env.seed(cfg.seed, dynamic_seed=False)
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'eval'])

    # Main components
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
    dataset = NaiveRLDataset(data_path)
    # Hold out the last tenth of the dataset for validation; train on the rest.
    dataloader = DataLoader(dataset[:-len(dataset) // 10], cfg.policy.learn.batch_size, collate_fn=lambda x: x)
    eval_loader = DataLoader(
        dataset[-len(dataset) // 10:],
        cfg.policy.learn.batch_size,
    )
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
    )
    # ==========
    # Main loop
    # ==========
    learner.call_hook('before_run')
    stop = False
    iter_cnt = 0
    reward = None  # Guard against the evaluator never running before exit.
    for epoch in range(cfg.policy.learn.train_epoch):
        # Evaluate policy performance on the held-out validation split.
        loss_list = []
        for _, bat in enumerate(eval_loader):
            res = policy._forward_eval(bat['obs'])
            if cont:
                # Continuous actions: mean L1 error against expert actions.
                loss_list.append(torch.nn.L1Loss()(res['action'], bat['action'].squeeze(-1)).item())
            else:
                # Discrete actions: accuracy of the argmax over logits.
                res = torch.argmax(res['logit'], dim=1)
                loss_list.append(torch.sum(res == bat['action'].squeeze(-1)).item() / bat['action'].shape[0])
        if cont:
            label = 'validation_loss'
        else:
            label = 'validation_acc'
        tb_logger.add_scalar(label, sum(loss_list) / len(loss_list), iter_cnt)
        for i, train_data in enumerate(dataloader):
            if evaluator.should_eval(learner.train_iter):
                stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter)
                if stop:
                    break
            learner.train(train_data)
            iter_cnt += 1
            if iter_cnt >= max_iter:
                stop = True
                break
        if stop:
            break

    learner.call_hook('after_run')
    print('final reward is: {}'.format(reward))
    return policy, stop
```
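The dataloader setup holds out roughly the last tenth of the dataset for validation via negative-index slicing. A self-contained sketch of that split arithmetic, with a plain list standing in for `NaiveRLDataset`:

```python
dataset = list(range(95))  # stand-in for NaiveRLDataset samples

k = -len(dataset) // 10    # floor(-95 / 10) = -10: hold out the last 10
train_split = dataset[:k]  # first 85 samples, fed to the training DataLoader
val_split = dataset[k:]    # last 10 samples, fed to eval_loader

assert len(train_split) == 85 and len(val_split) == 10
```

Because unary minus binds tighter than `//` and floor division rounds toward negative infinity, `-len(dataset) // 10` holds out `ceil(len(dataset) / 10)` samples, so the validation split is never empty.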
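The per-epoch validation metric depends on the action space: with `policy.continuous` set, the pipeline logs the mean L1 error between predicted and expert actions as `validation_loss`; otherwise it logs argmax accuracy over the logits as `validation_acc`. A sketch of the discrete branch on dummy tensors:

```python
import torch

logit = torch.tensor([[2.0, 0.5], [0.1, 1.3], [0.9, 0.2]])  # policy output, shape (B, A)
action = torch.tensor([0, 1, 1])                            # expert labels, shape (B,)

pred = torch.argmax(logit, dim=1)  # predicted action per sample: [0, 1, 0]
acc = torch.sum(pred == action).item() / action.shape[0]
print(acc)  # 2 of 3 correct -> ~0.667
```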