ding.entry.serial_entry_pc

serial_pipeline_pc(input_cfg, seed=0, model=None, max_iter=int(1e6))

Overview

Serial pipeline entry for procedure cloning (PC), using BFS as the expert policy.

Arguments:

- input_cfg (Union[str, Tuple[dict, dict]]): Config for the pipeline. A str is interpreted as a config file path; a Tuple[dict, dict] is interpreted as [user_config, create_cfg].
- seed (int): Random seed.
- model (Optional[torch.nn.Module]): Instance of torch.nn.Module.
- max_iter (Optional[int]): Maximum number of iterations to run PC training.

Returns:

- policy (Policy): Converged policy.
- convergence (bool): Whether the training converged.
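
A minimal usage sketch showing both accepted forms of input_cfg (the config path and module names below are illustrative, not part of the library):

from ding.entry.serial_entry_pc import serial_pipeline_pc

# Form 1: path to a Python config file (hypothetical path).
policy, converged = serial_pipeline_pc('./pc_maze_config.py', seed=0)

# Form 2: a (user_config, create_cfg) tuple, e.g. imported from a config
# module (main_config and create_config are illustrative names):
# from pc_maze_config import main_config, create_config
# policy, converged = serial_pipeline_pc((main_config, create_config), seed=0, max_iter=100000)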

Full Source Code

../ding/entry/serial_entry_pc.py

from typing import Union, Optional, Tuple
import os
from functools import partial
from copy import deepcopy

import torch
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator
from ding.config import read_config, compile_config
from ding.policy import create_policy
from ding.utils import set_pkg_seed
from ding.utils.data.dataset import load_bfs_datasets


def serial_pipeline_pc(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        model: Optional[torch.nn.Module] = None,
        max_iter: Optional[int] = int(1e6),
) -> Tuple['Policy', bool]:  # noqa
    r"""
    Overview:
        Serial pipeline entry of procedure cloning using BFS as expert policy.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
        - max_iter (:obj:`Optional[int]`): Max iteration for executing PC training.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
        - convergence (:obj:`bool`): Whether the training converged.
    """
    if isinstance(input_cfg, str):
        cfg, create_cfg = read_config(input_cfg)
    else:
        cfg, create_cfg = deepcopy(input_cfg)
    cfg = compile_config(cfg, seed=seed, auto=True, create_cfg=create_cfg)

    # Env, Policy
    env_fn, _, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    # Random seed
    evaluator_env.seed(cfg.seed, dynamic_seed=False)
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'eval'])

    # Main components
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
    train_data, test_data = load_bfs_datasets(train_seeds=cfg.train_seeds)
    dataloader = DataLoader(train_data, batch_size=cfg.policy.learn.batch_size, shuffle=True)
    test_dataloader = DataLoader(test_data, batch_size=cfg.policy.learn.batch_size, shuffle=True)
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
    )

    # ==========
    # Main loop
    # ==========
    learner.call_hook('before_run')
    stop = False
    iter_cnt = 0
    reward = None  # Guards the final print when max_iter stops training before the evaluator runs.
    for epoch in range(cfg.policy.learn.train_epoch):
        # Train on the BFS expert dataset.
        criterion = torch.nn.CrossEntropyLoss()
        for i, train_data in enumerate(dataloader):
            learner.train(train_data)
            iter_cnt += 1
            if iter_cnt >= max_iter:
                stop = True
                break
        # Decay the learning rate by 10x every 69 epochs (including epoch 0).
        if epoch % 69 == 0:
            policy._optimizer.param_groups[0]['lr'] /= 10
        if stop:
            break
        losses = []
        acces = []
        # Evaluate loss/accuracy on the held-out BFS dataset.
        for _, test_data in enumerate(test_dataloader):
            observations, bfs_input_maps, bfs_output_maps = test_data['obs'], test_data['bfs_in'].long(), \
                test_data['bfs_out'].long()
            states = observations
            bfs_input_onehot = torch.nn.functional.one_hot(bfs_input_maps, 5).float()

            bfs_states = torch.cat([
                states,
                bfs_input_onehot,
            ], dim=-1).cuda()
            logits = policy._model(bfs_states)['logit']
            logits = logits.flatten(0, -2)
            labels = bfs_output_maps.flatten(0, -1).cuda()

            loss = criterion(logits, labels).item()
            preds = torch.argmax(logits, dim=-1)
            acc = torch.sum((preds == labels)) / preds.shape[0]

            losses.append(loss)
            acces.append(acc)
        print('Test Finished! Loss: {} acc: {}'.format(sum(losses) / len(losses), sum(acces) / len(acces)))
        stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter)
    learner.call_hook('after_run')
    print('final reward is: {}'.format(reward))
    return policy, stop
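
In the evaluation loop above, the model is conditioned on the current BFS map by one-hot encoding it with 5 classes and concatenating it to the observation along the last dimension, then collapsing everything but the class dimension before the cross-entropy loss. A standalone sketch of that tensor manipulation, using hypothetical shapes:

import torch

# Hypothetical shapes: a batch of 2 grid observations of size 3x3 with 4
# feature channels, and a BFS map whose cells take one of 5 class values.
states = torch.randn(2, 3, 3, 4)
bfs_input_maps = torch.randint(0, 5, (2, 3, 3))

bfs_input_onehot = torch.nn.functional.one_hot(bfs_input_maps, 5).float()  # (2, 3, 3, 5)
bfs_states = torch.cat([states, bfs_input_onehot], dim=-1)                 # (2, 3, 3, 9)

# Collapse every dimension except the last, as the pipeline does with
# logits.flatten(0, -2), so each grid cell becomes one classification row.
rows = bfs_states.flatten(0, -2)  # (18, 9)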