Advertisement
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
- model.fcn16 __init__:13 :3.9 Mb super().__init__(n_classes)
- start maybe_cuda:85 :1370.9 Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ run_adaptation:314 :1370.9 Mb shift = -3
- __main__ run_adaptation:316 :1370.9 Mb setup, conf, stdout_tee = setup_config.get_setup_conf_log_dump(
- __main__ run_adaptation:317 :1370.9 Mb distance_name, path='./logs/'
- setup_config get_setup_conf_log_dump:72 :1370.9 Mb if '___' in setup_name:
- setup_config get_setup_conf_log_dump:75 :1370.9 Mb load_dict_name, log_fn_format = setup_name, setup_name
- setup_config get_setup_conf_log_dump:77 :1370.9 Mb setup_w_dump = get_setup_and_dump(load_dict_name, override)
- setup_config get_setup_and_dump:59 :1370.9 Mb override = override or {}
- setup_config get_setup_and_dump:60 :1370.9 Mb setup_dict = {'data': {}} # defaults
- setup_config get_setup_and_dump:61 :1370.9 Mb setup_dict = deep_dict_update(setup_dict, distance_config_[setup_name])
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:16 :1370.9 Mb if isinstance(v, collections.Mapping):
- setup_config deep_dict_update:20 :1370.9 Mb d[k] = u[k]
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:16 :1370.9 Mb if isinstance(v, collections.Mapping):
- setup_config deep_dict_update:17 :1370.9 Mb r = deep_dict_update(d.get(k, {}), v)
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:16 :1370.9 Mb if isinstance(v, collections.Mapping):
- setup_config deep_dict_update:20 :1370.9 Mb d[k] = u[k]
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:16 :1370.9 Mb if isinstance(v, collections.Mapping):
- setup_config deep_dict_update:20 :1370.9 Mb d[k] = u[k]
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:16 :1370.9 Mb if isinstance(v, collections.Mapping):
- setup_config deep_dict_update:20 :1370.9 Mb d[k] = u[k]
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:21 :1370.9 Mb return d
- setup_config deep_dict_update:18 :1370.9 Mb d[k] = r
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:16 :1370.9 Mb if isinstance(v, collections.Mapping):
- setup_config deep_dict_update:20 :1370.9 Mb d[k] = u[k]
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:21 :1370.9 Mb return d
- setup_config get_setup_and_dump:62 :1370.9 Mb setup_dict = deep_dict_update(setup_dict, override)
- setup_config deep_dict_update:15 :1370.9 Mb for k, v in u.items():
- setup_config deep_dict_update:21 :1370.9 Mb return d
- setup_config get_setup_and_dump:64 :1370.9 Mb 'optimizer': _exec_config(setup_dict.get('optimizer', None)),
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:37 :1370.9 Mb and len(packed_object) == 2
- setup_config _exec_config:38 :1370.9 Mb and callable(packed_object[0])
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:37 :1370.9 Mb and len(packed_object) == 2
- setup_config _exec_config:38 :1370.9 Mb and callable(packed_object[0])
- setup_config _exec_config:39 :1370.9 Mb and type(packed_object[1]) is dict):
- setup_config _exec_config:40 :1370.9 Mb repacked_object = partial(packed_object[0], **packed_object[1])
- setup_config _exec_config:41 :1370.9 Mb if isinstance(packed_object[0], types.FunctionType):
- setup_config _exec_config:44 :1370.9 Mb return repacked_object
- setup_config get_setup_and_dump:65 :1370.9 Mb 'distance': _exec_config(setup_dict.get('distance', None)),
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:37 :1370.9 Mb and len(packed_object) == 2
- setup_config _exec_config:38 :1370.9 Mb and callable(packed_object[0])
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:34 :1370.9 Mb packed_object = type(packed_object)(map(_exec_config, packed_object))
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:37 :1370.9 Mb and len(packed_object) == 2
- setup_config _exec_config:38 :1370.9 Mb and callable(packed_object[0])
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:25 :1370.9 Mb if packed_object is None:
- setup_config _exec_config:30 :1370.9 Mb if type(packed_object) is dict:
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:37 :1370.9 Mb and len(packed_object) == 2
- setup_config _exec_config:38 :1370.9 Mb and callable(packed_object[0])
- setup_config _exec_config:39 :1370.9 Mb and type(packed_object[1]) is dict):
- setup_config _exec_config:40 :1370.9 Mb repacked_object = partial(packed_object[0], **packed_object[1])
- setup_config _exec_config:41 :1370.9 Mb if isinstance(packed_object[0], types.FunctionType):
- setup_config _exec_config:44 :1370.9 Mb return repacked_object
- setup_config <dictcomp>:31 :1370.9 Mb packed_object = {k: _exec_config(v) for k, v in packed_object.items()}
- setup_config _exec_config:33 :1370.9 Mb if isinstance(packed_object, (list, tuple)):
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:46 :1370.9 Mb return packed_object
- setup_config _exec_config:36 :1370.9 Mb if (type(packed_object) is tuple
- setup_config _exec_config:37 :1370.9 Mb and len(packed_object) == 2
- setup_config _exec_config:38 :1370.9 Mb and callable(packed_object[0])
- setup_config _exec_config:39 :1370.9 Mb and type(packed_object[1]) is dict):
- setup_config _exec_config:40 :1370.9 Mb repacked_object = partial(packed_object[0], **packed_object[1])
- setup_config _exec_config:41 :1370.9 Mb if isinstance(packed_object[0], types.FunctionType):
- setup_config _exec_config:44 :1370.9 Mb return repacked_object
- setup_config get_setup_and_dump:66 :1370.9 Mb 'data': lambda: read_pytorch(**setup_dict['data']),
- setup_config get_setup_and_dump:67 :1370.9 Mb }, setup_dict)
- setup_config get_setup_conf_log_dump:78 :1370.9 Mb time_str = datetime.datetime.now().__format__(time_fmt)
- setup_config get_setup_conf_log_dump:79 :1370.9 Mb os.makedirs(path, exist_ok=True)
- setup_config get_setup_conf_log_dump:80 :1370.9 Mb fn = os.path.join(path, fn_fmt.format(setup=log_fn_format, now=time_str))
- setup_config get_setup_conf_log_dump:81 :1370.9 Mb with open(fn, 'w') as f:
- setup_config get_setup_conf_log_dump:82 :1370.9 Mb f.write(custom_json.dumps(setup_w_dump[1], indent=2))
- utils.custom_json default:14 :1370.9 Mb if isinstance(obj, type):
- utils.custom_json default:15 :1370.9 Mb return {'_type': (obj.__module__, obj.__name__)}
- utils.custom_json default:14 :1370.9 Mb if isinstance(obj, type):
- utils.custom_json default:15 :1370.9 Mb return {'_type': (obj.__module__, obj.__name__)}
- utils.custom_json default:14 :1370.9 Mb if isinstance(obj, type):
- utils.custom_json default:15 :1370.9 Mb return {'_type': (obj.__module__, obj.__name__)}
- setup_config get_setup_conf_log_dump:83 :1370.9 Mb tee_context_manager = tee_stdout(fn+'.log')
- setup_config get_setup_conf_log_dump:84 :1370.9 Mb return setup_w_dump[0], setup_w_dump[1], tee_context_manager
- __main__ run_adaptation:320 :1370.9 Mb distance_function = setup['distance']().cuda()
- distances.mlp __init__:16 :1370.9 Mb super().__init__(shapes, *argv, **kwargs)
- distances.mlp_base __init__:18 :1370.9 Mb super().__init__()
- distances.mlp_base __init__:19 :1370.9 Mb self.full_data_pretrain_n = full_data_pretrain_n
- distances.mlp_base __init__:20 :1370.9 Mb self._inited = False
- distances.mlp_base __init__:21 :1370.9 Mb self._step_every_repeat_n = step_every_repeat_n
- distances.mlp_base __init__:22 :1370.9 Mb self._shapes = shapes
- distances.mlp_base __init__:23 :1370.9 Mb self._optimizer_builder = optimizer_builder
- distances.mlp_base __init__:24 :1370.9 Mb self._initializer = initializer
- distances.mlp_base __init__:25 :1370.9 Mb self._attempt_on_forward = attempt_update_on_forward
- distances.mlp_base __init__:26 :1370.9 Mb self._update_attempt_counter = 0
- distances.mlp_base __init__:27 :1370.9 Mb self._start_from_n_updates = start_from_n_updates
- distances.mlp_base __init__:28 :1370.9 Mb self._sub_init(kwargs)
- distances.mlp_base _sub_init:50 :1370.9 Mb pass
- distances.mlp_base __init__:29 :1370.9 Mb self.draw_method_list.append(self.draw_contour)
- distances.mlp __init__:17 :1370.9 Mb self.stored_y = None
- __main__ run_adaptation:321 :1370.9 Mb optimizer = setup['optimizer'](target_model.parameters())
- __main__ run_adaptation:322 :1370.9 Mb if share_embedding:
- __main__ run_adaptation:323 :1370.9 Mb base_optimizer = setup['optimizer'](base_model.parameters())
- __main__ run_adaptation:325 :1370.9 Mb train_loaders = setup['data']()
- setup_config <lambda>:66 :1370.9 Mb 'data': lambda: read_pytorch(**setup_dict['data']),
- read_data read_pytorch:107 :1370.9 Mb if dataset == 'svhn2mnist':
- read_data read_pytorch:109 :1370.9 Mb elif dataset == 'gta2city':
- read_data read_pytorch:110 :1370.9 Mb return read_gta2city(batch_size, cuda, same_batch_n, shuffle, **kwargs)
- read_data read_gta2city:210 :1370.9 Mb kwargs = {'num_workers': workers, 'pin_memory': pin_memory} if cuda else {}
- read_data read_gta2city:212 :1370.9 Mb assert not (batch_size > 0 and same_batch_n > 1), \
- read_data read_gta2city:215 :1370.9 Mb if batch_size > 0 and same_batch_n == 1:
- read_data read_gta2city:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:224 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:224 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:224 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:236 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:236 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:236 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:241 :1370.9 Mb return source_loader, target_loader
- __main__ run_adaptation:326 :1370.9 Mb eval_loaders = setup['data']()
- setup_config <lambda>:66 :1370.9 Mb 'data': lambda: read_pytorch(**setup_dict['data']),
- read_data read_pytorch:107 :1370.9 Mb if dataset == 'svhn2mnist':
- read_data read_pytorch:109 :1370.9 Mb elif dataset == 'gta2city':
- read_data read_pytorch:110 :1370.9 Mb return read_gta2city(batch_size, cuda, same_batch_n, shuffle, **kwargs)
- read_data read_gta2city:210 :1370.9 Mb kwargs = {'num_workers': workers, 'pin_memory': pin_memory} if cuda else {}
- read_data read_gta2city:212 :1370.9 Mb assert not (batch_size > 0 and same_batch_n > 1), \
- read_data read_gta2city:215 :1370.9 Mb if batch_size > 0 and same_batch_n == 1:
- read_data read_gta2city:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:224 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:224 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:224 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:216 :1370.9 Mb source_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:236 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:236 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data <dictcomp>:236 :1370.9 Mb for split_name in ['train', 'test']
- read_data <dictcomp>:227 :1370.9 Mb target_loader: Mapping[str, torch.utils.data.DataLoader] = {
- read_data read_gta2city:241 :1370.9 Mb return source_loader, target_loader
- __main__ run_adaptation:328 :1370.9 Mb with stdout_tee:
- utils.tee_stdout tee_stdout:22 :1370.9 Mb print('teeing std to', f_names)
- utils.tee_stdout tee_stdout:23 :1370.9 Mb f_objects = [open(f_name, 'w') for f_name in f_names]
- utils.tee_stdout <listcomp>:23 :1370.9 Mb f_objects = [open(f_name, 'w') for f_name in f_names]
- utils.tee_stdout <listcomp>:23 :1370.9 Mb f_objects = [open(f_name, 'w') for f_name in f_names]
- utils.tee_stdout tee_stdout:24 :1370.9 Mb with contextlib.redirect_stdout(MergedIO(sys.stdout, *f_objects)):
- utils.tee_stdout __init__:9 :1370.9 Mb self._file_objects = file_objects
- utils.tee_stdout tee_stdout:25 :1370.9 Mb yield
- __main__ run_adaptation:329 :1370.9 Mb print(conf)
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- __main__ run_adaptation:330 :1370.9 Mb print(snapshot_name)
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- __main__ run_adaptation:332 :1370.9 Mb run_iterations(distance_function, distance_name, eval_loaders, shift,
- __main__ run_adaptation:333 :1370.9 Mb train_loaders)
- __main__ run_iterations:340 :1370.9 Mb for sub_epoch_i in range(total_epochs):
- __main__ run_iterations:348 :1370.9 Mb if eval_on_first_iter is True and sub_epoch_i == 0:
- __main__ run_iterations:351 :1370.9 Mb print(f'{sub_epoch_i:>3} updating')
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- __main__ run_iterations:353 :1370.9 Mb if sub_epoch_i == 0:
- __main__ run_iterations:354 :1370.9 Mb repeat_learning_i = getattr(distance_function, 'full_data_pretrain_n', 1)
- __main__ run_iterations:358 :1370.9 Mb for sub_repeat in range(repeat_learning_i):
- __main__ run_iterations:359 :1370.9 Mb pretraining = repeat_learning_i > 1
- __main__ run_iterations:360 :1370.9 Mb sub_match_iterator = match_source_target(
- __main__ run_iterations:361 :1370.9 Mb sub_epoch_i, train_loaders, distance_function, shift=shift,
- __main__ run_iterations:362 :1370.9 Mb discriminator_only=pretraining
- __main__ run_iterations:365 :1370.9 Mb print('supervised update')
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- utils.tee_stdout write:13 :1370.9 Mb f.write(string)
- utils.tee_stdout write:14 :1370.9 Mb f.flush()
- utils.tee_stdout write:12 :1370.9 Mb for f in self._file_objects:
- __main__ run_iterations:367 :1370.9 Mb if share_embedding is True:
- __main__ run_iterations:368 :1370.9 Mb sub_base_iterator = supervised_source_update(train_loaders[0]['train'])
- __main__ run_iterations:369 :1370.9 Mb sub_iterator = zip(sub_match_iterator, sub_base_iterator)
- __main__ run_iterations:373 :1370.9 Mb for __ in sub_iterator:
- __main__ match_source_target:107 :1370.9 Mb dist_func.train(True)
- __main__ match_source_target:109 :1370.9 Mb discriminator_needs_update = hasattr(dist_func, 'attempt_update_d')
- __main__ match_source_target:111 :1370.9 Mb if not discriminator_needs_update and discriminator_only:
- __main__ match_source_target:115 :1370.9 Mb if discriminator_only is True:
- __main__ match_source_target:118 :1370.9 Mb data_len = len(loaders[0]['train'])
- __main__ match_source_target:119 :1370.9 Mb yield_every = data_len // update_report_per_epoch
- __main__ match_source_target:120 :1370.9 Mb adaptation_iterator = enumerate(zip(loaders[0]['train'], loaders[1]['train']))
- __main__ match_source_target:121 :1370.9 Mb tqdm_adaptation_iterator = tqdm(adaptation_iterator, total=data_len, desc='match')
- __main__ match_source_target:122 :1370.9 Mb for batch_idx, ((source_data, s_y), (target_data, ___)) in tqdm_adaptation_iterator:
- __main__ match_source_target:123 :1370.9 Mb if source_data.numel == 0:
- __main__ match_source_target:127 :1370.9 Mb dist_func.current_pos = (epoch_i, batch_idx, batch_idx)
- __main__ match_source_target:128 :1370.9 Mb dist_func.y_a = s_y
- __main__ match_source_target:130 :1370.9 Mb data_s = Variable(maybe_cuda(source_data))
- start maybe_cuda:85 :1370.9 Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:131 :1370.9 Mb data_t = Variable(maybe_cuda(target_data))
- start maybe_cuda:85 :1370.9 Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:134 :1370.9 Mb if discriminator_needs_update is True:
- __main__ match_source_target:136 :1370.9 Mb feature_apply(partial(dist_func.attempt_update_d, epoch_i=epoch_i),
- __main__ match_source_target:137 :1370.9 Mb base_model, target_model, data_s, data_t, shift)
- distances.utils feature_apply:103 :1370.9 Mb a_phi = model_a.features_at(input_a, shift)
- model.fcn16 features_at:16 :1370.9 Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :1370.9 Mb h = x
- model.fcn16 features_at:19 :1370.9 Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :2118.9 Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :2764.9 Mb h = self.pool1(h)
- model.fcn16 features_at:23 :3088.9 Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :3412.9 Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :3736.9 Mb h = self.pool2(h)
- model.fcn16 features_at:27 :3898.9 Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :4060.9 Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :4222.9 Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :4384.9 Mb h = self.pool3(h)
- model.fcn16 features_at:32 :4384.9 Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :4466.9 Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :4548.9 Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :4630.9 Mb h = self.pool4(h)
- model.fcn16 features_at:37 :4672.9 Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :4694.9 Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :4716.9 Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :4738.9 Mb h = self.pool5(h)
- model.fcn16 features_at:42 :4738.9 Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :4966.9 Mb h = self.drop6(h)
- model.fcn16 features_at:45 :4966.9 Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :4966.9 Mb h = self.drop7(h)
- model.fcn16 features_at:48 :4966.9 Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :4966.9 Mb return h
- distances.utils feature_apply:104 :4966.9 Mb b_phi = model_b.features_at(input_b, shift)
- model.fcn16 features_at:16 :4966.9 Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :4966.9 Mb h = x
- model.fcn16 features_at:19 :4966.9 Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :5612.9 Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :6258.9 Mb h = self.pool1(h)
- model.fcn16 features_at:23 :6744.9 Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :7068.9 Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :7392.9 Mb h = self.pool2(h)
- model.fcn16 features_at:27 :7554.9 Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :7716.9 Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :7878.9 Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :8040.9 Mb h = self.pool3(h)
- model.fcn16 features_at:32 :8164.9 Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :8246.9 Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :8328.9 Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :8410.9 Mb h = self.pool4(h)
- model.fcn16 features_at:37 :8474.9 Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :8496.9 Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :8518.9 Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :8540.9 Mb h = self.pool5(h)
- model.fcn16 features_at:42 :8540.9 Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :8768.9 Mb h = self.drop6(h)
- model.fcn16 features_at:45 :8768.9 Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :8768.9 Mb h = self.drop7(h)
- model.fcn16 features_at:48 :8768.9 Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :8768.9 Mb return h
- distances.utils feature_apply:105 :8768.9 Mb dist_val = apply_func(a_phi, b_phi)
- distances.mlp_base attempt_update_d:62 :8768.9 Mb self._check_inputs(features_a, features_b)
- distances.mlp_fcn _check_inputs:32 :8768.9 Mb self._maybe_init(features_a)
- distances.mlp_fcn _maybe_init:28 :8768.9 Mb if not self._inited:
- distances.mlp_fcn _maybe_init:29 :8768.9 Mb self._init_net(some_feature_input.size()[1:])
- distances.mlp_fcn _init_net:37 :8768.9 Mb if self._shapes is not None:
- distances.mlp_fcn _init_net:38 :8768.9 Mb used_kern_sizes = (n_features[0],) + tuple(self._shapes['channels'])
- distances.mlp_fcn _init_net:39 :8768.9 Mb self.cnn_net = self.classifier = nn.Sequential(
- distances.mlp_fcn _init_net:40 :8768.9 Mb *chain.from_iterable(
- distances.mlp_fcn _init_net:41 :8768.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:45 :8768.9 Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn _init_net:47 :8768.9 Mb Flatten()
- distances.mlp_fcn <genexpr>:41 :8768.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :8768.9 Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :8768.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :8768.9 Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :8768.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:50 :8768.9 Mb test_pass_var = Variable(torch.zeros(1, *n_features), volatile=True).cuda()
- distances.mlp_fcn _init_net:51 :8768.9 Mb feature_n = self.cnn_net(test_pass_var).size(1) # because [1, F*H*W]
- distances.mlp_fcn forward:17 :8768.9 Mb return X.view(X.size(0), -1)
- distances.mlp_fcn _init_net:52 :8768.9 Mb self.net = nn.Sequential(
- distances.mlp_fcn _init_net:53 :8768.9 Mb self.cnn_net,
- distances.mlp_fcn _init_net:54 :8768.9 Mb nn.Linear(feature_n, 1)
- distances.mlp_fcn _init_net:57 :8768.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :8768.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :8768.9 Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :8768.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :8768.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :8768.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :8768.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :8768.9 Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :8768.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :8768.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :8768.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :8768.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :8768.9 Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :8768.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :8768.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :8768.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:61 :8768.9 Mb self._d_optimizer = self._optimizer_builder(self.parameters())
- distances.mlp_fcn _init_net:62 :8768.9 Mb self._n_features = n_features
- distances.mlp_fcn _init_net:63 :8768.9 Mb self.__inited = True
- distances.mlp_fcn _check_inputs:33 :8768.9 Mb assert features_a.size()[1:] == features_b.size()[1:] == self._n_features
- distances.mlp_base attempt_update_d:64 :8768.9 Mb if self._start_from_n_updates > 0 and epoch_i == 0:
- distances.mlp_base attempt_update_d:73 :8768.9 Mb if (self._step_every_repeat_n[0] > 0
- distances.mlp_base attempt_update_d:74 :8768.9 Mb and self._update_attempt_counter % self._step_every_repeat_n[0] == 0):
- distances.mlp_base attempt_update_d:75 :8768.9 Mb for _ in range(self._step_every_repeat_n[1]):
- distances.mlp_base attempt_update_d:76 :8768.9 Mb self.d_update_step(features_a, features_b)
- distances.mlp_base d_update_step:54 :8768.9 Mb cross_ent = self.objective(features_a, features_b)
- distances.mlp objective:21 :8768.9 Mb full_x = torch.cat([features_a, features_b], 0)
- distances.mlp objective:23 :8768.9 Mb if self.stored_y is None:
- distances.mlp objective:24 :8768.9 Mb to_concat = [torch.ones(features_a.size(0)), torch.zeros(features_b.size(0))]
- distances.mlp objective:25 :8768.9 Mb _y = Variable(torch.cat(to_concat).cuda())
- distances.mlp objective:26 :8768.9 Mb self.stored_y = _y
- distances.mlp objective:30 :8768.9 Mb full_y = _y if not inverse_labels else (1 - _y)
- distances.mlp objective:31 :8768.9 Mb output = self.network_forward(full_x)
- distances.mlp_base network_forward:37 :8768.9 Mb return self.net(x)
- distances.mlp_fcn forward:17 :8768.9 Mb return X.view(X.size(0), -1)
- distances.mlp objective:32 :8798.9 Mb cross_ent = F.binary_cross_entropy(F.sigmoid(output.view(-1)), full_y.view(-1))
- distances.mlp objective:33 :8818.9 Mb assert not has_nan(cross_ent)
- distances.utils has_nan:83 :8818.9 Mb if type(x) is float:
- distances.utils has_nan:86 :8818.9 Mb x = x.data if type(x) is Variable else x
- distances.utils has_nan:87 :8818.9 Mb return ((float(torch.sum(x != x)) > 0)
- distances.utils has_nan:88 :8818.9 Mb or float(torch.sum(x == np.inf)) > 0)
- distances.mlp objective:34 :8818.9 Mb return cross_ent
- distances.mlp_base d_update_step:56 :8818.9 Mb self._d_optimizer.zero_grad()
- distances.mlp_base d_update_step:58 :8818.9 Mb cross_ent.backward()
- distances.mlp_base d_update_step:59 :6284.9 Mb self._d_optimizer.step()
- distances.mlp_base attempt_update_d:77 :6284.9 Mb self._update_attempt_counter += 1
- distances.mlp_base attempt_update_d:75 :6284.9 Mb for _ in range(self._step_every_repeat_n[1]):
- distances.mlp_base attempt_update_d:78 :6284.9 Mb return
- distances.utils feature_apply:106 :6284.9 Mb return dist_val
- __main__ match_source_target:139 :6284.9 Mb del data_s, data_t
- __main__ match_source_target:140 :6284.9 Mb data_s = Variable(maybe_cuda(source_data))
- start maybe_cuda:85 :6284.9 Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:141 :6284.9 Mb data_t = Variable(maybe_cuda(target_data))
- start maybe_cuda:85 :6284.9 Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:145 :6284.9 Mb if not discriminator_only:
- __main__ match_source_target:146 :6284.9 Mb dist_val = feature_apply(dist_func,
- __main__ match_source_target:147 :6284.9 Mb base_model, target_model, data_s, data_t, shift)
- distances.utils feature_apply:103 :6284.9 Mb a_phi = model_a.features_at(input_a, shift)
- model.fcn16 features_at:16 :6284.9 Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :6284.9 Mb h = x
- model.fcn16 features_at:19 :6284.9 Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :6284.9 Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :6284.9 Mb h = self.pool1(h)
- model.fcn16 features_at:23 :6284.9 Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :6284.9 Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :6284.9 Mb h = self.pool2(h)
- model.fcn16 features_at:27 :6284.9 Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :6284.9 Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :6284.9 Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :6284.9 Mb h = self.pool3(h)
- model.fcn16 features_at:32 :6284.9 Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :6284.9 Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :6284.9 Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :6284.9 Mb h = self.pool4(h)
- model.fcn16 features_at:37 :6284.9 Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :6284.9 Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :6284.9 Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :6284.9 Mb h = self.pool5(h)
- model.fcn16 features_at:42 :6284.9 Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :6284.9 Mb h = self.drop6(h)
- model.fcn16 features_at:45 :6284.9 Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :6284.9 Mb h = self.drop7(h)
- model.fcn16 features_at:48 :6284.9 Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :6284.9 Mb return h
- distances.utils feature_apply:104 :6284.9 Mb b_phi = model_b.features_at(input_b, shift)
- model.fcn16 features_at:16 :6284.9 Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :6284.9 Mb h = x
- model.fcn16 features_at:19 :6284.9 Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :6284.9 Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :6930.9 Mb h = self.pool1(h)
- model.fcn16 features_at:23 :7254.9 Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :7578.9 Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :7902.9 Mb h = self.pool2(h)
- model.fcn16 features_at:27 :8146.9 Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :8308.9 Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :8470.9 Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :8632.9 Mb h = self.pool3(h)
- model.fcn16 features_at:32 :8756.9 Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :8838.9 Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :8920.9 Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :9002.9 Mb h = self.pool4(h)
- model.fcn16 features_at:37 :9044.9 Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :9044.9 Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :9066.9 Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :9088.9 Mb h = self.pool5(h)
- model.fcn16 features_at:42 :9088.9 Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :9316.9 Mb h = self.drop6(h)
- model.fcn16 features_at:45 :9316.9 Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :9316.9 Mb h = self.drop7(h)
- model.fcn16 features_at:48 :9316.9 Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :9316.9 Mb return h
- distances.utils feature_apply:105 :9316.9 Mb dist_val = apply_func(a_phi, b_phi)
- distances.mlp_base forward:40 :9316.9 Mb self._check_inputs(features_a, features_b)
- distances.mlp_fcn _check_inputs:32 :9316.9 Mb self._maybe_init(features_a)
- distances.mlp_fcn _maybe_init:28 :9316.9 Mb if not self._inited:
- distances.mlp_fcn _maybe_init:29 :9316.9 Mb self._init_net(some_feature_input.size()[1:])
- distances.mlp_fcn _init_net:37 :9316.9 Mb if self._shapes is not None:
- distances.mlp_fcn _init_net:38 :9316.9 Mb used_kern_sizes = (n_features[0],) + tuple(self._shapes['channels'])
- distances.mlp_fcn _init_net:39 :9316.9 Mb self.cnn_net = self.classifier = nn.Sequential(
- distances.mlp_fcn _init_net:40 :9316.9 Mb *chain.from_iterable(
- distances.mlp_fcn _init_net:41 :9316.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:45 :9316.9 Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn _init_net:47 :9316.9 Mb Flatten()
- distances.mlp_fcn <genexpr>:41 :9316.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :9316.9 Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :9316.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :9316.9 Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :9316.9 Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:50 :9316.9 Mb test_pass_var = Variable(torch.zeros(1, *n_features), volatile=True).cuda()
- distances.mlp_fcn _init_net:51 :9316.9 Mb feature_n = self.cnn_net(test_pass_var).size(1) # because [1, F*H*W]
- distances.mlp_fcn forward:17 :9316.9 Mb return X.view(X.size(0), -1)
- distances.mlp_fcn _init_net:52 :9316.9 Mb self.net = nn.Sequential(
- distances.mlp_fcn _init_net:53 :9316.9 Mb self.cnn_net,
- distances.mlp_fcn _init_net:54 :9316.9 Mb nn.Linear(feature_n, 1)
- distances.mlp_fcn _init_net:57 :9316.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :9316.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :9316.9 Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :9316.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :9316.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :9316.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :9316.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :9316.9 Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :9316.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :9316.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :9316.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :9316.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :9316.9 Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :9316.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :9316.9 Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :9316.9 Mb for w in self.parameters():
- distances.mlp_fcn _init_net:61 :9316.9 Mb self._d_optimizer = self._optimizer_builder(self.parameters())
- distances.mlp_fcn _init_net:62 :9316.9 Mb self._n_features = n_features
- distances.mlp_fcn _init_net:63 :9316.9 Mb self.__inited = True
- distances.mlp_fcn _check_inputs:33 :9316.9 Mb assert features_a.size()[1:] == features_b.size()[1:] == self._n_features
- distances.mlp_base forward:42 :9316.9 Mb if self._attempt_on_forward is True:
- distances.mlp_base forward:46 :9316.9 Mb inv_label_cross_ent = self.objective(features_a, features_b, inverse_labels=inv)
- distances.mlp objective:21 :9316.9 Mb full_x = torch.cat([features_a, features_b], 0)
- distances.mlp objective:23 :9316.9 Mb if self.stored_y is None:
- distances.mlp objective:28 :9316.9 Mb _y = self.stored_y
- distances.mlp objective:30 :9316.9 Mb full_y = _y if not inverse_labels else (1 - _y)
- distances.mlp objective:31 :9316.9 Mb output = self.network_forward(full_x)
- distances.mlp_base network_forward:37 :9316.9 Mb return self.net(x)
- distances.mlp_fcn forward:17 :9316.9 Mb return X.view(X.size(0), -1)
- distances.mlp objective:32 :9316.9 Mb cross_ent = F.binary_cross_entropy(F.sigmoid(output.view(-1)), full_y.view(-1))
- distances.mlp objective:33 :9316.9 Mb assert not has_nan(cross_ent)
- distances.utils has_nan:83 :9316.9 Mb if type(x) is float:
- distances.utils has_nan:86 :9316.9 Mb x = x.data if type(x) is Variable else x
- distances.utils has_nan:87 :9316.9 Mb return ((float(torch.sum(x != x)) > 0)
- distances.utils has_nan:88 :9316.9 Mb or float(torch.sum(x == np.inf)) > 0)
- distances.mlp objective:34 :9316.9 Mb return cross_ent
- distances.mlp_base forward:47 :9316.9 Mb return inv_label_cross_ent
- distances.utils feature_apply:106 :9316.9 Mb return dist_val
- __main__ match_source_target:149 :9316.9 Mb optimizer.zero_grad()
- __main__ match_source_target:150 :9316.9 Mb dist_val.backward()
- __main__ match_source_target:151 :10620.9Mb optimizer.step()
- __main__ match_source_target:154 :10620.9Mb if (batch_idx + 1) % yield_every == 0:
- __main__ match_source_target:122 :10620.9Mb for batch_idx, ((source_data, s_y), (target_data, ___)) in tqdm_adaptation_iterator:
- __main__ match_source_target:123 :10620.9Mb if source_data.numel == 0:
- __main__ match_source_target:127 :10620.9Mb dist_func.current_pos = (epoch_i, batch_idx, batch_idx)
- __main__ match_source_target:128 :10620.9Mb dist_func.y_a = s_y
- __main__ match_source_target:130 :10620.9Mb data_s = Variable(maybe_cuda(source_data))
- start maybe_cuda:85 :10620.9Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:131 :10620.9Mb data_t = Variable(maybe_cuda(target_data))
- start maybe_cuda:85 :10620.9Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:134 :10620.9Mb if discriminator_needs_update is True:
- __main__ match_source_target:136 :10620.9Mb feature_apply(partial(dist_func.attempt_update_d, epoch_i=epoch_i),
- __main__ match_source_target:137 :10620.9Mb base_model, target_model, data_s, data_t, shift)
- distances.utils feature_apply:103 :10620.9Mb a_phi = model_a.features_at(input_a, shift)
- model.fcn16 features_at:16 :10620.9Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :10620.9Mb h = x
- model.fcn16 features_at:19 :10620.9Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :10620.9Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :10620.9Mb h = self.pool1(h)
- model.fcn16 features_at:23 :10620.9Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :10620.9Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :10620.9Mb h = self.pool2(h)
- model.fcn16 features_at:27 :10620.9Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :10620.9Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :10620.9Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :10620.9Mb h = self.pool3(h)
- model.fcn16 features_at:32 :10620.9Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :10620.9Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :10620.9Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :10620.9Mb h = self.pool4(h)
- model.fcn16 features_at:37 :10620.9Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :10620.9Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :10620.9Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :10620.9Mb h = self.pool5(h)
- model.fcn16 features_at:42 :10620.9Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :10620.9Mb h = self.drop6(h)
- model.fcn16 features_at:45 :10620.9Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :10620.9Mb h = self.drop7(h)
- model.fcn16 features_at:48 :10620.9Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :10620.9Mb return h
- distances.utils feature_apply:104 :10620.9Mb b_phi = model_b.features_at(input_b, shift)
- model.fcn16 features_at:16 :10620.9Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :10620.9Mb h = x
- model.fcn16 features_at:19 :10620.9Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :10620.9Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :10620.9Mb h = self.pool1(h)
- model.fcn16 features_at:23 :10620.9Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :10620.9Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :10620.9Mb h = self.pool2(h)
- model.fcn16 features_at:27 :10620.9Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :10620.9Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :10620.9Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :10620.9Mb h = self.pool3(h)
- model.fcn16 features_at:32 :10620.9Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :10620.9Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :10620.9Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :10620.9Mb h = self.pool4(h)
- model.fcn16 features_at:37 :10620.9Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :10620.9Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :10620.9Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :10620.9Mb h = self.pool5(h)
- model.fcn16 features_at:42 :10620.9Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :10620.9Mb h = self.drop6(h)
- model.fcn16 features_at:45 :10620.9Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :10620.9Mb h = self.drop7(h)
- model.fcn16 features_at:48 :10620.9Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :10620.9Mb return h
- distances.utils feature_apply:105 :10620.9Mb dist_val = apply_func(a_phi, b_phi)
- distances.mlp_base attempt_update_d:62 :10620.9Mb self._check_inputs(features_a, features_b)
- distances.mlp_fcn _check_inputs:32 :10620.9Mb self._maybe_init(features_a)
- distances.mlp_fcn _maybe_init:28 :10620.9Mb if not self._inited:
- distances.mlp_fcn _maybe_init:29 :10620.9Mb self._init_net(some_feature_input.size()[1:])
- distances.mlp_fcn _init_net:37 :10620.9Mb if self._shapes is not None:
- distances.mlp_fcn _init_net:38 :10620.9Mb used_kern_sizes = (n_features[0],) + tuple(self._shapes['channels'])
- distances.mlp_fcn _init_net:39 :10620.9Mb self.cnn_net = self.classifier = nn.Sequential(
- distances.mlp_fcn _init_net:40 :10620.9Mb *chain.from_iterable(
- distances.mlp_fcn _init_net:41 :10620.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:45 :10620.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn _init_net:47 :10620.9Mb Flatten()
- distances.mlp_fcn <genexpr>:41 :10620.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :10620.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :10620.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :10620.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :10620.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:50 :10620.9Mb test_pass_var = Variable(torch.zeros(1, *n_features), volatile=True).cuda()
- distances.mlp_fcn _init_net:51 :10620.9Mb feature_n = self.cnn_net(test_pass_var).size(1) # because [1, F*H*W]
- distances.mlp_fcn forward:17 :10620.9Mb return X.view(X.size(0), -1)
- distances.mlp_fcn _init_net:52 :10620.9Mb self.net = nn.Sequential(
- distances.mlp_fcn _init_net:53 :10620.9Mb self.cnn_net,
- distances.mlp_fcn _init_net:54 :10620.9Mb nn.Linear(feature_n, 1)
- distances.mlp_fcn _init_net:57 :10620.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :10620.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :10620.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :10620.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :10620.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :10620.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :10620.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :10620.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :10620.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :10620.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :10620.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :10620.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :10620.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :10620.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :10620.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :10620.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:61 :10620.9Mb self._d_optimizer = self._optimizer_builder(self.parameters())
- distances.mlp_fcn _init_net:62 :10620.9Mb self._n_features = n_features
- distances.mlp_fcn _init_net:63 :10620.9Mb self.__inited = True
- distances.mlp_fcn _check_inputs:33 :10620.9Mb assert features_a.size()[1:] == features_b.size()[1:] == self._n_features
- distances.mlp_base attempt_update_d:64 :10620.9Mb if self._start_from_n_updates > 0 and epoch_i == 0:
- distances.mlp_base attempt_update_d:73 :10620.9Mb if (self._step_every_repeat_n[0] > 0
- distances.mlp_base attempt_update_d:74 :10620.9Mb and self._update_attempt_counter % self._step_every_repeat_n[0] == 0):
- distances.mlp_base attempt_update_d:75 :10620.9Mb for _ in range(self._step_every_repeat_n[1]):
- distances.mlp_base attempt_update_d:76 :10620.9Mb self.d_update_step(features_a, features_b)
- distances.mlp_base d_update_step:54 :10620.9Mb cross_ent = self.objective(features_a, features_b)
- distances.mlp objective:21 :10620.9Mb full_x = torch.cat([features_a, features_b], 0)
- distances.mlp objective:23 :10620.9Mb if self.stored_y is None:
- distances.mlp objective:28 :10620.9Mb _y = self.stored_y
- distances.mlp objective:30 :10620.9Mb full_y = _y if not inverse_labels else (1 - _y)
- distances.mlp objective:31 :10620.9Mb output = self.network_forward(full_x)
- distances.mlp_base network_forward:37 :10620.9Mb return self.net(x)
- distances.mlp_fcn forward:17 :10620.9Mb return X.view(X.size(0), -1)
- distances.mlp objective:32 :10620.9Mb cross_ent = F.binary_cross_entropy(F.sigmoid(output.view(-1)), full_y.view(-1))
- distances.mlp objective:33 :10620.9Mb assert not has_nan(cross_ent)
- distances.utils has_nan:83 :10620.9Mb if type(x) is float:
- distances.utils has_nan:86 :10620.9Mb x = x.data if type(x) is Variable else x
- distances.utils has_nan:87 :10620.9Mb return ((float(torch.sum(x != x)) > 0)
- distances.utils has_nan:88 :10620.9Mb or float(torch.sum(x == np.inf)) > 0)
- distances.mlp objective:34 :10620.9Mb return cross_ent
- distances.mlp_base d_update_step:56 :10620.9Mb self._d_optimizer.zero_grad()
- distances.mlp_base d_update_step:58 :10620.9Mb cross_ent.backward()
- distances.mlp_base d_update_step:59 :11404.9Mb self._d_optimizer.step()
- distances.mlp_base attempt_update_d:77 :11404.9Mb self._update_attempt_counter += 1
- distances.mlp_base attempt_update_d:75 :11404.9Mb for _ in range(self._step_every_repeat_n[1]):
- distances.mlp_base attempt_update_d:78 :11404.9Mb return
- distances.utils feature_apply:106 :11404.9Mb return dist_val
- __main__ match_source_target:139 :11404.9Mb del data_s, data_t
- __main__ match_source_target:140 :11404.9Mb data_s = Variable(maybe_cuda(source_data))
- start maybe_cuda:85 :11404.9Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:141 :11404.9Mb data_t = Variable(maybe_cuda(target_data))
- start maybe_cuda:85 :11404.9Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:145 :11404.9Mb if not discriminator_only:
- __main__ match_source_target:146 :11404.9Mb dist_val = feature_apply(dist_func,
- __main__ match_source_target:147 :11404.9Mb base_model, target_model, data_s, data_t, shift)
- distances.utils feature_apply:103 :11404.9Mb a_phi = model_a.features_at(input_a, shift)
- model.fcn16 features_at:16 :11404.9Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :11404.9Mb h = x
- model.fcn16 features_at:19 :11404.9Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :11404.9Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :11404.9Mb h = self.pool1(h)
- model.fcn16 features_at:23 :11404.9Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :11404.9Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :11404.9Mb h = self.pool2(h)
- model.fcn16 features_at:27 :11404.9Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :11404.9Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :11404.9Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :11404.9Mb h = self.pool3(h)
- model.fcn16 features_at:32 :11404.9Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :11404.9Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :11404.9Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :11404.9Mb h = self.pool4(h)
- model.fcn16 features_at:37 :11404.9Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :11404.9Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :11404.9Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :11404.9Mb h = self.pool5(h)
- model.fcn16 features_at:42 :11404.9Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :11404.9Mb h = self.drop6(h)
- model.fcn16 features_at:45 :11404.9Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :11404.9Mb h = self.drop7(h)
- model.fcn16 features_at:48 :11404.9Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :11404.9Mb return h
- distances.utils feature_apply:104 :11404.9Mb b_phi = model_b.features_at(input_b, shift)
- model.fcn16 features_at:16 :11404.9Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :11404.9Mb h = x
- model.fcn16 features_at:19 :11404.9Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :11404.9Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :11404.9Mb h = self.pool1(h)
- model.fcn16 features_at:23 :11404.9Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :11404.9Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :11404.9Mb h = self.pool2(h)
- model.fcn16 features_at:27 :11404.9Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :11404.9Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :11404.9Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :11404.9Mb h = self.pool3(h)
- model.fcn16 features_at:32 :11404.9Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :11404.9Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :11404.9Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :11404.9Mb h = self.pool4(h)
- model.fcn16 features_at:37 :11404.9Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :11404.9Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :11404.9Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :11404.9Mb h = self.pool5(h)
- model.fcn16 features_at:42 :11404.9Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :11404.9Mb h = self.drop6(h)
- model.fcn16 features_at:45 :11404.9Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :11404.9Mb h = self.drop7(h)
- model.fcn16 features_at:48 :11404.9Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :11404.9Mb return h
- distances.utils feature_apply:105 :11404.9Mb dist_val = apply_func(a_phi, b_phi)
- distances.mlp_base forward:40 :11404.9Mb self._check_inputs(features_a, features_b)
- distances.mlp_fcn _check_inputs:32 :11404.9Mb self._maybe_init(features_a)
- distances.mlp_fcn _maybe_init:28 :11404.9Mb if not self._inited:
- distances.mlp_fcn _maybe_init:29 :11404.9Mb self._init_net(some_feature_input.size()[1:])
- distances.mlp_fcn _init_net:37 :11404.9Mb if self._shapes is not None:
- distances.mlp_fcn _init_net:38 :11404.9Mb used_kern_sizes = (n_features[0],) + tuple(self._shapes['channels'])
- distances.mlp_fcn _init_net:39 :11404.9Mb self.cnn_net = self.classifier = nn.Sequential(
- distances.mlp_fcn _init_net:40 :11404.9Mb *chain.from_iterable(
- distances.mlp_fcn _init_net:41 :11404.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:45 :11404.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn _init_net:47 :11404.9Mb Flatten()
- distances.mlp_fcn <genexpr>:41 :11404.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :11404.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :11404.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :11404.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :11404.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:50 :11404.9Mb test_pass_var = Variable(torch.zeros(1, *n_features), volatile=True).cuda()
- distances.mlp_fcn _init_net:51 :11404.9Mb feature_n = self.cnn_net(test_pass_var).size(1) # because [1, F*H*W]
- distances.mlp_fcn forward:17 :11404.9Mb return X.view(X.size(0), -1)
- distances.mlp_fcn _init_net:52 :11404.9Mb self.net = nn.Sequential(
- distances.mlp_fcn _init_net:53 :11404.9Mb self.cnn_net,
- distances.mlp_fcn _init_net:54 :11404.9Mb nn.Linear(feature_n, 1)
- distances.mlp_fcn _init_net:57 :11404.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11404.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :11404.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :11404.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11404.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :11404.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11404.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :11404.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :11404.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11404.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :11404.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11404.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :11404.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :11404.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11404.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :11404.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:61 :11404.9Mb self._d_optimizer = self._optimizer_builder(self.parameters())
- distances.mlp_fcn _init_net:62 :11404.9Mb self._n_features = n_features
- distances.mlp_fcn _init_net:63 :11404.9Mb self.__inited = True
- distances.mlp_fcn _check_inputs:33 :11404.9Mb assert features_a.size()[1:] == features_b.size()[1:] == self._n_features
- distances.mlp_base forward:42 :11404.9Mb if self._attempt_on_forward is True:
- distances.mlp_base forward:46 :11404.9Mb inv_label_cross_ent = self.objective(features_a, features_b, inverse_labels=inv)
- distances.mlp objective:21 :11404.9Mb full_x = torch.cat([features_a, features_b], 0)
- distances.mlp objective:23 :11404.9Mb if self.stored_y is None:
- distances.mlp objective:28 :11404.9Mb _y = self.stored_y
- distances.mlp objective:30 :11404.9Mb full_y = _y if not inverse_labels else (1 - _y)
- distances.mlp objective:31 :11404.9Mb output = self.network_forward(full_x)
- distances.mlp_base network_forward:37 :11404.9Mb return self.net(x)
- distances.mlp_fcn forward:17 :11404.9Mb return X.view(X.size(0), -1)
- distances.mlp objective:32 :11404.9Mb cross_ent = F.binary_cross_entropy(F.sigmoid(output.view(-1)), full_y.view(-1))
- distances.mlp objective:33 :11404.9Mb assert not has_nan(cross_ent)
- distances.utils has_nan:83 :11404.9Mb if type(x) is float:
- distances.utils has_nan:86 :11404.9Mb x = x.data if type(x) is Variable else x
- distances.utils has_nan:87 :11404.9Mb return ((float(torch.sum(x != x)) > 0)
- distances.utils has_nan:88 :11404.9Mb or float(torch.sum(x == np.inf)) > 0)
- distances.mlp objective:34 :11404.9Mb return cross_ent
- distances.mlp_base forward:47 :11404.9Mb return inv_label_cross_ent
- distances.utils feature_apply:106 :11404.9Mb return dist_val
- __main__ match_source_target:149 :11404.9Mb optimizer.zero_grad()
- __main__ match_source_target:150 :11404.9Mb dist_val.backward()
- __main__ match_source_target:151 :11796.9Mb optimizer.step()
- __main__ match_source_target:154 :11796.9Mb if (batch_idx + 1) % yield_every == 0:
- __main__ match_source_target:122 :11796.9Mb for batch_idx, ((source_data, s_y), (target_data, ___)) in tqdm_adaptation_iterator:
- __main__ match_source_target:123 :11796.9Mb if source_data.numel == 0:
- __main__ match_source_target:127 :11796.9Mb dist_func.current_pos = (epoch_i, batch_idx, batch_idx)
- __main__ match_source_target:128 :11796.9Mb dist_func.y_a = s_y
- __main__ match_source_target:130 :11796.9Mb data_s = Variable(maybe_cuda(source_data))
- start maybe_cuda:85 :11796.9Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:131 :11796.9Mb data_t = Variable(maybe_cuda(target_data))
- start maybe_cuda:85 :11796.9Mb return x.cuda() if torch.cuda.is_available() else x
- __main__ match_source_target:134 :11796.9Mb if discriminator_needs_update is True:
- __main__ match_source_target:136 :11796.9Mb feature_apply(partial(dist_func.attempt_update_d, epoch_i=epoch_i),
- __main__ match_source_target:137 :11796.9Mb base_model, target_model, data_s, data_t, shift)
- distances.utils feature_apply:103 :11796.9Mb a_phi = model_a.features_at(input_a, shift)
- model.fcn16 features_at:16 :11796.9Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :11796.9Mb h = x
- model.fcn16 features_at:19 :11796.9Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :11796.9Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :11796.9Mb h = self.pool1(h)
- model.fcn16 features_at:23 :11796.9Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :11796.9Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :11796.9Mb h = self.pool2(h)
- model.fcn16 features_at:27 :11796.9Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :11796.9Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :11796.9Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :11796.9Mb h = self.pool3(h)
- model.fcn16 features_at:32 :11796.9Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :11796.9Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :11796.9Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :11796.9Mb h = self.pool4(h)
- model.fcn16 features_at:37 :11796.9Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :11796.9Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :11796.9Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :11796.9Mb h = self.pool5(h)
- model.fcn16 features_at:42 :11796.9Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :11796.9Mb h = self.drop6(h)
- model.fcn16 features_at:45 :11796.9Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :11796.9Mb h = self.drop7(h)
- model.fcn16 features_at:48 :11796.9Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :11796.9Mb return h
- distances.utils feature_apply:104 :11796.9Mb b_phi = model_b.features_at(input_b, shift)
- model.fcn16 features_at:16 :11796.9Mb assert shift == -3, "!= -3 not implemented, not needed"
- model.fcn16 features_at:18 :11796.9Mb h = x
- model.fcn16 features_at:19 :11796.9Mb h = self.relu1_1(self.conv1_1(h))
- model.fcn16 features_at:20 :11796.9Mb h = self.relu1_2(self.conv1_2(h))
- model.fcn16 features_at:21 :11796.9Mb h = self.pool1(h)
- model.fcn16 features_at:23 :11796.9Mb h = self.relu2_1(self.conv2_1(h))
- model.fcn16 features_at:24 :11796.9Mb h = self.relu2_2(self.conv2_2(h))
- model.fcn16 features_at:25 :11796.9Mb h = self.pool2(h)
- model.fcn16 features_at:27 :11796.9Mb h = self.relu3_1(self.conv3_1(h))
- model.fcn16 features_at:28 :11796.9Mb h = self.relu3_2(self.conv3_2(h))
- model.fcn16 features_at:29 :11796.9Mb h = self.relu3_3(self.conv3_3(h))
- model.fcn16 features_at:30 :11796.9Mb h = self.pool3(h)
- model.fcn16 features_at:32 :11796.9Mb h = self.relu4_1(self.conv4_1(h))
- model.fcn16 features_at:33 :11796.9Mb h = self.relu4_2(self.conv4_2(h))
- model.fcn16 features_at:34 :11796.9Mb h = self.relu4_3(self.conv4_3(h))
- model.fcn16 features_at:35 :11796.9Mb h = self.pool4(h)
- model.fcn16 features_at:37 :11796.9Mb h = self.relu5_1(self.conv5_1(h))
- model.fcn16 features_at:38 :11796.9Mb h = self.relu5_2(self.conv5_2(h))
- model.fcn16 features_at:39 :11796.9Mb h = self.relu5_3(self.conv5_3(h))
- model.fcn16 features_at:40 :11796.9Mb h = self.pool5(h)
- model.fcn16 features_at:42 :11796.9Mb h = self.relu6(self.fc6(h))
- model.fcn16 features_at:43 :11796.9Mb h = self.drop6(h)
- model.fcn16 features_at:45 :11796.9Mb h = self.relu7(self.fc7(h))
- model.fcn16 features_at:46 :11796.9Mb h = self.drop7(h)
- model.fcn16 features_at:48 :11796.9Mb h = self.score_fr(h)
- model.fcn16 features_at:50 :11796.9Mb return h
- distances.utils feature_apply:105 :11796.9Mb dist_val = apply_func(a_phi, b_phi)
- distances.mlp_base attempt_update_d:62 :11796.9Mb self._check_inputs(features_a, features_b)
- distances.mlp_fcn _check_inputs:32 :11796.9Mb self._maybe_init(features_a)
- distances.mlp_fcn _maybe_init:28 :11796.9Mb if not self._inited:
- distances.mlp_fcn _maybe_init:29 :11796.9Mb self._init_net(some_feature_input.size()[1:])
- distances.mlp_fcn _init_net:37 :11796.9Mb if self._shapes is not None:
- distances.mlp_fcn _init_net:38 :11796.9Mb used_kern_sizes = (n_features[0],) + tuple(self._shapes['channels'])
- distances.mlp_fcn _init_net:39 :11796.9Mb self.cnn_net = self.classifier = nn.Sequential(
- distances.mlp_fcn _init_net:40 :11796.9Mb *chain.from_iterable(
- distances.mlp_fcn _init_net:41 :11796.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:45 :11796.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn _init_net:47 :11796.9Mb Flatten()
- distances.mlp_fcn <genexpr>:41 :11796.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :11796.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :11796.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn <genexpr>:45 :11796.9Mb for i in range(len(used_kern_sizes)-1)
- distances.mlp_fcn <genexpr>:41 :11796.9Mb (nn.Conv2d(used_kern_sizes[i], used_kern_sizes[i+1],
- distances.mlp_fcn _init_net:50 :11796.9Mb test_pass_var = Variable(torch.zeros(1, *n_features), volatile=True).cuda()
- distances.mlp_fcn _init_net:51 :11796.9Mb feature_n = self.cnn_net(test_pass_var).size(1) # because [1, F*H*W]
- distances.mlp_fcn forward:17 :11796.9Mb return X.view(X.size(0), -1)
- distances.mlp_fcn _init_net:52 :11796.9Mb self.net = nn.Sequential(
- distances.mlp_fcn _init_net:53 :11796.9Mb self.cnn_net,
- distances.mlp_fcn _init_net:54 :11796.9Mb nn.Linear(feature_n, 1)
- distances.mlp_fcn _init_net:57 :11796.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11796.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :11796.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :11796.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11796.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :11796.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11796.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :11796.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :11796.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11796.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :11796.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11796.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:59 :11796.9Mb self._initializer(w)
- distances.mlp_fcn _init_net:57 :11796.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:58 :11796.9Mb if w.ndimension() >= 2 and not getattr(w, 'initialized', False):
- distances.mlp_fcn _init_net:57 :11796.9Mb for w in self.parameters():
- distances.mlp_fcn _init_net:61 :11796.9Mb self._d_optimizer = self._optimizer_builder(self.parameters())
- distances.mlp_fcn _init_net:62 :11796.9Mb self._n_features = n_features
- distances.mlp_fcn _init_net:63 :11796.9Mb self.__inited = True
- distances.mlp_fcn _check_inputs:33 :11796.9Mb assert features_a.size()[1:] == features_b.size()[1:] == self._n_features
- distances.mlp_base attempt_update_d:64 :11796.9Mb if self._start_from_n_updates > 0 and epoch_i == 0:
- distances.mlp_base attempt_update_d:73 :11796.9Mb if (self._step_every_repeat_n[0] > 0
- distances.mlp_base attempt_update_d:74 :11796.9Mb and self._update_attempt_counter % self._step_every_repeat_n[0] == 0):
- distances.mlp_base attempt_update_d:75 :11796.9Mb for _ in range(self._step_every_repeat_n[1]):
- distances.mlp_base attempt_update_d:76 :11796.9Mb self.d_update_step(features_a, features_b)
- distances.mlp_base d_update_step:54 :11796.9Mb cross_ent = self.objective(features_a, features_b)
- distances.mlp objective:21 :11796.9Mb full_x = torch.cat([features_a, features_b], 0)
- distances.mlp objective:23 :11796.9Mb if self.stored_y is None:
- distances.mlp objective:28 :11796.9Mb _y = self.stored_y
- distances.mlp objective:30 :11796.9Mb full_y = _y if not inverse_labels else (1 - _y)
- distances.mlp objective:31 :11796.9Mb output = self.network_forward(full_x)
- distances.mlp_base network_forward:37 :11796.9Mb return self.net(x)
- distances.mlp_fcn forward:17 :11796.9Mb return X.view(X.size(0), -1)
- distances.mlp objective:32 :11796.9Mb cross_ent = F.binary_cross_entropy(F.sigmoid(output.view(-1)), full_y.view(-1))
- distances.mlp objective:33 :11796.9Mb assert not has_nan(cross_ent)
- distances.utils has_nan:83 :11796.9Mb if type(x) is float:
- distances.utils has_nan:86 :11796.9Mb x = x.data if type(x) is Variable else x
- distances.utils has_nan:87 :11796.9Mb return ((float(torch.sum(x != x)) > 0)
- distances.utils has_nan:88 :11796.9Mb or float(torch.sum(x == np.inf)) > 0)
- distances.mlp objective:34 :11796.9Mb return cross_ent
- distances.mlp_base d_update_step:56 :11796.9Mb self._d_optimizer.zero_grad()
- distances.mlp_base d_update_step:58 :11796.9Mb cross_ent.backward()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement