---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-14-243bd3d406ab> in <module>
----> 1 learner.fine_tune(10)

/usr/local/lib/python3.8/dist-packages/fastcore/utils.py in _f(*args, **kwargs)
    429 init_args.update(log)
    430 setattr(inst, 'init_args', init_args)
--> 431 return inst if to_return else f(*args, **kwargs)
    432 return _f
    433

/usr/local/lib/python3.8/dist-packages/fastai2/callback/schedule.py in fine_tune(self, epochs, base_lr, freeze_epochs, lr_mult, pct_start, div, **kwargs)
    146 "Fine tune with `freeze` for `freeze_epochs` then with `unfreeze` from `epochs` using discriminative LR"
    147 self.freeze()
--> 148 self.fit_one_cycle(freeze_epochs, slice(base_lr), pct_start=0.99, **kwargs)
    149 base_lr /= 2
    150 self.unfreeze()

/usr/local/lib/python3.8/dist-packages/fastcore/utils.py in _f(*args, **kwargs)
    429 init_args.update(log)
    430 setattr(inst, 'init_args', init_args)
--> 431 return inst if to_return else f(*args, **kwargs)
    432 return _f
    433

/usr/local/lib/python3.8/dist-packages/fastai2/callback/schedule.py in fit_one_cycle(self, n_epoch, lr_max, div, div_final, pct_start, wd, moms, cbs, reset_opt)
     98 scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
     99         'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
--> 100 self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd)
    101
    102 # Cell

/usr/local/lib/python3.8/dist-packages/fastcore/utils.py in _f(*args, **kwargs)
    429 init_args.update(log)
    430 setattr(inst, 'init_args', init_args)
--> 431 return inst if to_return else f(*args, **kwargs)
    432 return _f
    433

/usr/local/lib/python3.8/dist-packages/fastai2/learner.py in fit(self, n_epoch, lr, wd, cbs, reset_opt)
    201 try:
    202 self.epoch=epoch; self('begin_epoch')
--> 203 self._do_epoch_train()
    204 self._do_epoch_validate()
    205 except CancelEpochException: self('after_cancel_epoch')

/usr/local/lib/python3.8/dist-packages/fastai2/learner.py in _do_epoch_train(self)
    173 try:
    174 self.dl = self.dls.train; self('begin_train')
--> 175 self.all_batches()
    176 except CancelTrainException: self('after_cancel_train')
    177 finally: self('after_train')

/usr/local/lib/python3.8/dist-packages/fastai2/learner.py in all_batches(self)
    151 def all_batches(self):
    152 self.n_iter = len(self.dl)
--> 153 for o in enumerate(self.dl): self.one_batch(*o)
    154
    155 def one_batch(self, i, b):

/usr/local/lib/python3.8/dist-packages/fastai2/learner.py in one_batch(self, i, b)
    157 try:
    158 self._split(b); self('begin_batch')
--> 159 self.pred = self.model(*self.xb); self('after_pred')
    160 if len(self.yb) == 0: return
    161 self.loss = self.loss_func(self.pred, *self.yb); self('after_loss')

/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    548 result = self._slow_forward(*input, **kwargs)
    549 else:
--> 550 result = self.forward(*input, **kwargs)
    551 for hook in self._forward_hooks.values():
    552 hook_result = hook(self, input, result)

/usr/local/lib/python3.8/dist-packages/efficientnet_pytorch/model.py in forward(self, inputs)
    191 bs = inputs.size(0)
    192 # Convolution layers
--> 193 x = self.extract_features(inputs)
    194
    195 # Pooling and final linear layer

/usr/local/lib/python3.8/dist-packages/efficientnet_pytorch/model.py in extract_features(self, inputs)
    180 if drop_connect_rate:
    181 drop_connect_rate *= float(idx) / len(self._blocks)
--> 182 x = block(x, drop_connect_rate=drop_connect_rate)
    183
    184 # Head

/usr/local/lib/python3.8/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    550 result = self.forward(*input, **kwargs)
    551 for hook in self._forward_hooks.values():
--> 552 hook_result = hook(self, input, result)
    553 if hook_result is not None:
    554 result = hook_result

~/Jupyter/manifold_mixup.py in hook_mixup(self, module, input, output)
    140 if not self.mixup_has_been_applied: # performs mixup
    141 output_dims = len(output.size())
--> 142 output = torch.lerp(output[self.shuffle], output, weight=unsqueeze(self.lam, n=output_dims-1))
    143 self.mixup_has_been_applied = True
    144 return output

RuntimeError: expected dtype Float but got dtype Half
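
The crash bottoms out in the manifold-mixup forward hook: under mixed-precision (fp16) training the EfficientNet block activations come through the hook as torch.float16 (Half), while self.lam, the mixup interpolation coefficient, is still a float32 tensor, and torch.lerp requires the weight tensor to match the input dtype. A minimal sketch of a fix, assuming the hook attributes (lam, shuffle, mixup_has_been_applied) behave as the traceback suggests; the unsqueeze helper below is a stand-in for the one used in manifold_mixup.py:

import torch

def unsqueeze(x, n=1):
    # Add n trailing unit dims so lam broadcasts over the activation tensor
    # (stand-in for the helper referenced in manifold_mixup.py).
    return x[(...,) + (None,) * n]

def hook_mixup(self, module, input, output):
    # Forward hook that interpolates hidden activations between the batch
    # and a shuffled copy of itself (manifold mixup).
    if not self.mixup_has_been_applied:
        output_dims = len(output.size())
        # Cast lam to the activation's dtype so torch.lerp accepts Half
        # inputs under fp16 training (fixes "expected dtype Float but got
        # dtype Half").
        weight = unsqueeze(self.lam, n=output_dims - 1).to(output.dtype)
        output = torch.lerp(output[self.shuffle], output, weight=weight)
        self.mixup_has_been_applied = True
    return output

The same dtype mismatch would also go away by training in full precision (dropping to_fp16()), but casting the weight inside the hook keeps mixed precision working.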