
modeling_gpt2.py

# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""


import logging
import math
import os

import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss

from .activations import gelu_new
from .configuration_gpt2 import GPT2Config
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer


logger = logging.getLogger(__name__)

GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {
    "gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
    "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
    "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin",
    "gpt2-xl": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-pytorch_model.bin",
    "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",
}


def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load TF checkpoints in a PyTorch model."""
    try:
        import re
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split("/")
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model


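# A minimal, hypothetical usage sketch for the TF-to-PyTorch converter above
# (the checkpoint path is a placeholder, not a value shipped with this file):
#
#     config = GPT2Config()
#     model = GPT2Model(config)
#     model = load_tf_weights_in_gpt2(model, config, "/path/to/tf/model.ckpt")

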
class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False):
        super().__init__()
        self.output_attentions = config.output_attentions

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        b = self.bias[:, :, ns - nd : ns, :ns]
        w = w * b - 1e4 * (1 - b)

        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking

        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a, present] + attn_outputs[1:]

        return outputs  # a, present, (attentions)


class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super().__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = gelu_new
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super().__init__()
        nx = config.n_embd
        self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.attn = Attention(nx, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        output_attn = self.attn(
            self.ln_1(x), layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask
        )
        a = output_attn[0]  # output_attn: a, present, (attentions)

        x = x + a
        m = self.mlp(self.ln_2(x))
        x = x + m
        return x, output_attn[1]
        # outputs = [x] + output_attn[1:]
        # return outputs  # x, present, (attentions)


class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


GPT2_START_DOCSTRING = r"""

    This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
    usage and behavior.

    Parameters:
        config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

GPT2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.encode_plus` for details.

            `What are input IDs? <../glossary.html#input-ids>`__
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
            Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
            (see `past` output below). Can be used to speed up sequential decoding. The token ids which have their
            past given to this model should not be passed as input ids as they have already been computed.
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.

            `What are attention masks? <../glossary.html#attention-mask>`__
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Segment token indices to indicate first and second portions of the inputs.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token.

            `What are token type IDs? <../glossary.html#token-type-ids>`_
        position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.

            `What are position IDs? <../glossary.html#position-ids>`_
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
"""


@add_start_docstrings(
    "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
    GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.output_past = config.output_past

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.init_weights()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    @add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        r"""
    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        from transformers import GPT2Tokenizer, GPT2Model
        import torch

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            past_length = past[0][0].size(-2)
        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Attention mask.
        if attention_mask is not None:
            attention_mask = attention_mask.view(-1, input_shape[-1])
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = (
                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                )  # We can specify head_mask for each layer
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

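        # Deviation from the upstream HuggingFace implementation: the embedding
        # lookups, every Block, and the final LayerNorm below are wrapped in
        # torch.utils.checkpoint.checkpoint (the original direct calls are kept as
        # commented-out lines). Checkpointing recomputes activations during the
        # backward pass instead of storing them, trading extra compute for lower
        # memory use. Caveat: checkpoint only backpropagates when at least one of
        # its tensor arguments requires grad, so wrapping the embedding lookups on
        # integer input_ids may leave the embeddings and the checkpointed Blocks
        # without weight gradients; restore the direct calls if that matters.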
        if inputs_embeds is None:
            # inputs_embeds = self.wte(input_ids)
            inputs_embeds = torch.utils.checkpoint.checkpoint(self.wte, input_ids)

        # position_embeds = self.wpe(position_ids)
        position_embeds = torch.utils.checkpoint.checkpoint(self.wpe, position_ids)
        if token_type_ids is not None:
            # token_type_embeds = self.wte(token_type_ids)
            token_type_embeds = torch.utils.checkpoint.checkpoint(self.wte, token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            # outputs = block(
            #     hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask[i]
            # )
            outputs = torch.utils.checkpoint.checkpoint(block, hidden_states, layer_past, attention_mask, head_mask[i])

            hidden_states, present = outputs[:2]
            if self.output_past:
                presents = presents + (present,)

            if self.output_attentions:
                all_attentions.append(outputs[2])

        # hidden_states = self.ln_f(hidden_states)
        hidden_states = torch.utils.checkpoint.checkpoint(self.ln_f, hidden_states)

        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_past:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (presents), (all hidden_states), (attentions)


@add_start_docstrings(
    """The GPT2 Model transformer with a language modeling head on top
    (linear layer with weights tied to the input embeddings). """,
    GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.init_weights()

    def get_output_embeddings(self):
        return self.lm_head

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        # only keep the last token of input_ids if past is defined in kwargs
        if "past" in kwargs and kwargs["past"]:
            input_ids = input_ids[:, -1].unsqueeze(-1)

        inputs = {"input_ids": input_ids}
        inputs.update(kwargs)
        return inputs

    @add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``.
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``.
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``.

    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Language modeling loss.
        prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        import torch
        from transformers import GPT2Tokenizer, GPT2LMHeadModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained('gpt2')

        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]

        """
        transformer_outputs = self.transformer(
            input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)


@add_start_docstrings(
    """The GPT2 Model transformer with a language modeling and a multiple-choice classification
    head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
    The language modeling head has its weights tied to the input embeddings, and the
    classification head takes as input the hidden state at a specified classification
    token index in the input sequence.
""",
    GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        config.num_labels = 1
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.lm_head

    @add_start_docstrings_to_callable(GPT2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        mc_token_ids=None,
        lm_labels=None,
        mc_labels=None,
    ):
        r"""
        mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to the index of the last token of the input):
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1]``.
        lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``.
            Indices are selected in ``[-100, 0, ..., config.vocab_size]``.
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``.
        mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`):
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)

    Return:
        :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.GPT2Config`) and inputs:
        lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``lm_labels`` is provided):
            Language modeling loss.
        mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
            Multiple choice classification loss.
        lm_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mc_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        import torch
        from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')

        # Add a [CLS] token to the vocabulary (we should train it also!)
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly added token is the last token of the vocabulary

        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

        input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1

        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]

        """
        transformer_outputs = self.transformer(
            input_ids,
            past=past,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )

        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
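

# ---------------------------------------------------------------------------
# A minimal, hypothetical sketch of driving this checkpointed variant during
# training (the model name, prompt, and the use of inputs_embeds are
# assumptions, not part of this file). Because the embedding lookups above are
# checkpointed on integer input_ids, feeding embeddings that require grad is
# one way to keep gradients flowing through the Blocks; for pure inference
# under torch.no_grad() the checkpoint wrappers are effectively a pass-through.
#
#     import torch
#     from transformers import GPT2Tokenizer
#
#     tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#     model = GPT2LMHeadModel.from_pretrained("gpt2").train()
#
#     input_ids = torch.tensor([tokenizer.encode("Hello, my dog is cute")])
#     embeds = model.transformer.wte(input_ids)   # computed outside, requires grad
#     loss = model(inputs_embeds=embeds, labels=input_ids)[0]
#     loss.backward()  # Block activations are recomputed here instead of stored
# ---------------------------------------------------------------------------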