# coding=utf-8
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GPT-J model."""

import warnings
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_gptj import GPTJConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "hf-internal-testing/tiny-random-gptj"
_REAL_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
_CONFIG_FOR_DOC = "GPTJConfig"

GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "EleutherAI/gpt-j-6B",
    # See all GPT-J models at https://huggingface.co/models?filter=gptj
]


def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
    sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float()
    return torch.concat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)


def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
    x1 = x[:, :, :, ::2]
    x2 = x[:, :, :, 1::2]
    x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')


def apply_rotary_pos_emb(tensor: torch.Tensor, sincos: torch.Tensor) -> torch.Tensor:
    sin = sincos[0].repeat_interleave(2, dim=-1)[:, :, None, :]
    cos = sincos[1].repeat_interleave(2, dim=-1)[:, :, None, :]
    return (tensor * cos) + (rotate_every_two(tensor) * sin)
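

# Illustrative sketch (added commentary, not part of the original file): how the rotary
# helpers above compose. The sin/cos table from `create_sinusoidal_positions` is indexed
# with `position_ids`, split into its sin and cos halves, and applied to the first
# `rotary_dim` channels of each head. Assuming rotary_dim=64 and max positions 2048:
#
#     table = create_sinusoidal_positions(2048, 64)        # [2048, 64]: sin half then cos half
#     sincos = table[position_ids].chunk(2, dim=-1)        # two [batch, seq, 32] halves
#     q_rot = apply_rotary_pos_emb(q[..., :64], sincos)    # q is [batch, seq, heads, head_dim]
#
# The concrete sizes here are assumptions for illustration; in the model they come from
# `GPTJConfig.rotary_dim` and `config.max_position_embeddings`.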


class GPTJAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        max_positions = config.max_position_embeddings
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
        )
        self.register_buffer("masked_bias", torch.tensor(-1e9))

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
                f" `num_attention_heads`: {self.num_attention_heads})."
            )
        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.rotary_dim = config.rotary_dim
        pos_embd_dim = self.rotary_dim or self.embed_dim
        self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)

    def _split_heads(self, tensor, num_attention_heads, attn_head_size, rotary):
        """
        Splits hidden dim into attn_head_size and num_attention_heads
        """
        new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        if rotary:
            return tensor
        # if tensor.dim() == 5, flattens the first two dims, permutes, then inflates back the first two dims
        original_shape = tensor.size()
        tensor = tensor.view((-1,) + original_shape[-3:])
        tensor = tensor.permute(0, 2, 1, 3)
        tensor = tensor.view(original_shape[:-3] + tensor.size()[-3:])
        return tensor

    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden dim
        """
        # if tensor.dim() == 5, flattens the first two dims, permutes, then inflates back the first two dims
        original_shape = tensor.size()
        tensor = tensor.view((-1,) + original_shape[-3:])
        tensor = tensor.permute(0, 2, 1, 3)
        tensor = tensor.view(original_shape[:-3] + tensor.size()[-3:])
        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        return tensor.reshape(new_shape)

    def _attn(
        self,
        query,
        key,
        value,
        attention_mask=None,
        head_mask=None,
    ):
        # compute causal mask from causal mask buffer
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]

        # Keep the attention weights computation in fp32 to avoid overflow issues
        query = query.to(torch.float32)
        key = key.to(torch.float32)

        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        mask_value = torch.finfo(attn_weights.dtype).min
        # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
        # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
        mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
        attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        attn_weights = attn_weights / self.scale_attn

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor, Tuple[torch.Tensor]],
        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
    ]:
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        query = self._split_heads(query, self.num_attention_heads, self.head_dim, True)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, True)
        value = self._split_heads(value, self.num_attention_heads, self.head_dim, False)

        embed_positions = self.embed_positions
        if embed_positions.device != position_ids.device:
            embed_positions = embed_positions.to(position_ids.device)
            self.embed_positions = embed_positions

        sincos = embed_positions[position_ids]
        sincos = sincos.chunk(2, dim=-1)
        sincos = [sincos[0].contiguous(), sincos[1].contiguous()]

        if self.rotary_dim is not None:
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]

            k_rot = apply_rotary_pos_emb(k_rot, sincos)
            q_rot = apply_rotary_pos_emb(q_rot, sincos)

            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            key = apply_rotary_pos_emb(key, sincos)
            query = apply_rotary_pos_emb(query, sincos)

        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)

        if layer_past is not None:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # compute self-attention: V x Softmax(QK^T)
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)
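

# Note (added commentary, not in the original file): when `use_cache=True`, each layer's
# `present = (key, value)` holds tensors of shape [batch, num_heads, seq_length, head_dim].
# During generation, the next step's key/value for the single new token is concatenated
# onto this cache along the sequence dimension (see the `layer_past` handling above),
# so earlier tokens are not re-encoded on every step.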


class GPTJMLP(nn.Module):
    def __init__(self, intermediate_size, config):  # in MLP: intermediate_size= 4 * embed_dim
        super().__init__()
        embed_dim = config.n_embd

        self.fc_in = nn.Linear(embed_dim, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, embed_dim)

        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        hidden_states = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc_out(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class GPTJBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = GPTJAttention(config)
        self.mlp = GPTJMLP(inner_dim, config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states=hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]

        # GPT-J uses parallel residual branches: attention and MLP both read the same
        # layer-normed input and their outputs are summed with the residual.
        feed_forward_hidden_states = self.mlp(hidden_states)
        hidden_states = attn_output + feed_forward_hidden_states + residual

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions)


class GPTJPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTJConfig
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True
    _no_split_modules = ["GPTJBlock"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear,)):
            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, GPTJModel):
            module.gradient_checkpointing = value


GPTJ_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

GPTJ_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice. Uses a device map to distribute
    attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks
    across all devices.

    Args:
        device_map (`Dict[int, list]`, optional, defaults to None):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the GPT-J models have the
            following number of attention modules:

                - gpt-j-6B: 28

    Example:

    ```python
    # Here is an example of a device map on a machine with 4 GPUs using gpt-j-6B, which has a total of 28 attention modules:
    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6],
        1: [7, 8, 9, 10, 11, 12, 13],
        2: [14, 15, 16, 17, 18, 19, 20],
        3: [21, 22, 23, 24, 25, 26, 27],
    }
    model.parallelize(device_map)
    ```
"""

DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to CPU from a model parallel state.

    Example:

    ```python
    # On a 4 GPU machine with gpt-j-6B:
    model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    device_map = {
        0: [0, 1, 2, 3, 4, 5, 6],
        1: [7, 8, 9, 10, 11, 12, 13],
        2: [14, 15, 16, 17, 18, 19, 20],
        3: [21, 22, 23, 24, 25, 26, 27],
    }
    model.parallelize(device_map)  # Splits the model across several devices
    model.deparallelize()  # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
    ```
"""


@add_start_docstrings(
    "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
    GPTJ_START_DOCSTRING,
)
class GPTJModel(GPTJPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPTJBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPTJModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your"
            " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1,"
            " ...}",
            FutureWarning,
        )
        # Check validity of device_map
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        # Load onto devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to last
        self.ln_f = self.ln_f.to(self.last_device)

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1]).long()

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x num_attention_heads x N x N
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                )
            else:
                outputs = block(
                    hidden_states=hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


@add_start_docstrings(
    """
    The GPT-J Model transformer with a language modeling head on top.
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForCausalLM(GPTJPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPTJModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPTJForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
            " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own"
            " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':"
            " 0, 'transformer.h.1': 1, ...}",
            FutureWarning,
        )
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # only last token for inputs_ids if past is defined in kwargs
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        # make sure sampling in fp16 works correctly and
        # compute loss in fp32 to match with mesh-tf version
        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )


@add_start_docstrings(
    """
    The GPT-J Model transformer with a sequence classification head on top (linear layer).

    [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT, GPT-2, GPT-Neo) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForSequenceClassification(GPTJPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias", r"lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPTJModel(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint="ydshieh/tiny-random-gptj-for-sequence-classification",
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")

        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@add_start_docstrings(
    """
    The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    GPTJ_START_DOCSTRING,
)
class GPTJForQuestionAnswering(GPTJPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias", r"lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPTJModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        real_checkpoint=_REAL_CHECKPOINT_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
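

# ---------------------------------------------------------------------------
# Illustrative usage sketch (added commentary, not part of the original file).
# These classes are normally consumed through the public `transformers` API rather
# than imported from this module directly. Assuming `transformers` (and, for
# `device_map`, `accelerate`) is installed, a minimal generation example might be:
#
#     from transformers import AutoTokenizer, GPTJForCausalLM
#
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#     model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", device_map="auto")
#     inputs = tokenizer("GPT-J is a 6 billion parameter model", return_tensors="pt").to(model.device)
#     output_ids = model.generate(**inputs, max_new_tokens=20)
#     print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
#
# Passing `device_map` to `from_pretrained` follows the deprecation notes in the
# `parallelize` methods above; the checkpoint name and generation arguments here are
# only examples.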