FrankC0st1e committed
Commit a4f2dcb
1 Parent(s): 8fa0de6

fix bug in .py

Files changed (2)
  1. configuration_minicpm.py +0 -1
  2. modeling_minicpm.py +11 -11
configuration_minicpm.py CHANGED
@@ -174,7 +174,6 @@ class MiniCPM3Config(PretrainedConfig):
         self.use_cache = use_cache
         self.rope_theta = rope_theta
         self.rope_scaling = rope_scaling
-        self._rope_scaling_validation()
         self.attention_bias = attention_bias
         self.attention_dropout = attention_dropout
         self.scale_emb = scale_emb
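
For context: the dropped `self._rope_scaling_validation()` looks like the LLaMA-style check that only accepts a two-key `{"type", "factor"}` dict, while this checkpoint presumably ships a LongRoPE-style `rope_scaling` with per-dimension factor lists. A minimal sketch of the mismatch (the helper below and the dict values are illustrative stand-ins, not the repo's actual code or config):

def llama_style_rope_scaling_validation(rope_scaling):
    # Simplified stand-in for the removed helper: it insists on exactly
    # {"type", "factor"} with type in {"linear", "dynamic"} and factor > 1.
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(
            "`rope_scaling` must be a dictionary with two fields, `type` and `factor`"
        )
    scaling_type = rope_scaling.get("type")
    scaling_factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError("`rope_scaling`'s type field must be one of ['linear', 'dynamic']")
    if not isinstance(scaling_factor, float) or scaling_factor <= 1.0:
        raise ValueError("`rope_scaling`'s factor field must be a float > 1")

# A LongRoPE-style dict (illustrative values) trips the first check already,
# so keeping the call would make MiniCPM3Config.__init__ raise at load time.
longrope_like = {
    "type": "longrope",
    "long_factor": [1.0, 1.1, 1.2],
    "short_factor": [1.0, 1.0, 1.0],
    "original_max_position_embeddings": 32768,
}
try:
    llama_style_rope_scaling_validation(longrope_like)
except ValueError as err:
    print(f"stock validation rejects it: {err}")
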
modeling_minicpm.py CHANGED
@@ -48,7 +48,7 @@ from transformers.utils import (
     replace_return_docstrings,
 )
 from transformers.utils.import_utils import is_torch_fx_available
-from .configuration_minicpm import MiniCPMConfig
+from .configuration_minicpm import MiniCPM3Config
 import re
 
 try:
@@ -69,7 +69,7 @@ if is_torch_fx_available():
 
 logger = logging.get_logger(__name__)
 
-_CONFIG_FOR_DOC = "MiniCPMConfig"
+_CONFIG_FOR_DOC = "MiniCPM3Config"
 
 
 def _get_unpad_data(attention_mask):
@@ -331,7 +331,7 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
 class MiniCPMAttention(nn.Module):
     """Multi-headed attention from 'Attention Is All You Need' paper"""
 
-    def __init__(self, config: MiniCPMConfig, layer_idx: Optional[int] = None):
+    def __init__(self, config: MiniCPM3Config, layer_idx: Optional[int] = None):
         super().__init__()
         self.config = config
         self.layer_idx = layer_idx
@@ -784,7 +784,7 @@ class MiniCPMSdpaAttention(MiniCPMAttention):
         if output_attentions:
             # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
             logger.warning_once(
-                "MiniCPMModel is using MiniCPMSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+                "MiniCPM3Model is using MiniCPMSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
             )
             return super().forward(
@@ -884,7 +884,7 @@ MINICPM_ATTENTION_CLASSES = {
 
 
 class MiniCPMDecoderLayer(nn.Module):
-    def __init__(self, config: MiniCPMConfig, layer_idx: int):
+    def __init__(self, config: MiniCPM3Config, layer_idx: int):
         super().__init__()
         self.hidden_size = config.hidden_size
         self.self_attn = MINICPM_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
@@ -968,7 +968,7 @@ MINICPM_START_DOCSTRING = r"""
     and behavior.
 
     Parameters:
-        config ([`MiniCPMConfig`]):
+        config ([`MiniCPM3Config`]):
             Model configuration class with all the parameters of the model. Initializing with a config file does not
             load the weights associated with the model, only the configuration. Check out the
             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
@@ -980,7 +980,7 @@ MINICPM_START_DOCSTRING = r"""
     MINICPM_START_DOCSTRING,
 )
 class MiniCPM3PreTrainedModel(PreTrainedModel):
-    config_class = MiniCPMConfig
+    config_class = MiniCPM3Config
     base_model_prefix = "model"
     supports_gradient_checkpointing = True
     _no_split_modules = ["MiniCPMDecoderLayer"]
@@ -1080,10 +1080,10 @@ class MiniCPM3Model(MiniCPM3PreTrainedModel):
     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MiniCPMDecoderLayer`]
 
     Args:
-        config: MiniCPMConfig
+        config: MiniCPM3Config
     """
 
-    def __init__(self, config: MiniCPMConfig):
+    def __init__(self, config: MiniCPM3Config):
         super().__init__(config)
         self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
@@ -1244,7 +1244,7 @@ class MiniCPM3ForCausalLM(MiniCPM3PreTrainedModel):
 
     def __init__(self, config):
         super().__init__(config)
-        self.model = MiniCPMModel(config)
+        self.model = MiniCPM3Model(config)
         self.vocab_size = config.vocab_size
         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
 
@@ -1469,7 +1469,7 @@ class MiniCPM3ForSequenceClassification(MiniCPM3PreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
         self.num_labels = config.num_labels
-        self.model = MiniCPMModel(config)
+        self.model = MiniCPM3Model(config)
         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
 
         # Initialize weights and apply final processing
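
With both files renamed consistently, loading the checkpoint with `trust_remote_code=True` should resolve `MiniCPM3Config`/`MiniCPM3Model` instead of failing on the old `MiniCPMConfig`/`MiniCPMModel` names. A quick smoke test sketch, assuming these files are served from the openbmb/MiniCPM3-4B repo (the repo id is an assumption, adjust as needed):

from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "openbmb/MiniCPM3-4B"  # assumed repo id; point this at wherever the files are hosted

# The custom config class from configuration_minicpm.py should now load cleanly.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
print(type(config).__name__)  # expected: MiniCPM3Config

# modeling_minicpm.py no longer references the undefined MiniCPMConfig/MiniCPMModel names,
# so the causal-LM head builds its MiniCPM3Model backbone without a NameError.
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True, torch_dtype="auto")
print(type(model).__name__)  # expected: MiniCPM3ForCausalLM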