import math

from transformers import PretrainedConfig


class Phi2Config(PretrainedConfig):
    model_type = "phi2"
    attribute_map = {
        "max_position_embeddings": "initial_cos_sin_cache_len",
        "hidden_size": "d_embedding",
        "num_attention_heads": "n_attn_heads",
        "num_hidden_layers": "n_attn_blocks",
    }
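    # PretrainedConfig resolves `attribute_map` during attribute access, so
    # reads and writes of the standard names above (e.g. `config.hidden_size`)
    # are transparently rerouted to the renamed fields (here `d_embedding`).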

    def __init__(
        self,
        # Defaults follow Phi-2's published hyperparameters (2560-d embeddings,
        # 32 attention blocks with 32 heads each, a 2048-token rotary cache,
        # and a 51200-entry padded vocabulary). The kernel flags, dropout
        # rates, and padding multiple are assumed values, not from this file.
        vocab_size: int = 51200,
        vocab_chunk_for_gpu_efficiency: int = 64,
        initial_cos_sin_cache_len: int = 2048,
        d_embedding: int = 2560,
        n_attn_blocks: int = 32,
        n_attn_heads: int = 32,
        use_flash_attn: bool = False,
        use_flash_rotary: bool = False,
        use_fused_dense: bool = False,
        attn_pdrop: float = 0.0,
        embd_pdrop: float = 0.0,
        resid_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        weight_initialization_range: float = 0.02,
        tie_word_embeddings: bool = False,
        checkpointing: bool = False,
        **kwargs
    ) -> None:
        # Pad the vocabulary size up to the nearest multiple of
        # `vocab_chunk_for_gpu_efficiency` so the embedding and LM-head
        # matrix dimensions stay GPU-friendly.
        self.vocab_size = (
            math.ceil(vocab_size / vocab_chunk_for_gpu_efficiency)
            * vocab_chunk_for_gpu_efficiency
        )
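        # Worked example with assumed numbers: a raw tokenizer vocabulary of
        # 50,295 entries padded to a multiple of 64 yields
        # ceil(50295 / 64) * 64 = 786 * 64 = 50304 embedding rows.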
        self.initial_cos_sin_cache_len = initial_cos_sin_cache_len
        self.d_embedding = d_embedding
        self.n_attn_blocks = n_attn_blocks
        self.n_attn_heads = n_attn_heads
        self.use_flash_attn = use_flash_attn
        self.use_flash_rotary = use_flash_rotary
        self.use_fused_dense = use_fused_dense
        self.attn_pdrop = attn_pdrop
        self.embd_pdrop = embd_pdrop
        self.resid_pdrop = resid_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.weight_initialization_range = weight_initialization_range
        self.checkpointing = checkpointing

        # PretrainedConfig stores the remaining standard fields itself
        # (tie_word_embeddings, plus anything passed through **kwargs).
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
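
# Optional usage sketch (assumed, not part of the original file): registering
# the config under its model_type lets AutoConfig resolve a saved config.json
# to this class.
#
#   from transformers import AutoConfig
#   AutoConfig.register("phi2", Phi2Config)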


if __name__ == "__main__":
    # Smoke test: with the defaults above, the config can be built directly.
    phi2_config = Phi2Config()
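    # Illustrative sanity checks (not in the original file): the
    # attribute_map aliases track the renamed fields, and the config
    # survives the dict round trip that from_pretrained performs.
    assert phi2_config.hidden_size == phi2_config.d_embedding
    assert phi2_config.num_hidden_layers == phi2_config.n_attn_blocks
    restored = Phi2Config.from_dict(phi2_config.to_dict())
    assert restored.d_embedding == phi2_config.d_embedding
    print(phi2_config)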
|