mrinaldi committed (verified) · Commit bd3e230 · 1 Parent(s): 053b86e

Upload folder using huggingface_hub
Files changed (5)
  1. README.md +28 -0
  2. checkpoint.ckpt +3 -0
  3. config.json +94 -0
  4. matformer_config.json +67 -0
  5. modeling_matformer.py +47 -0
README.md ADDED
@@ -0,0 +1,28 @@
+ ---
+ tags:
+ - matformer
+ - custom-model
+ library_name: transformers
+ ---
+
+ # Matformer Model
+
+ Trained using [Matformer](https://github.com/mrinaldi97/matformer).
+
+ ## Installation
+
+ ```bash
+ pip install git+https://github.com/mrinaldi97/matformer.git
+ ```
+
+ ## Usage
+
+ ```python
+ import torch
+ from transformers import AutoModelForMaskedLM
+
+ model = AutoModelForMaskedLM.from_pretrained(
+     "mrinaldi/albertina_test",
+     trust_remote_code=True
+ )
+ ```
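Beyond the loading snippet in the README, here is a minimal masked-token inference sketch. It assumes the tokenizer recorded in config.json (`sapienzanlp/Minerva-350M-base-v1.0`), the `mask_token_id` of 32768 declared there, and a standard Hugging Face MaskedLM output with a `.logits` field; none of this interaction code ships in the repo.

```python
import torch
from transformers import AutoModelForMaskedLM, AutoTokenizer

# Assumption: the tokenizer named under "_tokenizer_name" in config.json.
tokenizer = AutoTokenizer.from_pretrained("sapienzanlp/Minerva-350M-base-v1.0")
model = AutoModelForMaskedLM.from_pretrained(
    "mrinaldi/albertina_test",
    trust_remote_code=True,
)
model.eval()

inputs = tokenizer("Roma è la capitale d'Italia.", return_tensors="pt")
input_ids = inputs["input_ids"].clone()
position = 3                     # arbitrary position to mask for this demo
input_ids[0, position] = 32768   # mask_token_id per config.json

with torch.no_grad():
    logits = model(input_ids=input_ids).logits  # assumes HF-style output

predicted_id = logits[0, position].argmax(-1).item()
print(tokenizer.decode([predicted_id]))
```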
checkpoint.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c42c0cf68352b6e0fd4833bb9a5e7d205f7d6f81421f54fe7b2d980969db661
+ size 1510669437
config.json ADDED
@@ -0,0 +1,94 @@
+ {
+   "_checkpoint_path": "../checkpoints_albertone/alibi_muon/mini_albertina_001_20251104_192914_last.ckpt",
+   "_matformer_config_dict": {
+     "_checkpoint_path": "../checkpoints_albertone/alibi_muon/mini_albertina_001_20251104_192914_last.ckpt",
+     "_model_class": "BERTModel",
+     "_tokenizer_name": "sapienzanlp/Minerva-350M-base-v1.0",
+     "attention_type": [],
+     "bias": false,
+     "block_size_for_attention": 128,
+     "bos_token_id": 1,
+     "compile_flexattn": false,
+     "custom_layers": {},
+     "decoder": null,
+     "default_layer": {
+       "attn_impl": "flash",
+       "ffn_activation": "swiglu",
+       "hooks": {},
+       "normalization": "rmsnorm",
+       "normalization_position": "post",
+       "positional_encoding": "alibi",
+       "sliding_window_size": null
+     },
+     "encoder": null,
+     "entropy": null,
+     "eos_token_id": 2,
+     "ffn_factor": 4.0,
+     "has_entropy_model": null,
+     "has_text_autoencoder": null,
+     "hidden_size": 768,
+     "is_causal": false,
+     "mask_token_id": 32768,
+     "masked_substitution_rate": 0.15,
+     "max_position_embeddings": 1024,
+     "model_class": null,
+     "name": "Mini-Albertina-001",
+     "num_attention_heads": 12,
+     "num_hidden_layers": 12,
+     "num_labels": 2,
+     "pad_token_id": 0,
+     "rms_norm_eps": 1e-06,
+     "rope_theta": 10000.0,
+     "sliding_type": null,
+     "tie_word_embeddings": false,
+     "training_objective": "masked",
+     "vocab_size": 32769
+   },
+   "_model_class": "BERTModel",
+   "_tokenizer_name": "sapienzanlp/Minerva-350M-base-v1.0",
+   "attention_type": [],
+   "auto_map": {
+     "AutoConfig": "modeling_matformer.MatformerConfig",
+     "AutoModel": "modeling_matformer.MatformerModel",
+     "AutoModelForMaskedLM": "modeling_matformer.MatformerForMaskedLM"
+   },
+   "bias": false,
+   "block_size_for_attention": 128,
+   "bos_token_id": 1,
+   "compile_flexattn": false,
+   "custom_layers": {},
+   "decoder": null,
+   "default_layer": {
+     "attn_impl": "flash",
+     "ffn_activation": "swiglu",
+     "hooks": {},
+     "normalization": "rmsnorm",
+     "normalization_position": "post",
+     "positional_encoding": "alibi",
+     "sliding_window_size": null
+   },
+   "encoder": null,
+   "entropy": null,
+   "eos_token_id": 2,
+   "ffn_factor": 4.0,
+   "has_entropy_model": null,
+   "has_text_autoencoder": null,
+   "hidden_size": 768,
+   "is_causal": false,
+   "mask_token_id": 32768,
+   "masked_substitution_rate": 0.15,
+   "max_position_embeddings": 1024,
+   "model_class": null,
+   "model_type": "matformer",
+   "name": "Mini-Albertina-001",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 10000.0,
+   "sliding_type": null,
+   "training_objective": "masked",
+   "transformers_version": "4.53.0.dev0",
+   "use_cache": true,
+   "vocab_size": 32769
+ }
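The `auto_map` block above is what routes the generic Auto classes to the custom code in `modeling_matformer.py` when `trust_remote_code=True` is passed. A minimal sketch of that resolution path, with the expected values taken directly from this config:

```python
from transformers import AutoConfig, AutoModelForMaskedLM

# auto_map: AutoConfig -> modeling_matformer.MatformerConfig,
#           AutoModelForMaskedLM -> modeling_matformer.MatformerForMaskedLM
config = AutoConfig.from_pretrained("mrinaldi/albertina_test", trust_remote_code=True)
print(config.model_type)         # "matformer"
print(config.hidden_size)        # 768
print(config.num_hidden_layers)  # 12

model = AutoModelForMaskedLM.from_pretrained(
    "mrinaldi/albertina_test",
    trust_remote_code=True,
)
```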
matformer_config.json ADDED
@@ -0,0 +1,67 @@
+ {
+   "model_class": "BERTModel",
+   "model_config": {
+     "name": "Mini-Albertina-001",
+     "hidden_size": 768,
+     "ffn_factor": 4.0,
+     "vocab_size": 32769,
+     "bos_token_id": 1,
+     "eos_token_id": 2,
+     "pad_token_id": 0,
+     "mask_token_id": 32768,
+     "masked_substitution_rate": 0.15,
+     "num_hidden_layers": 12,
+     "num_attention_heads": 12,
+     "tie_word_embeddings": false,
+     "rms_norm_eps": 1e-06,
+     "attention_type": [],
+     "max_position_embeddings": 1024,
+     "block_size_for_attention": 128,
+     "rope_theta": 10000.0,
+     "compile_flexattn": false,
+     "bias": false,
+     "training_objective": "masked",
+     "is_causal": false,
+     "default_layer": {
+       "attn_impl": "flash",
+       "sliding_window_size": null,
+       "positional_encoding": "alibi",
+       "normalization": "rmsnorm",
+       "normalization_position": "post",
+       "ffn_activation": "swiglu",
+       "hooks": {}
+     },
+     "custom_layers": {}
+   },
+   "training": {
+     "optimizer": "muon",
+     "lr_scheduling": true,
+     "lr": 0.0005,
+     "final_lr": 2e-05,
+     "hold_steps": 0.21,
+     "weight_decay": 0.01,
+     "scheduler": "custom",
+     "gradient_clip_val": 1.0,
+     "warmup_steps": 0.05,
+     "max_epochs": 1,
+     "accumulate_grad_batches": 5,
+     "seed": 27,
+     "save_every_n_steps": 5000,
+     "checkpoint_name": "mini_albertina_001"
+   },
+   "tokenizer": {
+     "type": "huggingface",
+     "pretrained_name": "sapienzanlp/Minerva-350M-base-v1.0",
+     "varlen_strategy": "unpadding"
+   },
+   "data": {
+     "data_root": "/home/matteo/Albertone/Albertina/Albertina_mdat",
+     "batch_size": 48,
+     "num_workers": 1,
+     "mdat_strategy": "Minerva1024",
+     "mdat_view": null
+   },
+   "save_dir": "./checkpoints_albertone",
+   "wandb_project": "Albertone",
+   "wandb_run_name": "Mini-Albertina-000"
+ }
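As a quick sanity check on the training recipe above, a sketch that reads this file with only the standard library; the derived effective batch size is simple arithmetic over fields in the JSON:

```python
import json

# Inspect the training recipe recorded in matformer_config.json.
with open("matformer_config.json") as f:
    cfg = json.load(f)

train, data = cfg["training"], cfg["data"]
print(train["optimizer"], train["lr"], train["scheduler"])  # muon 0.0005 custom

# Effective batch size per optimizer step: micro-batch x grad accumulation.
print(data["batch_size"] * train["accumulate_grad_batches"])  # 48 * 5 = 240
```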
modeling_matformer.py ADDED
@@ -0,0 +1,47 @@
+ # modeling_matformer.py
+ import os
+ import sys
+
+ matformer_root = os.getenv("MATFORMER_ROOT")
+ if matformer_root:
+     matformer_root = os.path.abspath(os.path.expanduser(matformer_root))
+     if matformer_root not in sys.path:
+         sys.path.insert(0, matformer_root)
+
+ try:
+     from matformer.huggingface_integration import (
+         MatformerForCausalLM,
+         MatformerForMaskedLM,
+         MatformerForSequenceClassification,
+         MatformerModel,
+         MatformerConfig,
+         register_matformer
+     )
+     register_matformer()
+ except ImportError as e:
+     import subprocess
+     import tempfile
+
+     print("Installing Matformer from GitHub...")
+     try:
+         subprocess.check_call([
+             sys.executable, "-m", "pip", "install",
+             "git+https://github.com/mrinaldi97/matformer.git"
+         ])
+
+         from matformer.huggingface_integration import (
+             MatformerForCausalLM,
+             MatformerForMaskedLM,
+             MatformerForSequenceClassification,
+             MatformerModel,
+             MatformerConfig,
+             register_matformer
+         )
+         register_matformer()
+
+     except Exception as install_error:
+         raise ImportError(
+             "Failed to install Matformer. Install manually:\n"
+             "  pip install git+https://github.com/mrinaldi97/matformer.git\n"
+             "Or set MATFORMER_ROOT environment variable"
+         ) from install_error
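The fallback above pip-installs Matformer at import time; the `MATFORMER_ROOT` branch lets you bypass that by pointing the module at a local checkout. A minimal sketch, with a hypothetical local path (the module is executed when `from_pretrained` runs with `trust_remote_code=True`, so the variable must be set before that call):

```python
import os

# Hypothetical path to a local clone of https://github.com/mrinaldi97/matformer;
# modeling_matformer.py reads this via os.getenv("MATFORMER_ROOT") before importing.
os.environ["MATFORMER_ROOT"] = "/path/to/local/matformer"

from transformers import AutoModelForMaskedLM

model = AutoModelForMaskedLM.from_pretrained(
    "mrinaldi/albertina_test",
    trust_remote_code=True,
)
```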