Commit 14145b4 (verified, 0 parents) committed by juliendenize

Super-squash branch 'main' using huggingface_hub

.gitattributes ADDED
@@ -0,0 +1,37 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
tokenizer.json filter=lfs diff=lfs merge=lfs -text
tekken.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,234 @@
---
license: apache-2.0
---

# mistralai/Ministral-3-14B-Instruct-2512

For now, you can only run this model with vLLM and Transformers using Julien Denize's branches:
- [vLLM](#vllm) branch: https://github.com/juliendenize/vllm/tree/add_llama_4_scaling_support_to_llama
- [Transformers](#transformers) branch: https://github.com/juliendenize/transformers/tree/add_llama_4_scaling

The architectural change compared with Mistral-Small-3.2 is the use of YaRN with Llama 4 scaling.

Please note that the 3B model also has tied embeddings (no separate output layer) to reduce the number of weights. This is not the case for the 8B and 14B models.

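The YaRN / Llama 4 scaling mentioned above is visible in this repo's `config.json` (reproduced further down). A minimal inspection sketch, assuming only `huggingface_hub` is installed; the keys printed come from the `rope_parameters` block of `text_config`:

```python
import json

from huggingface_hub import hf_hub_download

# Download and inspect the rope scaling settings shipped with this checkpoint.
config_path = hf_hub_download(
    repo_id="mistralai/Ministral-3-14B-Instruct-2512", filename="config.json"
)
with open(config_path) as f:
    config = json.load(f)

# Expected (per config.json below): rope_type "yarn", factor 16.0,
# original_max_position_embeddings 16384, llama_4_scaling_beta 0.1.
print(config["text_config"]["rope_parameters"])
```
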
## vLLM

1. Install vLLM

```sh
VLLM_USE_PRECOMPILED=1 uv pip install git+https://github.com/juliendenize/vllm.git@add_llama_4_scaling_support_to_llama
```

2. Launch the server

```sh
vllm serve mistralai/Ministral-3-14B-Instruct-2512 \
  --tokenizer_mode mistral --config_format mistral \
  --load_format mistral --tool-call-parser mistral \
  --enable-auto-tool-choice --tensor-parallel-size 1
```

3. Test it

```python
from datetime import datetime, timedelta

from openai import OpenAI
from huggingface_hub import hf_hub_download

# Modify OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:8000/v1"

TEMP = 0.15
MAX_TOK = 262144

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

models = client.models.list()
model = models.data[0].id


def load_system_prompt() -> str:
    file_path = hf_hub_download(repo_id="mistralai/Ministral-3-14B-Instruct-2512", filename="SYSTEM_PROMPT.txt")
    with open(file_path, "r") as file:
        system_prompt = file.read()
    today = datetime.today().strftime("%Y-%m-%d")
    yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
    return system_prompt.format(today=today, yesterday=yesterday)


SYSTEM_PROMPT = load_system_prompt()
image_url = "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438"

messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "What action do you think I should take in this situation? List all the possible actions and explain why you think they are good or bad.",
            },
            {"type": "image_url", "image_url": {"url": image_url}},
        ],
    },
]


response = client.chat.completions.create(
    model=model,
    messages=messages,
    temperature=TEMP,
    max_tokens=MAX_TOK,
)

print(response.choices[0].message.content)
```
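
Since the server above is launched with `--tool-call-parser mistral` and `--enable-auto-tool-choice`, tool calling can be exercised through the standard OpenAI `tools` field. A minimal sketch; the `get_weather` function below is a made-up example, not part of this repository:

```python
from openai import OpenAI

# Reuse the local vLLM endpoint started in step 2.
client = OpenAI(api_key="EMPTY", base_url="http://localhost:8000/v1")
model = client.models.list().data[0].id

# Hypothetical tool definition, used only to illustrate the request format.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather in a given city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

response = client.chat.completions.create(
    model=model,
    messages=[{"role": "user", "content": "What is the weather in Paris right now?"}],
    tools=tools,
    temperature=0.15,
)

# With auto tool choice enabled, the model may answer with a tool call instead of text.
print(response.choices[0].message.tool_calls)
```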

## Transformers

1. Install Transformers

```sh
pip install git+https://github.com/juliendenize/transformers@add_llama_4_scaling
```

or clone it:

```sh
git clone git@github.com:juliendenize/transformers.git transformers_juliendenize
cd transformers_juliendenize
git checkout add_llama_4_scaling
```

2. Test (with mistral-common)

```sh
pip install "mistral-common[image]"
```

```python
from datetime import datetime, timedelta
import torch

from huggingface_hub import hf_hub_download
from transformers import Mistral3ForConditionalGeneration, AutoTokenizer


def load_system_prompt() -> str:
    file_path = hf_hub_download(repo_id="mistralai/Ministral-3-14B-Instruct-2512", filename="SYSTEM_PROMPT.txt")
    with open(file_path, "r") as file:
        system_prompt = file.read()
    today = datetime.today().strftime("%Y-%m-%d")
    yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
    return system_prompt.format(today=today, yesterday=yesterday)


SYSTEM_PROMPT = load_system_prompt()

tokenizer = AutoTokenizer.from_pretrained("mistralai/Ministral-3-14B-Instruct-2512", tokenizer_type="mistral")

model = Mistral3ForConditionalGeneration.from_pretrained(
    "mistralai/Ministral-3-14B-Instruct-2512", torch_dtype=torch.bfloat16, device_map="auto"
).eval()

image_url = "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438"

messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "What action do you think I should take in this situation? List all the possible actions and explain why you think they are good or bad.",
            },
            {"type": "image_url", "image_url": {"url": image_url}},
        ],
    },
]

tokenized = tokenizer.apply_chat_template(messages, return_dict=True)

input_ids = torch.tensor(tokenized.input_ids, device="cuda").unsqueeze(0)
attention_mask = torch.tensor(tokenized.attention_mask, device="cuda").unsqueeze(0)
pixel_values = torch.tensor(
    tokenized.pixel_values[0], dtype=torch.bfloat16, device="cuda"
).unsqueeze(0)
image_sizes = torch.tensor(pixel_values.shape[-2:], device="cuda").unsqueeze(0)

with torch.inference_mode():
    output = model.generate(
        input_ids=input_ids,
        attention_mask=attention_mask,
        pixel_values=pixel_values,
        image_sizes=image_sizes,
        max_new_tokens=1000,
    )[0]

decoded_output = tokenizer.decode(output, skip_special_tokens=True)
print(decoded_output)
```

3. Test (without mistral-common)

```python
from datetime import datetime, timedelta
import torch

from huggingface_hub import hf_hub_download
from transformers import Mistral3ForConditionalGeneration, AutoProcessor


def load_system_prompt() -> str:
    file_path = hf_hub_download(repo_id="mistralai/Ministral-3-14B-Instruct-2512", filename="SYSTEM_PROMPT.txt")
    with open(file_path, "r") as file:
        system_prompt = file.read()
    today = datetime.today().strftime("%Y-%m-%d")
    yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
    return system_prompt.format(today=today, yesterday=yesterday)


SYSTEM_PROMPT = load_system_prompt()

processor = AutoProcessor.from_pretrained("mistralai/Ministral-3-14B-Instruct-2512")

model = Mistral3ForConditionalGeneration.from_pretrained(
    "mistralai/Ministral-3-14B-Instruct-2512", torch_dtype=torch.bfloat16, device_map="auto"
).eval()

image_url = "https://static.wikia.nocookie.net/essentialsdocs/images/7/70/Battle.png/revision/latest?cb=20220523172438"

messages = [
    {"role": "system", "content": [
        {"type": "text", "text": SYSTEM_PROMPT}
    ]},
    {
        "role": "user",
        "content": [
            {
                "type": "text",
                "text": "What action do you think I should take in this situation? List all the possible actions and explain why you think they are good or bad.",
            },
            {"type": "image", "url": image_url},
        ],
    },
]

inputs = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt").to(device=model.device, dtype=torch.bfloat16)

with torch.inference_mode():
    output = model.generate(
        **inputs,
        max_new_tokens=1000,
    )

decoded_output = processor.batch_decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
print(decoded_output)
```
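
The chat template (see `chat_template.jinja` below) also accepts plain string content, so text-only prompts need no image handling. A minimal text-only sketch, reusing the same checkpoint and `AutoProcessor` path as above (the prompt text is just an example):

```python
import torch
from transformers import Mistral3ForConditionalGeneration, AutoProcessor

model_id = "mistralai/Ministral-3-14B-Instruct-2512"

processor = AutoProcessor.from_pretrained(model_id)
model = Mistral3ForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
).eval()

# Plain string content: the template wraps it in [INST]...[/INST] directly.
messages = [{"role": "user", "content": "Give me a short history of the city of Paris."}]

inputs = processor.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(model.device)

with torch.inference_mode():
    output = model.generate(**inputs, max_new_tokens=300)

print(processor.batch_decode(output, skip_special_tokens=True)[0])
```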
SYSTEM_PROMPT.txt ADDED
@@ -0,0 +1,29 @@
You are Ministral-3-14B-Instruct-2512, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.
You power an AI assistant called Le Chat.
Your knowledge base was last updated on 2023-10-01.
The current date is {today}.

When you're not sure about some information or when the user's request requires up-to-date or specific data, you must use the available tools to fetch the information. Do not hesitate to use tools whenever they can provide a more accurate or complete response. If no relevant tools are available, then clearly state that you don't have the information and avoid making up anything.
If the user's question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?").
You are always very attentive to dates, in particular you try to resolve dates (e.g. "yesterday" is {yesterday}) and when asked about information at specific dates, you discard information that is at another date.
You follow these instructions in all languages, and always respond to the user in the language they use or request.
Next sections describe the capabilities that you have.

# WEB BROWSING INSTRUCTIONS

You cannot perform any web search or access internet to open URLs, links etc. If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat.

# MULTI-MODAL INSTRUCTIONS

You have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos.
You cannot read nor transcribe audio files or videos.

# TOOL CALLING INSTRUCTIONS

You may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations:

1. When the request requires up-to-date information.
2. When the request requires specific data that you do not have in your knowledge base.
3. When the request involves actions that you cannot perform without tools.

Always prioritize using tools to provide the most accurate and helpful response. If tools are not available, inform the user that you cannot perform the requested action at the moment.
chat_template.jinja ADDED
@@ -0,0 +1,121 @@
1
+ {#- Default system message if no system prompt is passed. #}
2
+ {%- set default_system_message = 'You are Ministral-3-14B-Instruct-2512, a Large Language Model (LLM) created by Mistral AI, a French startup headquartered in Paris.\nYou power an AI assistant called Le Chat.\nYour knowledge base was last updated on 2023-10-01.\nThe current date is {today}.\n\nWhen you\'re not sure about some information or when the user\'s request requires up-to-date or specific data, you must use the available tools to fetch the information. Do not hesitate to use tools whenever they can provide a more accurate or complete response. If no relevant tools are available, then clearly state that you don\'t have the information and avoid making up anything.\nIf the user\'s question is not clear, ambiguous, or does not provide enough context for you to accurately answer the question, you do not try to answer it right away and you rather ask the user to clarify their request (e.g. "What are some good restaurants around me?" => "Where are you?" or "When is the next flight to Tokyo" => "Where do you travel from?").\nYou are always very attentive to dates, in particular you try to resolve dates (e.g. "yesterday" is {yesterday}) and when asked about information at specific dates, you discard information that is at another date.\nYou follow these instructions in all languages, and always respond to the user in the language they use or request.\nNext sections describe the capabilities that you have.\n\n# WEB BROWSING INSTRUCTIONS\n\nYou cannot perform any web search or access internet to open URLs, links etc. If it seems like the user is expecting you to do so, you clarify the situation and ask the user to copy paste the text directly in the chat.\n\n# MULTI-MODAL INSTRUCTIONS\n\nYou have the ability to read images, but you cannot generate images. You also cannot transcribe audio files or videos.\nYou cannot read nor transcribe audio files or videos.\n\n# TOOL CALLING INSTRUCTIONS\n\nYou may have access to tools that you can use to fetch information or perform actions. You must use these tools in the following situations:\n\n1. When the request requires up-to-date information.\n2. When the request requires specific data that you do not have in your knowledge base.\n3. When the request involves actions that you cannot perform without tools.\n\nAlways prioritize using tools to provide the most accurate and helpful response. If tools are not available, inform the user that you cannot perform the requested action at the moment.' %}
3
+
4
+ {#- Begin of sequence token. #}
5
+ {{- bos_token }}
6
+
7
+ {#- Handle system prompt if it exists. #}
8
+ {#- System prompt supports text content or text chunks. #}
9
+ {%- if messages[0]['role'] == 'system' %}
10
+ {{- '[SYSTEM_PROMPT]' -}}
11
+ {%- if messages[0]['content'] is string %}
12
+ {{- messages[0]['content'] -}}
13
+ {%- else %}
14
+ {%- for block in messages[0]['content'] %}
15
+ {%- if block['type'] == 'text' %}
16
+ {{- block['text'] }}
17
+ {%- else %}
18
+ {{- raise_exception('Only text chunks are supported in system message contents.') }}
19
+ {%- endif %}
20
+ {%- endfor %}
21
+ {%- endif %}
22
+ {{- '[/SYSTEM_PROMPT]' -}}
23
+ {%- set loop_messages = messages[1:] %}
24
+ {%- else %}
25
+ {%- set loop_messages = messages %}
26
+ {%- if default_system_message != '' %}
27
+ {{- '[SYSTEM_PROMPT]' + default_system_message + '[/SYSTEM_PROMPT]' }}
28
+ {%- endif %}
29
+ {%- endif %}
30
+
31
+
32
+ {#- Tools definition #}
33
+ {%- set tools_definition = '' %}
34
+ {%- set has_tools = false %}
35
+ {%- if tools is defined and tools is not none and tools|length > 0 %}
36
+ {%- set has_tools = true %}
37
+ {%- set tools_definition = '[AVAILABLE_TOOLS]' + (tools| tojson) + '[/AVAILABLE_TOOLS]' %}
38
+ {{- tools_definition }}
39
+ {%- endif %}
40
+
41
+ {#- Checks for alternating user/assistant messages. #}
42
+ {%- set ns = namespace(index=0) %}
43
+ {%- for message in loop_messages %}
44
+ {%- if message.role == 'user' or (message.role == 'assistant' and (message.tool_calls is not defined or message.tool_calls is none or message.tool_calls | length == 0)) %}
45
+ {%- if (message['role'] == 'user') != (ns.index % 2 == 0) %}
46
+ {{- raise_exception('After the optional system message, conversation roles must alternate user and assistant roles except for tool calls and results.') }}
47
+ {%- endif %}
48
+ {%- set ns.index = ns.index + 1 %}
49
+ {%- endif %}
50
+ {%- endfor %}
51
+
52
+ {#- Handle conversation messages. #}
53
+ {%- for message in loop_messages %}
54
+
55
+ {#- User messages supports text content or text and image chunks. #}
56
+ {%- if message['role'] == 'user' %}
57
+ {%- if message['content'] is string %}
58
+ {{- '[INST]' + message['content'] + '[/INST]' }}
59
+ {%- elif message['content'] | length > 0 %}
60
+ {{- '[INST]' }}
61
+ {%- if message['content'] | length == 2 %}
62
+ {%- set blocks = message['content'] | sort(attribute='type') %}
63
+ {%- else %}
64
+ {%- set blocks = message['content'] %}
65
+ {%- endif %}
66
+ {%- for block in blocks %}
67
+ {%- if block['type'] == 'text' %}
68
+ {{- block['text'] }}
69
+ {%- elif block['type'] in ['image', 'image_url'] %}
70
+ {{- '[IMG]' }}
71
+ {%- else %}
72
+ {{- raise_exception('Only text, image and image_url chunks are supported in user message content.') }}
73
+ {%- endif %}
74
+ {%- endfor %}
75
+ {{- '[/INST]' }}
76
+ {%- else %}
77
+ {{- raise_exception('User message must have a string or a list of chunks in content') }}
78
+ {%- endif %}
79
+
80
+ {#- Assistant messages supports text content or text and image chunks. #}
81
+ {%- elif message['role'] == 'assistant' %}
82
+ {%- if message['content'] is not none and message['content'] | length > 0 and message['tool_calls'] is defined and message['tool_calls'] is not none and message['tool_calls'] | length > 0 %}
83
+ {{- raise_exception('Assistant message cannot have both content and tool calls.') }}
84
+ {%- endif %}
85
+
86
+ {%- if message['content'] is string %}
87
+ {{- message['content'] }}
88
+ {%- elif message['content'] | length > 0 %}
89
+ {%- for block in message['content'] %}
90
+ {%- if block['type'] == 'text' %}
91
+ {{- block['text'] }}
92
+ {%- else %}
93
+ {{- raise_exception('Only text chunks are supported in assistant message contents.') }}
94
+ {%- endif %}
95
+ {%- endfor %}
96
+ {%- elif message['tool_calls'] is defined and message['tool_calls'] is not none and message['tool_calls'] | length > 0 %}
97
+ {%- for tool in message['tool_calls'] %}
98
+ {%- set arguments = tool['function']['arguments'] %}
99
+ {%- if arguments is not string %}
100
+ {%- set arguments = arguments|tojson|safe %}
101
+ {%- elif arguments == '' %}
102
+ {%- set arguments = '{}' %}
103
+ {%- endif %}
104
+ {{- '[TOOL_CALLS]' + tool['function']['name'] + '[ARGS]' + arguments }}
105
+ {%- endfor %}
106
+ {%- else %}
107
+ {{- raise_exception('Assistant message must have a string or a list of chunks in content or a list of tool calls.') }}
108
+ {%- endif %}
109
+
110
+ {#- End of sequence token for each assistant messages. #}
111
+ {{- eos_token }}
112
+
113
+ {#- Tool messages only supports text content. #}
114
+ {%- elif message['role'] == 'tool' %}
115
+ {{- '[TOOL_RESULTS]' + message['content']|string + '[/TOOL_RESULTS]' }}
116
+
117
+ {#- Raise exception for unsupported roles. #}
118
+ {%- else %}
119
+ {{- raise_exception('Only user, assistant and tool roles are supported, got ' + message) }}
120
+ {%- endif %}
121
+ {%- endfor %}
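
To see the string this template actually renders (the `[SYSTEM_PROMPT]`, `[INST]`, and related markers), a minimal sketch, assuming the template is picked up by `AutoProcessor` from this repo:

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("mistralai/Ministral-3-14B-Instruct-2512")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

# tokenize=False returns the raw prompt string instead of token ids.
prompt = processor.apply_chat_template(messages, tokenize=False)
print(prompt)
# Expected shape (per the template above):
# <s>[SYSTEM_PROMPT]You are a helpful assistant.[/SYSTEM_PROMPT][INST]Hello![/INST]
```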
config.json ADDED
@@ -0,0 +1,61 @@
{
  "architectures": [
    "Mistral3ForConditionalGeneration"
  ],
  "dtype": "bfloat16",
  "image_token_index": 10,
  "model_type": "mistral3",
  "multimodal_projector_bias": false,
  "projector_hidden_act": "gelu",
  "spatial_merge_size": 2,
  "text_config": {
    "attention_dropout": 0.0,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 5120,
    "initializer_range": 0.02,
    "intermediate_size": 16384,
    "max_position_embeddings": 262144,
    "model_type": "mistral",
    "mscale": 1,
    "num_attention_heads": 32,
    "num_hidden_layers": 40,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-05,
    "rope_parameters": {
      "beta_fast": 32.0,
      "beta_slow": 1.0,
      "factor": 16.0,
      "llama_4_scaling_beta": 0.1,
      "mscale_all_dim": 1,
      "original_max_position_embeddings": 16384,
      "rope_theta": 1000000000.0,
      "rope_type": "yarn",
      "type": "yarn"
    },
    "sliding_window": null,
    "use_cache": true,
    "vocab_size": 131072
  },
  "transformers_version": "5.0.0.dev0",
  "vision_config": {
    "attention_dropout": 0.0,
    "head_dim": 64,
    "hidden_act": "silu",
    "hidden_size": 1024,
    "image_size": 1540,
    "initializer_range": 0.02,
    "intermediate_size": 4096,
    "model_type": "pixtral",
    "num_attention_heads": 16,
    "num_channels": 3,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "rope_parameters": {
      "rope_theta": 10000.0,
      "rope_type": "default"
    },
    "rope_theta": 10000.0
  },
  "vision_feature_layer": -1
}
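
Note that the long-context numbers are mutually consistent: `original_max_position_embeddings` (16384) × `factor` (16.0) = 262144, which matches `max_position_embeddings` above and the `max_length` in `generation_config.json` below.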
consolidated.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52c3772c85a9fdd26ac51a2f42aae24316f7fc797a2e95fa5e45837876897b01
3
+ size 27890132928
generation_config.json ADDED
@@ -0,0 +1,7 @@
{
  "bos_token_id": 1,
  "eos_token_id": 2,
  "max_length": 262144,
  "pad_token_id": 11,
  "transformers_version": "5.0.0.dev0"
}
model-00001-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a381935aa826a6960e752ab889f51b26870fa4ae34580dc5d33bba3b616a1e3b
3
+ size 4925537056
model-00002-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f71f44e7d6408e2933ecf1fe007be40993c1ce2f5a6dec4a73ac85d8d6d4de0b
3
+ size 4865565920
model-00003-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d680c111fee9689b2c170ef058ead26887b88816f05b7701363754e23caa771
3
+ size 4865565968
model-00004-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7d394bc2e748439194f5ccbea3900e31987f5e740cfdef24c94a3ada1097290
3
+ size 4865565968
model-00005-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:727054c31319d016cce95822b67d0dfee7b67793f59c9d1fd171931692bee12c
3
+ size 4865565968
model-00006-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56eb280079466b136fd33638f1b3095cfa9fdb84006a23b36c8944dc7156fdb7
3
+ size 3502340328
model.safetensors.index.json ADDED
@@ -0,0 +1,593 @@
1
+ {
2
+ "metadata": {
3
+ "total_parameters": 13945031680,
4
+ "total_size": 27890063360
5
+ },
6
+ "weight_map": {
7
+ "language_model.lm_head.weight": "model-00006-of-00006.safetensors",
8
+ "language_model.model.embed_tokens.weight": "model-00001-of-00006.safetensors",
9
+ "language_model.model.layers.0.input_layernorm.weight": "model-00001-of-00006.safetensors",
10
+ "language_model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
11
+ "language_model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
12
+ "language_model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
13
+ "language_model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
14
+ "language_model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
15
+ "language_model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
16
+ "language_model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
17
+ "language_model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
18
+ "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00006.safetensors",
19
+ "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
20
+ "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
21
+ "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
22
+ "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
23
+ "language_model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
24
+ "language_model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
25
+ "language_model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
26
+ "language_model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
27
+ "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
28
+ "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
29
+ "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
30
+ "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
31
+ "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
32
+ "language_model.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
33
+ "language_model.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
34
+ "language_model.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
35
+ "language_model.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
36
+ "language_model.model.layers.11.input_layernorm.weight": "model-00002-of-00006.safetensors",
37
+ "language_model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
38
+ "language_model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
39
+ "language_model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
40
+ "language_model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
41
+ "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
42
+ "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
43
+ "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
44
+ "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
45
+ "language_model.model.layers.12.input_layernorm.weight": "model-00003-of-00006.safetensors",
46
+ "language_model.model.layers.12.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
47
+ "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
48
+ "language_model.model.layers.12.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
49
+ "language_model.model.layers.12.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
50
+ "language_model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
51
+ "language_model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
52
+ "language_model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
53
+ "language_model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
54
+ "language_model.model.layers.13.input_layernorm.weight": "model-00003-of-00006.safetensors",
55
+ "language_model.model.layers.13.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
56
+ "language_model.model.layers.13.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
57
+ "language_model.model.layers.13.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
58
+ "language_model.model.layers.13.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
59
+ "language_model.model.layers.13.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
60
+ "language_model.model.layers.13.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
61
+ "language_model.model.layers.13.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
62
+ "language_model.model.layers.13.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
63
+ "language_model.model.layers.14.input_layernorm.weight": "model-00003-of-00006.safetensors",
64
+ "language_model.model.layers.14.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
65
+ "language_model.model.layers.14.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
66
+ "language_model.model.layers.14.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
67
+ "language_model.model.layers.14.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
68
+ "language_model.model.layers.14.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
69
+ "language_model.model.layers.14.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
70
+ "language_model.model.layers.14.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
71
+ "language_model.model.layers.14.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
72
+ "language_model.model.layers.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
73
+ "language_model.model.layers.15.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
74
+ "language_model.model.layers.15.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
75
+ "language_model.model.layers.15.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
76
+ "language_model.model.layers.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
77
+ "language_model.model.layers.15.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
78
+ "language_model.model.layers.15.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
79
+ "language_model.model.layers.15.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
80
+ "language_model.model.layers.15.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
81
+ "language_model.model.layers.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
82
+ "language_model.model.layers.16.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
83
+ "language_model.model.layers.16.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
84
+ "language_model.model.layers.16.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
85
+ "language_model.model.layers.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
86
+ "language_model.model.layers.16.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
87
+ "language_model.model.layers.16.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
88
+ "language_model.model.layers.16.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
89
+ "language_model.model.layers.16.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
90
+ "language_model.model.layers.17.input_layernorm.weight": "model-00003-of-00006.safetensors",
91
+ "language_model.model.layers.17.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
92
+ "language_model.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
93
+ "language_model.model.layers.17.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
94
+ "language_model.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
95
+ "language_model.model.layers.17.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
96
+ "language_model.model.layers.17.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
97
+ "language_model.model.layers.17.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
98
+ "language_model.model.layers.17.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
99
+ "language_model.model.layers.18.input_layernorm.weight": "model-00003-of-00006.safetensors",
100
+ "language_model.model.layers.18.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
101
+ "language_model.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
102
+ "language_model.model.layers.18.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
103
+ "language_model.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
104
+ "language_model.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
105
+ "language_model.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
106
+ "language_model.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
107
+ "language_model.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
108
+ "language_model.model.layers.19.input_layernorm.weight": "model-00003-of-00006.safetensors",
109
+ "language_model.model.layers.19.mlp.down_proj.weight": "model-00003-of-00006.safetensors",
110
+ "language_model.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
111
+ "language_model.model.layers.19.mlp.up_proj.weight": "model-00003-of-00006.safetensors",
112
+ "language_model.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
113
+ "language_model.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
114
+ "language_model.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
115
+ "language_model.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
116
+ "language_model.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
117
+ "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00006.safetensors",
118
+ "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
119
+ "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
120
+ "language_model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
121
+ "language_model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
122
+ "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
123
+ "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
124
+ "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
125
+ "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
126
+ "language_model.model.layers.20.input_layernorm.weight": "model-00004-of-00006.safetensors",
127
+ "language_model.model.layers.20.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
128
+ "language_model.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00006.safetensors",
129
+ "language_model.model.layers.20.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
130
+ "language_model.model.layers.20.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
131
+ "language_model.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00006.safetensors",
132
+ "language_model.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00006.safetensors",
133
+ "language_model.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00006.safetensors",
134
+ "language_model.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00006.safetensors",
135
+ "language_model.model.layers.21.input_layernorm.weight": "model-00004-of-00006.safetensors",
136
+ "language_model.model.layers.21.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
137
+ "language_model.model.layers.21.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
138
+ "language_model.model.layers.21.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
139
+ "language_model.model.layers.21.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
140
+ "language_model.model.layers.21.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
141
+ "language_model.model.layers.21.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
142
+ "language_model.model.layers.21.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
143
+ "language_model.model.layers.21.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
144
+ "language_model.model.layers.22.input_layernorm.weight": "model-00004-of-00006.safetensors",
145
+ "language_model.model.layers.22.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
146
+ "language_model.model.layers.22.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
147
+ "language_model.model.layers.22.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
148
+ "language_model.model.layers.22.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
149
+ "language_model.model.layers.22.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
150
+ "language_model.model.layers.22.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
151
+ "language_model.model.layers.22.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
152
+ "language_model.model.layers.22.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
153
+ "language_model.model.layers.23.input_layernorm.weight": "model-00004-of-00006.safetensors",
154
+ "language_model.model.layers.23.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
155
+ "language_model.model.layers.23.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
156
+ "language_model.model.layers.23.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
157
+ "language_model.model.layers.23.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
158
+ "language_model.model.layers.23.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
159
+ "language_model.model.layers.23.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
160
+ "language_model.model.layers.23.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
161
+ "language_model.model.layers.23.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
162
+ "language_model.model.layers.24.input_layernorm.weight": "model-00004-of-00006.safetensors",
163
+ "language_model.model.layers.24.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
164
+ "language_model.model.layers.24.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
165
+ "language_model.model.layers.24.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
166
+ "language_model.model.layers.24.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
167
+ "language_model.model.layers.24.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
168
+ "language_model.model.layers.24.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
169
+ "language_model.model.layers.24.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
170
+ "language_model.model.layers.24.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
171
+ "language_model.model.layers.25.input_layernorm.weight": "model-00004-of-00006.safetensors",
172
+ "language_model.model.layers.25.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
173
+ "language_model.model.layers.25.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
174
+ "language_model.model.layers.25.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
175
+ "language_model.model.layers.25.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
176
+ "language_model.model.layers.25.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
177
+ "language_model.model.layers.25.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
178
+ "language_model.model.layers.25.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
179
+ "language_model.model.layers.25.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
180
+ "language_model.model.layers.26.input_layernorm.weight": "model-00004-of-00006.safetensors",
181
+ "language_model.model.layers.26.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
182
+ "language_model.model.layers.26.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
183
+ "language_model.model.layers.26.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
184
+ "language_model.model.layers.26.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
185
+ "language_model.model.layers.26.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
186
+ "language_model.model.layers.26.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
187
+ "language_model.model.layers.26.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
188
+ "language_model.model.layers.26.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
189
+ "language_model.model.layers.27.input_layernorm.weight": "model-00004-of-00006.safetensors",
190
+ "language_model.model.layers.27.mlp.down_proj.weight": "model-00004-of-00006.safetensors",
191
+ "language_model.model.layers.27.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
192
+ "language_model.model.layers.27.mlp.up_proj.weight": "model-00004-of-00006.safetensors",
193
+ "language_model.model.layers.27.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
194
+ "language_model.model.layers.27.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
195
+ "language_model.model.layers.27.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
196
+ "language_model.model.layers.27.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
197
+ "language_model.model.layers.27.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
198
+ "language_model.model.layers.28.input_layernorm.weight": "model-00005-of-00006.safetensors",
199
+ "language_model.model.layers.28.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
200
+ "language_model.model.layers.28.mlp.gate_proj.weight": "model-00004-of-00006.safetensors",
201
+ "language_model.model.layers.28.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
202
+ "language_model.model.layers.28.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
203
+ "language_model.model.layers.28.self_attn.k_proj.weight": "model-00004-of-00006.safetensors",
204
+ "language_model.model.layers.28.self_attn.o_proj.weight": "model-00004-of-00006.safetensors",
205
+ "language_model.model.layers.28.self_attn.q_proj.weight": "model-00004-of-00006.safetensors",
206
+ "language_model.model.layers.28.self_attn.v_proj.weight": "model-00004-of-00006.safetensors",
207
+ "language_model.model.layers.29.input_layernorm.weight": "model-00005-of-00006.safetensors",
208
+ "language_model.model.layers.29.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
209
+ "language_model.model.layers.29.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
210
+ "language_model.model.layers.29.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
211
+ "language_model.model.layers.29.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
212
+ "language_model.model.layers.29.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
213
+ "language_model.model.layers.29.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
214
+ "language_model.model.layers.29.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
215
+ "language_model.model.layers.29.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
216
+ "language_model.model.layers.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
217
+ "language_model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00006.safetensors",
218
+ "language_model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
219
+ "language_model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00006.safetensors",
220
+ "language_model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
221
+ "language_model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
222
+ "language_model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
223
+ "language_model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
224
+ "language_model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
225
+ "language_model.model.layers.30.input_layernorm.weight": "model-00005-of-00006.safetensors",
226
+ "language_model.model.layers.30.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
227
+ "language_model.model.layers.30.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
228
+ "language_model.model.layers.30.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
229
+ "language_model.model.layers.30.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
230
+ "language_model.model.layers.30.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
231
+ "language_model.model.layers.30.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
232
+ "language_model.model.layers.30.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
233
+ "language_model.model.layers.30.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
234
+ "language_model.model.layers.31.input_layernorm.weight": "model-00005-of-00006.safetensors",
235
+ "language_model.model.layers.31.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
236
+ "language_model.model.layers.31.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
237
+ "language_model.model.layers.31.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
238
+ "language_model.model.layers.31.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
239
+ "language_model.model.layers.31.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
240
+ "language_model.model.layers.31.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
241
+ "language_model.model.layers.31.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
242
+ "language_model.model.layers.31.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
243
+ "language_model.model.layers.32.input_layernorm.weight": "model-00005-of-00006.safetensors",
244
+ "language_model.model.layers.32.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
245
+ "language_model.model.layers.32.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
246
+ "language_model.model.layers.32.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
247
+ "language_model.model.layers.32.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
248
+ "language_model.model.layers.32.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
249
+ "language_model.model.layers.32.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
250
+ "language_model.model.layers.32.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
251
+ "language_model.model.layers.32.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
252
+ "language_model.model.layers.33.input_layernorm.weight": "model-00005-of-00006.safetensors",
253
+ "language_model.model.layers.33.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
254
+ "language_model.model.layers.33.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
255
+ "language_model.model.layers.33.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
256
+ "language_model.model.layers.33.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
257
+ "language_model.model.layers.33.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
258
+ "language_model.model.layers.33.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
259
+ "language_model.model.layers.33.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
260
+ "language_model.model.layers.33.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
261
+ "language_model.model.layers.34.input_layernorm.weight": "model-00005-of-00006.safetensors",
262
+ "language_model.model.layers.34.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
263
+ "language_model.model.layers.34.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
264
+ "language_model.model.layers.34.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
265
+ "language_model.model.layers.34.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
266
+ "language_model.model.layers.34.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
267
+ "language_model.model.layers.34.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
268
+ "language_model.model.layers.34.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
269
+ "language_model.model.layers.34.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
270
+ "language_model.model.layers.35.input_layernorm.weight": "model-00005-of-00006.safetensors",
271
+ "language_model.model.layers.35.mlp.down_proj.weight": "model-00005-of-00006.safetensors",
272
+ "language_model.model.layers.35.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
273
+ "language_model.model.layers.35.mlp.up_proj.weight": "model-00005-of-00006.safetensors",
274
+ "language_model.model.layers.35.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
275
+ "language_model.model.layers.35.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
276
+ "language_model.model.layers.35.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
277
+ "language_model.model.layers.35.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
278
+ "language_model.model.layers.35.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
279
+ "language_model.model.layers.36.input_layernorm.weight": "model-00006-of-00006.safetensors",
280
+ "language_model.model.layers.36.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
281
+ "language_model.model.layers.36.mlp.gate_proj.weight": "model-00005-of-00006.safetensors",
282
+ "language_model.model.layers.36.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
283
+ "language_model.model.layers.36.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
284
+ "language_model.model.layers.36.self_attn.k_proj.weight": "model-00005-of-00006.safetensors",
285
+ "language_model.model.layers.36.self_attn.o_proj.weight": "model-00005-of-00006.safetensors",
286
+ "language_model.model.layers.36.self_attn.q_proj.weight": "model-00005-of-00006.safetensors",
287
+ "language_model.model.layers.36.self_attn.v_proj.weight": "model-00005-of-00006.safetensors",
288
+ "language_model.model.layers.37.input_layernorm.weight": "model-00006-of-00006.safetensors",
289
+ "language_model.model.layers.37.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
290
+ "language_model.model.layers.37.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
291
+ "language_model.model.layers.37.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
292
+ "language_model.model.layers.37.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
293
+ "language_model.model.layers.37.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
294
+ "language_model.model.layers.37.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
295
+ "language_model.model.layers.37.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
296
+ "language_model.model.layers.37.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
297
+ "language_model.model.layers.38.input_layernorm.weight": "model-00006-of-00006.safetensors",
298
+ "language_model.model.layers.38.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
299
+ "language_model.model.layers.38.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
300
+ "language_model.model.layers.38.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
301
+ "language_model.model.layers.38.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
302
+ "language_model.model.layers.38.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
303
+ "language_model.model.layers.38.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
304
+ "language_model.model.layers.38.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
305
+ "language_model.model.layers.38.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
306
+ "language_model.model.layers.39.input_layernorm.weight": "model-00006-of-00006.safetensors",
307
+ "language_model.model.layers.39.mlp.down_proj.weight": "model-00006-of-00006.safetensors",
308
+ "language_model.model.layers.39.mlp.gate_proj.weight": "model-00006-of-00006.safetensors",
309
+ "language_model.model.layers.39.mlp.up_proj.weight": "model-00006-of-00006.safetensors",
310
+ "language_model.model.layers.39.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
311
+ "language_model.model.layers.39.self_attn.k_proj.weight": "model-00006-of-00006.safetensors",
312
+ "language_model.model.layers.39.self_attn.o_proj.weight": "model-00006-of-00006.safetensors",
313
+ "language_model.model.layers.39.self_attn.q_proj.weight": "model-00006-of-00006.safetensors",
314
+ "language_model.model.layers.39.self_attn.v_proj.weight": "model-00006-of-00006.safetensors",
315
+ "language_model.model.layers.4.input_layernorm.weight": "model-00002-of-00006.safetensors",
316
+ "language_model.model.layers.4.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
317
+ "language_model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00006.safetensors",
318
+ "language_model.model.layers.4.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
319
+ "language_model.model.layers.4.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
320
+ "language_model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00006.safetensors",
321
+ "language_model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00006.safetensors",
322
+ "language_model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00006.safetensors",
323
+ "language_model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00006.safetensors",
324
+ "language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00006.safetensors",
325
+ "language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
326
+ "language_model.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
327
+ "language_model.model.layers.5.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
328
+ "language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
329
+ "language_model.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
330
+ "language_model.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
331
+ "language_model.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
332
+ "language_model.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
333
+ "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00006.safetensors",
334
+ "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
335
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
336
+ "language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
337
+ "language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
338
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
339
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
340
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
341
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
342
+ "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00006.safetensors",
343
+ "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
344
+ "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
345
+ "language_model.model.layers.7.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
346
+ "language_model.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
347
+ "language_model.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
348
+ "language_model.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
349
+ "language_model.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
350
+ "language_model.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
351
+ "language_model.model.layers.8.input_layernorm.weight": "model-00002-of-00006.safetensors",
352
+ "language_model.model.layers.8.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
353
+ "language_model.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
354
+ "language_model.model.layers.8.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
355
+ "language_model.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
356
+ "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
357
+ "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
358
+ "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
359
+ "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
360
+ "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00006.safetensors",
361
+ "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00006.safetensors",
362
+ "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00006.safetensors",
363
+ "language_model.model.layers.9.mlp.up_proj.weight": "model-00002-of-00006.safetensors",
364
+ "language_model.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
365
+ "language_model.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00006.safetensors",
366
+ "language_model.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00006.safetensors",
367
+ "language_model.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00006.safetensors",
368
+ "language_model.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00006.safetensors",
369
+ "language_model.model.norm.weight": "model-00006-of-00006.safetensors",
370
+ "multi_modal_projector.linear_1.weight": "model-00001-of-00006.safetensors",
371
+ "multi_modal_projector.linear_2.weight": "model-00001-of-00006.safetensors",
372
+ "multi_modal_projector.norm.weight": "model-00001-of-00006.safetensors",
373
+ "multi_modal_projector.patch_merger.merging_layer.weight": "model-00001-of-00006.safetensors",
374
+ "vision_tower.ln_pre.weight": "model-00001-of-00006.safetensors",
375
+ "vision_tower.patch_conv.weight": "model-00001-of-00006.safetensors",
376
+ "vision_tower.transformer.layers.0.attention.k_proj.weight": "model-00001-of-00006.safetensors",
377
+ "vision_tower.transformer.layers.0.attention.o_proj.weight": "model-00001-of-00006.safetensors",
378
+ "vision_tower.transformer.layers.0.attention.q_proj.weight": "model-00001-of-00006.safetensors",
379
+ "vision_tower.transformer.layers.0.attention.v_proj.weight": "model-00001-of-00006.safetensors",
380
+ "vision_tower.transformer.layers.0.attention_norm.weight": "model-00001-of-00006.safetensors",
381
+ "vision_tower.transformer.layers.0.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
382
+ "vision_tower.transformer.layers.0.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
383
+ "vision_tower.transformer.layers.0.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
384
+ "vision_tower.transformer.layers.0.ffn_norm.weight": "model-00001-of-00006.safetensors",
385
+ "vision_tower.transformer.layers.1.attention.k_proj.weight": "model-00001-of-00006.safetensors",
386
+ "vision_tower.transformer.layers.1.attention.o_proj.weight": "model-00001-of-00006.safetensors",
387
+ "vision_tower.transformer.layers.1.attention.q_proj.weight": "model-00001-of-00006.safetensors",
388
+ "vision_tower.transformer.layers.1.attention.v_proj.weight": "model-00001-of-00006.safetensors",
389
+ "vision_tower.transformer.layers.1.attention_norm.weight": "model-00001-of-00006.safetensors",
390
+ "vision_tower.transformer.layers.1.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
391
+ "vision_tower.transformer.layers.1.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
392
+ "vision_tower.transformer.layers.1.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
393
+ "vision_tower.transformer.layers.1.ffn_norm.weight": "model-00001-of-00006.safetensors",
394
+ "vision_tower.transformer.layers.10.attention.k_proj.weight": "model-00001-of-00006.safetensors",
395
+ "vision_tower.transformer.layers.10.attention.o_proj.weight": "model-00001-of-00006.safetensors",
396
+ "vision_tower.transformer.layers.10.attention.q_proj.weight": "model-00001-of-00006.safetensors",
397
+ "vision_tower.transformer.layers.10.attention.v_proj.weight": "model-00001-of-00006.safetensors",
398
+ "vision_tower.transformer.layers.10.attention_norm.weight": "model-00001-of-00006.safetensors",
399
+ "vision_tower.transformer.layers.10.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
400
+ "vision_tower.transformer.layers.10.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
401
+ "vision_tower.transformer.layers.10.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
402
+ "vision_tower.transformer.layers.10.ffn_norm.weight": "model-00001-of-00006.safetensors",
403
+ "vision_tower.transformer.layers.11.attention.k_proj.weight": "model-00001-of-00006.safetensors",
404
+ "vision_tower.transformer.layers.11.attention.o_proj.weight": "model-00001-of-00006.safetensors",
405
+ "vision_tower.transformer.layers.11.attention.q_proj.weight": "model-00001-of-00006.safetensors",
406
+ "vision_tower.transformer.layers.11.attention.v_proj.weight": "model-00001-of-00006.safetensors",
407
+ "vision_tower.transformer.layers.11.attention_norm.weight": "model-00001-of-00006.safetensors",
408
+ "vision_tower.transformer.layers.11.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
409
+ "vision_tower.transformer.layers.11.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
410
+ "vision_tower.transformer.layers.11.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
411
+ "vision_tower.transformer.layers.11.ffn_norm.weight": "model-00001-of-00006.safetensors",
412
+ "vision_tower.transformer.layers.12.attention.k_proj.weight": "model-00001-of-00006.safetensors",
413
+ "vision_tower.transformer.layers.12.attention.o_proj.weight": "model-00001-of-00006.safetensors",
414
+ "vision_tower.transformer.layers.12.attention.q_proj.weight": "model-00001-of-00006.safetensors",
415
+ "vision_tower.transformer.layers.12.attention.v_proj.weight": "model-00001-of-00006.safetensors",
416
+ "vision_tower.transformer.layers.12.attention_norm.weight": "model-00001-of-00006.safetensors",
417
+ "vision_tower.transformer.layers.12.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
418
+ "vision_tower.transformer.layers.12.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
419
+ "vision_tower.transformer.layers.12.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
420
+ "vision_tower.transformer.layers.12.ffn_norm.weight": "model-00001-of-00006.safetensors",
421
+ "vision_tower.transformer.layers.13.attention.k_proj.weight": "model-00001-of-00006.safetensors",
422
+ "vision_tower.transformer.layers.13.attention.o_proj.weight": "model-00001-of-00006.safetensors",
423
+ "vision_tower.transformer.layers.13.attention.q_proj.weight": "model-00001-of-00006.safetensors",
424
+ "vision_tower.transformer.layers.13.attention.v_proj.weight": "model-00001-of-00006.safetensors",
425
+ "vision_tower.transformer.layers.13.attention_norm.weight": "model-00001-of-00006.safetensors",
426
+ "vision_tower.transformer.layers.13.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
427
+ "vision_tower.transformer.layers.13.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
428
+ "vision_tower.transformer.layers.13.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
429
+ "vision_tower.transformer.layers.13.ffn_norm.weight": "model-00001-of-00006.safetensors",
430
+ "vision_tower.transformer.layers.14.attention.k_proj.weight": "model-00001-of-00006.safetensors",
431
+ "vision_tower.transformer.layers.14.attention.o_proj.weight": "model-00001-of-00006.safetensors",
432
+ "vision_tower.transformer.layers.14.attention.q_proj.weight": "model-00001-of-00006.safetensors",
433
+ "vision_tower.transformer.layers.14.attention.v_proj.weight": "model-00001-of-00006.safetensors",
434
+ "vision_tower.transformer.layers.14.attention_norm.weight": "model-00001-of-00006.safetensors",
435
+ "vision_tower.transformer.layers.14.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
436
+ "vision_tower.transformer.layers.14.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
437
+ "vision_tower.transformer.layers.14.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
438
+ "vision_tower.transformer.layers.14.ffn_norm.weight": "model-00001-of-00006.safetensors",
439
+ "vision_tower.transformer.layers.15.attention.k_proj.weight": "model-00001-of-00006.safetensors",
440
+ "vision_tower.transformer.layers.15.attention.o_proj.weight": "model-00001-of-00006.safetensors",
441
+ "vision_tower.transformer.layers.15.attention.q_proj.weight": "model-00001-of-00006.safetensors",
442
+ "vision_tower.transformer.layers.15.attention.v_proj.weight": "model-00001-of-00006.safetensors",
443
+ "vision_tower.transformer.layers.15.attention_norm.weight": "model-00001-of-00006.safetensors",
444
+ "vision_tower.transformer.layers.15.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
445
+ "vision_tower.transformer.layers.15.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
446
+ "vision_tower.transformer.layers.15.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
447
+ "vision_tower.transformer.layers.15.ffn_norm.weight": "model-00001-of-00006.safetensors",
448
+ "vision_tower.transformer.layers.16.attention.k_proj.weight": "model-00001-of-00006.safetensors",
449
+ "vision_tower.transformer.layers.16.attention.o_proj.weight": "model-00001-of-00006.safetensors",
450
+ "vision_tower.transformer.layers.16.attention.q_proj.weight": "model-00001-of-00006.safetensors",
451
+ "vision_tower.transformer.layers.16.attention.v_proj.weight": "model-00001-of-00006.safetensors",
452
+ "vision_tower.transformer.layers.16.attention_norm.weight": "model-00001-of-00006.safetensors",
453
+ "vision_tower.transformer.layers.16.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
454
+ "vision_tower.transformer.layers.16.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
455
+ "vision_tower.transformer.layers.16.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
456
+ "vision_tower.transformer.layers.16.ffn_norm.weight": "model-00001-of-00006.safetensors",
457
+ "vision_tower.transformer.layers.17.attention.k_proj.weight": "model-00001-of-00006.safetensors",
458
+ "vision_tower.transformer.layers.17.attention.o_proj.weight": "model-00001-of-00006.safetensors",
459
+ "vision_tower.transformer.layers.17.attention.q_proj.weight": "model-00001-of-00006.safetensors",
460
+ "vision_tower.transformer.layers.17.attention.v_proj.weight": "model-00001-of-00006.safetensors",
461
+ "vision_tower.transformer.layers.17.attention_norm.weight": "model-00001-of-00006.safetensors",
462
+ "vision_tower.transformer.layers.17.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
463
+ "vision_tower.transformer.layers.17.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
464
+ "vision_tower.transformer.layers.17.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
465
+ "vision_tower.transformer.layers.17.ffn_norm.weight": "model-00001-of-00006.safetensors",
466
+ "vision_tower.transformer.layers.18.attention.k_proj.weight": "model-00001-of-00006.safetensors",
467
+ "vision_tower.transformer.layers.18.attention.o_proj.weight": "model-00001-of-00006.safetensors",
468
+ "vision_tower.transformer.layers.18.attention.q_proj.weight": "model-00001-of-00006.safetensors",
469
+ "vision_tower.transformer.layers.18.attention.v_proj.weight": "model-00001-of-00006.safetensors",
470
+ "vision_tower.transformer.layers.18.attention_norm.weight": "model-00001-of-00006.safetensors",
471
+ "vision_tower.transformer.layers.18.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
472
+ "vision_tower.transformer.layers.18.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
473
+ "vision_tower.transformer.layers.18.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
474
+ "vision_tower.transformer.layers.18.ffn_norm.weight": "model-00001-of-00006.safetensors",
475
+ "vision_tower.transformer.layers.19.attention.k_proj.weight": "model-00001-of-00006.safetensors",
476
+ "vision_tower.transformer.layers.19.attention.o_proj.weight": "model-00001-of-00006.safetensors",
477
+ "vision_tower.transformer.layers.19.attention.q_proj.weight": "model-00001-of-00006.safetensors",
478
+ "vision_tower.transformer.layers.19.attention.v_proj.weight": "model-00001-of-00006.safetensors",
479
+ "vision_tower.transformer.layers.19.attention_norm.weight": "model-00001-of-00006.safetensors",
480
+ "vision_tower.transformer.layers.19.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
481
+ "vision_tower.transformer.layers.19.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
482
+ "vision_tower.transformer.layers.19.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
483
+ "vision_tower.transformer.layers.19.ffn_norm.weight": "model-00001-of-00006.safetensors",
484
+ "vision_tower.transformer.layers.2.attention.k_proj.weight": "model-00001-of-00006.safetensors",
485
+ "vision_tower.transformer.layers.2.attention.o_proj.weight": "model-00001-of-00006.safetensors",
486
+ "vision_tower.transformer.layers.2.attention.q_proj.weight": "model-00001-of-00006.safetensors",
487
+ "vision_tower.transformer.layers.2.attention.v_proj.weight": "model-00001-of-00006.safetensors",
488
+ "vision_tower.transformer.layers.2.attention_norm.weight": "model-00001-of-00006.safetensors",
489
+ "vision_tower.transformer.layers.2.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
490
+ "vision_tower.transformer.layers.2.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
491
+ "vision_tower.transformer.layers.2.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
492
+ "vision_tower.transformer.layers.2.ffn_norm.weight": "model-00001-of-00006.safetensors",
493
+ "vision_tower.transformer.layers.20.attention.k_proj.weight": "model-00001-of-00006.safetensors",
494
+ "vision_tower.transformer.layers.20.attention.o_proj.weight": "model-00001-of-00006.safetensors",
495
+ "vision_tower.transformer.layers.20.attention.q_proj.weight": "model-00001-of-00006.safetensors",
496
+ "vision_tower.transformer.layers.20.attention.v_proj.weight": "model-00001-of-00006.safetensors",
497
+ "vision_tower.transformer.layers.20.attention_norm.weight": "model-00001-of-00006.safetensors",
498
+ "vision_tower.transformer.layers.20.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
499
+ "vision_tower.transformer.layers.20.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
500
+ "vision_tower.transformer.layers.20.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
501
+ "vision_tower.transformer.layers.20.ffn_norm.weight": "model-00001-of-00006.safetensors",
502
+ "vision_tower.transformer.layers.21.attention.k_proj.weight": "model-00001-of-00006.safetensors",
503
+ "vision_tower.transformer.layers.21.attention.o_proj.weight": "model-00001-of-00006.safetensors",
504
+ "vision_tower.transformer.layers.21.attention.q_proj.weight": "model-00001-of-00006.safetensors",
505
+ "vision_tower.transformer.layers.21.attention.v_proj.weight": "model-00001-of-00006.safetensors",
506
+ "vision_tower.transformer.layers.21.attention_norm.weight": "model-00001-of-00006.safetensors",
507
+ "vision_tower.transformer.layers.21.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
508
+ "vision_tower.transformer.layers.21.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
509
+ "vision_tower.transformer.layers.21.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
510
+ "vision_tower.transformer.layers.21.ffn_norm.weight": "model-00001-of-00006.safetensors",
511
+ "vision_tower.transformer.layers.22.attention.k_proj.weight": "model-00001-of-00006.safetensors",
512
+ "vision_tower.transformer.layers.22.attention.o_proj.weight": "model-00001-of-00006.safetensors",
513
+ "vision_tower.transformer.layers.22.attention.q_proj.weight": "model-00001-of-00006.safetensors",
514
+ "vision_tower.transformer.layers.22.attention.v_proj.weight": "model-00001-of-00006.safetensors",
515
+ "vision_tower.transformer.layers.22.attention_norm.weight": "model-00001-of-00006.safetensors",
516
+ "vision_tower.transformer.layers.22.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
517
+ "vision_tower.transformer.layers.22.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
518
+ "vision_tower.transformer.layers.22.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
519
+ "vision_tower.transformer.layers.22.ffn_norm.weight": "model-00001-of-00006.safetensors",
520
+ "vision_tower.transformer.layers.23.attention.k_proj.weight": "model-00001-of-00006.safetensors",
521
+ "vision_tower.transformer.layers.23.attention.o_proj.weight": "model-00001-of-00006.safetensors",
522
+ "vision_tower.transformer.layers.23.attention.q_proj.weight": "model-00001-of-00006.safetensors",
523
+ "vision_tower.transformer.layers.23.attention.v_proj.weight": "model-00001-of-00006.safetensors",
524
+ "vision_tower.transformer.layers.23.attention_norm.weight": "model-00001-of-00006.safetensors",
525
+ "vision_tower.transformer.layers.23.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
526
+ "vision_tower.transformer.layers.23.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
527
+ "vision_tower.transformer.layers.23.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
528
+ "vision_tower.transformer.layers.23.ffn_norm.weight": "model-00001-of-00006.safetensors",
529
+ "vision_tower.transformer.layers.3.attention.k_proj.weight": "model-00001-of-00006.safetensors",
530
+ "vision_tower.transformer.layers.3.attention.o_proj.weight": "model-00001-of-00006.safetensors",
531
+ "vision_tower.transformer.layers.3.attention.q_proj.weight": "model-00001-of-00006.safetensors",
532
+ "vision_tower.transformer.layers.3.attention.v_proj.weight": "model-00001-of-00006.safetensors",
533
+ "vision_tower.transformer.layers.3.attention_norm.weight": "model-00001-of-00006.safetensors",
534
+ "vision_tower.transformer.layers.3.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
535
+ "vision_tower.transformer.layers.3.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
536
+ "vision_tower.transformer.layers.3.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
537
+ "vision_tower.transformer.layers.3.ffn_norm.weight": "model-00001-of-00006.safetensors",
538
+ "vision_tower.transformer.layers.4.attention.k_proj.weight": "model-00001-of-00006.safetensors",
539
+ "vision_tower.transformer.layers.4.attention.o_proj.weight": "model-00001-of-00006.safetensors",
540
+ "vision_tower.transformer.layers.4.attention.q_proj.weight": "model-00001-of-00006.safetensors",
541
+ "vision_tower.transformer.layers.4.attention.v_proj.weight": "model-00001-of-00006.safetensors",
542
+ "vision_tower.transformer.layers.4.attention_norm.weight": "model-00001-of-00006.safetensors",
543
+ "vision_tower.transformer.layers.4.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
544
+ "vision_tower.transformer.layers.4.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
545
+ "vision_tower.transformer.layers.4.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
546
+ "vision_tower.transformer.layers.4.ffn_norm.weight": "model-00001-of-00006.safetensors",
547
+ "vision_tower.transformer.layers.5.attention.k_proj.weight": "model-00001-of-00006.safetensors",
548
+ "vision_tower.transformer.layers.5.attention.o_proj.weight": "model-00001-of-00006.safetensors",
549
+ "vision_tower.transformer.layers.5.attention.q_proj.weight": "model-00001-of-00006.safetensors",
550
+ "vision_tower.transformer.layers.5.attention.v_proj.weight": "model-00001-of-00006.safetensors",
551
+ "vision_tower.transformer.layers.5.attention_norm.weight": "model-00001-of-00006.safetensors",
552
+ "vision_tower.transformer.layers.5.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
553
+ "vision_tower.transformer.layers.5.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
554
+ "vision_tower.transformer.layers.5.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
555
+ "vision_tower.transformer.layers.5.ffn_norm.weight": "model-00001-of-00006.safetensors",
556
+ "vision_tower.transformer.layers.6.attention.k_proj.weight": "model-00001-of-00006.safetensors",
557
+ "vision_tower.transformer.layers.6.attention.o_proj.weight": "model-00001-of-00006.safetensors",
558
+ "vision_tower.transformer.layers.6.attention.q_proj.weight": "model-00001-of-00006.safetensors",
559
+ "vision_tower.transformer.layers.6.attention.v_proj.weight": "model-00001-of-00006.safetensors",
560
+ "vision_tower.transformer.layers.6.attention_norm.weight": "model-00001-of-00006.safetensors",
561
+ "vision_tower.transformer.layers.6.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
562
+ "vision_tower.transformer.layers.6.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
563
+ "vision_tower.transformer.layers.6.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
564
+ "vision_tower.transformer.layers.6.ffn_norm.weight": "model-00001-of-00006.safetensors",
565
+ "vision_tower.transformer.layers.7.attention.k_proj.weight": "model-00001-of-00006.safetensors",
566
+ "vision_tower.transformer.layers.7.attention.o_proj.weight": "model-00001-of-00006.safetensors",
567
+ "vision_tower.transformer.layers.7.attention.q_proj.weight": "model-00001-of-00006.safetensors",
568
+ "vision_tower.transformer.layers.7.attention.v_proj.weight": "model-00001-of-00006.safetensors",
569
+ "vision_tower.transformer.layers.7.attention_norm.weight": "model-00001-of-00006.safetensors",
570
+ "vision_tower.transformer.layers.7.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
571
+ "vision_tower.transformer.layers.7.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
572
+ "vision_tower.transformer.layers.7.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
573
+ "vision_tower.transformer.layers.7.ffn_norm.weight": "model-00001-of-00006.safetensors",
574
+ "vision_tower.transformer.layers.8.attention.k_proj.weight": "model-00001-of-00006.safetensors",
575
+ "vision_tower.transformer.layers.8.attention.o_proj.weight": "model-00001-of-00006.safetensors",
576
+ "vision_tower.transformer.layers.8.attention.q_proj.weight": "model-00001-of-00006.safetensors",
577
+ "vision_tower.transformer.layers.8.attention.v_proj.weight": "model-00001-of-00006.safetensors",
578
+ "vision_tower.transformer.layers.8.attention_norm.weight": "model-00001-of-00006.safetensors",
579
+ "vision_tower.transformer.layers.8.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
580
+ "vision_tower.transformer.layers.8.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
581
+ "vision_tower.transformer.layers.8.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
582
+ "vision_tower.transformer.layers.8.ffn_norm.weight": "model-00001-of-00006.safetensors",
583
+ "vision_tower.transformer.layers.9.attention.k_proj.weight": "model-00001-of-00006.safetensors",
584
+ "vision_tower.transformer.layers.9.attention.o_proj.weight": "model-00001-of-00006.safetensors",
585
+ "vision_tower.transformer.layers.9.attention.q_proj.weight": "model-00001-of-00006.safetensors",
586
+ "vision_tower.transformer.layers.9.attention.v_proj.weight": "model-00001-of-00006.safetensors",
587
+ "vision_tower.transformer.layers.9.attention_norm.weight": "model-00001-of-00006.safetensors",
588
+ "vision_tower.transformer.layers.9.feed_forward.down_proj.weight": "model-00001-of-00006.safetensors",
589
+ "vision_tower.transformer.layers.9.feed_forward.gate_proj.weight": "model-00001-of-00006.safetensors",
590
+ "vision_tower.transformer.layers.9.feed_forward.up_proj.weight": "model-00001-of-00006.safetensors",
591
+ "vision_tower.transformer.layers.9.ffn_norm.weight": "model-00001-of-00006.safetensors"
592
+ }
593
+ }
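
The weight map above assigns every tensor to one of the six safetensors shards. A minimal sketch, assuming the checkpoint has already been downloaded to a local directory (the path below is hypothetical), of how the index can be used to read a single tensor from the correct shard without opening all six files:

```python
# Minimal sketch: use the weight_map from model.safetensors.index.json to load
# a single tensor from the shard that actually contains it.
# `ckpt_dir` is a hypothetical local path to the downloaded checkpoint.
import json
from pathlib import Path

from safetensors import safe_open

ckpt_dir = Path("Ministral-3-14B-Instruct-2512")
index = json.loads((ckpt_dir / "model.safetensors.index.json").read_text())

name = "language_model.model.norm.weight"        # listed above in shard 6
shard_file = index["weight_map"][name]           # "model-00006-of-00006.safetensors"

with safe_open(str(ckpt_dir / shard_file), framework="pt") as f:
    tensor = f.get_tensor(name)

print(name, tuple(tensor.shape), "from", shard_file)
```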
params.json ADDED
@@ -0,0 +1,47 @@
1
+ {
2
+ "dim": 5120,
3
+ "n_layers": 40,
4
+ "head_dim": 128,
5
+ "hidden_dim": 16384,
6
+ "n_heads": 32,
7
+ "n_kv_heads": 8,
8
+ "rope_theta": 1000000000.0,
9
+ "norm_eps": 1e-05,
10
+ "vocab_size": 131072,
11
+ "tied_embeddings": false,
12
+ "max_position_embeddings": 262144,
13
+ "llama_4_scaling": {
14
+ "original_max_position_embeddings": 16384,
15
+ "beta": 0.1
16
+ },
17
+ "q_lora_rank": null,
18
+ "qk_rope_head_dim": null,
19
+ "qk_nope_head_dim": null,
20
+ "kv_lora_rank": null,
21
+ "v_head_dim": null,
22
+ "yarn": {
23
+ "original_max_position_embeddings": 16384,
24
+ "factor": 16,
25
+ "apply_scale": false,
26
+ "beta": 32,
27
+ "alpha": 1
28
+ },
29
+ "vision_encoder": {
30
+ "image_token_id": 10,
31
+ "image_break_token_id": 12,
32
+ "image_end_token_id": 13,
33
+ "intermediate_size": 4096,
34
+ "num_hidden_layers": 24,
35
+ "num_attention_heads": 16,
36
+ "mm_projector_id": "patch_merge",
37
+ "spatial_merge_size": 2,
38
+ "hidden_size": 1024,
39
+ "num_channels": 3,
40
+ "image_size": 1540,
41
+ "max_image_size": 1540,
42
+ "patch_size": 14,
43
+ "rope_theta": 10000.0,
44
+ "add_pre_mm_projector_layer_norm": true,
45
+ "adapter_bias": false
46
+ }
47
+ }
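
The parameters above extend the context window from 16,384 to 262,144 positions; the YaRN factor of 16 is exactly that ratio. The full llama4-style scaling formula is implemented in the vLLM and Transformers branches referenced in the README; the sketch below only checks the declared numbers against each other:

```python
# Sketch: sanity-check the context-extension numbers declared in params.json.
# The actual llama4-style YaRN rotary scaling lives in the referenced branches;
# here we only verify that the declared ratio is consistent.
import json

with open("params.json") as f:
    params = json.load(f)

orig = params["yarn"]["original_max_position_embeddings"]  # 16384
target = params["max_position_embeddings"]                 # 262144
factor = params["yarn"]["factor"]                          # 16

assert target == orig * factor, "YaRN factor should equal the context ratio"
print(f"context extended {orig} -> {target} (x{factor}), "
      f"llama_4_scaling beta = {params['llama_4_scaling']['beta']}")
```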
processor_config.json ADDED
@@ -0,0 +1,42 @@
1
+ {
2
+ "image_break_token": "[IMG_BREAK]",
3
+ "image_end_token": "[IMG_END]",
4
+ "image_processor": {
5
+ "crop_size": null,
6
+ "data_format": "channels_first",
7
+ "device": null,
8
+ "disable_grouping": null,
9
+ "do_center_crop": null,
10
+ "do_convert_rgb": true,
11
+ "do_normalize": true,
12
+ "do_pad": null,
13
+ "do_rescale": true,
14
+ "do_resize": true,
15
+ "image_mean": [
16
+ 0.48145466,
17
+ 0.4578275,
18
+ 0.40821073
19
+ ],
20
+ "image_processor_type": "PixtralImageProcessorFast",
21
+ "image_seq_length": null,
22
+ "image_std": [
23
+ 0.26862954,
24
+ 0.26130258,
25
+ 0.27577711
26
+ ],
27
+ "input_data_format": null,
28
+ "pad_size": null,
29
+ "patch_size": 14,
30
+ "processor_class": "PixtralProcessor",
31
+ "resample": 3,
32
+ "rescale_factor": 0.00392156862745098,
33
+ "return_tensors": null,
34
+ "size": {
35
+ "longest_edge": 1540
36
+ }
37
+ },
38
+ "image_token": "[IMG]",
39
+ "patch_size": 14,
40
+ "processor_class": "PixtralProcessor",
41
+ "spatial_merge_size": 2
42
+ }
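
With `patch_size` 14, `spatial_merge_size` 2 and a longest edge of 1540, you can estimate how many image tokens a given picture will produce. A rough sketch, assuming the usual Pixtral layout of one `[IMG_BREAK]` per row of merged patches and a trailing `[IMG_END]`; the authoritative logic is `PixtralProcessor` itself:

```python
# Rough sketch: estimate the image-token count from the processor settings above.
# Assumption: one [IMG_BREAK] token per row of merged patches plus one [IMG_END];
# the exact behaviour is defined by PixtralProcessor.
import math

patch_size = 14
spatial_merge_size = 2
longest_edge = 1540

def image_token_count(height: int, width: int) -> int:
    # Resize so the longest side fits within `longest_edge`, keeping aspect ratio.
    scale = min(1.0, longest_edge / max(height, width))
    h, w = height * scale, width * scale
    # Split into 14x14 patches, then 2x2 merging by the patch merger.
    rows = math.ceil(math.ceil(h / patch_size) / spatial_merge_size)
    cols = math.ceil(math.ceil(w / patch_size) / spatial_merge_size)
    return rows * cols + rows + 1  # [IMG] grid + [IMG_BREAK] per row + [IMG_END]

print(image_token_count(1540, 1540))  # 55*55 + 55 + 1 = 3081
```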
special_tokens_map.json ADDED
The diff for this file is too large to render. See raw diff
 
tekken.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e29d19ea32eb7e26e6c0572d57cb7f9eca0f4420e0e0fe6ae1cf3be94da1c0d6
3
+ size 16753777
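
`tekken.json` is stored via Git LFS, so only the pointer appears above. A minimal sketch, assuming `huggingface_hub` and `mistral-common` are installed, of fetching the real file and loading it as a Mistral tokenizer:

```python
# Sketch: fetch the LFS-backed tekken.json and load it with mistral-common.
# Assumes huggingface_hub and mistral-common are installed; the repo id follows
# the model card above.
from huggingface_hub import hf_hub_download
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer

path = hf_hub_download("mistralai/Ministral-3-14B-Instruct-2512", "tekken.json")
tokenizer = MistralTokenizer.from_file(path)

tokens = tokenizer.instruct_tokenizer.tokenizer.encode("Hello!", bos=True, eos=False)
print(len(tokens))
```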
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:27cc2a0b9ebfda9540615116cbb444ddc4793bfc80da8ca0d730a0b54d2b6581
3
+ size 17077993
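
`tokenizer.json` is the Hugging Face-format tokenizer, also stored via Git LFS. While running the model itself currently requires the branches referenced in the README, the tokenizer file is a standard `tokenizers`-format artifact, so a sketch like the following should load it directly:

```python
# Sketch: load the Hugging Face tokenizer.json with transformers.
# Full model support needs the branches referenced in the README, but the
# tokenizer itself is a standard tokenizers-format file.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mistralai/Ministral-3-14B-Instruct-2512")
ids = tok("Hello!")["input_ids"]
print(ids, tok.decode(ids))
```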
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff