Jessie09 committed
Commit 8014d08 · verified · 1 Parent(s): 21774ba

Upload dataset

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50)
  1. .gitattributes +9 -0
  2. .vscode/launch.json +61 -0
  3. Algorithms.py +755 -0
  4. ArcherCritic.py +161 -0
  5. Dataset.py +328 -0
  6. Llama3-8B-I_Strategic_log.txt +0 -0
  7. Llama3-8B-I_Word_log.txt +0 -0
  8. Qwen3-14B_Strategic_log.txt +12 -0
  9. Qwen3-14B_Word_log.txt +285 -0
  10. README.md +57 -0
  11. SimulateOnEnv.py +40 -0
  12. Tasks.py +476 -0
  13. __pycache__/Algorithms.cpython-310.pyc +0 -0
  14. __pycache__/ArcherCritic.cpython-310.pyc +0 -0
  15. __pycache__/Dataset.cpython-310.pyc +0 -0
  16. __pycache__/SimulateOnEnv.cpython-310.pyc +0 -0
  17. __pycache__/Tasks.cpython-310.pyc +0 -0
  18. __pycache__/rsa_game.cpython-310.pyc +0 -0
  19. __pycache__/twenty_questions.cpython-310.pyc +0 -0
  20. __pycache__/word_taboo.cpython-310.pyc +0 -0
  21. archer.egg-info/PKG-INFO +95 -0
  22. archer.egg-info/SOURCES.txt +7 -0
  23. archer.egg-info/dependency_links.txt +1 -0
  24. archer.egg-info/requires.txt +13 -0
  25. archer.egg-info/top_level.txt +1 -0
  26. archer.tar.gz +3 -0
  27. checkpoints/archer_Llama3-8B-I_strategic/README.md +207 -0
  28. checkpoints/archer_Llama3-8B-I_strategic/adapter_config.json +37 -0
  29. checkpoints/archer_Llama3-8B-I_strategic/adapter_model.safetensors +3 -0
  30. checkpoints/archer_Llama3-8B-I_strategic/chat_template.jinja +5 -0
  31. checkpoints/archer_Llama3-8B-I_strategic/special_tokens_map.json +23 -0
  32. checkpoints/archer_Llama3-8B-I_strategic/tokenizer.json +3 -0
  33. checkpoints/archer_Llama3-8B-I_strategic/tokenizer_config.json +2065 -0
  34. checkpoints/archer_Llama3-8B-I_word/README.md +207 -0
  35. checkpoints/archer_Llama3-8B-I_word/adapter_config.json +37 -0
  36. checkpoints/archer_Llama3-8B-I_word/adapter_model.safetensors +3 -0
  37. checkpoints/archer_Llama3-8B-I_word/chat_template.jinja +5 -0
  38. checkpoints/archer_Llama3-8B-I_word/special_tokens_map.json +23 -0
  39. checkpoints/archer_Llama3-8B-I_word/tokenizer.json +3 -0
  40. checkpoints/archer_Llama3-8B-I_word/tokenizer_config.json +2065 -0
  41. deepspeed_zero3.yaml +66 -0
  42. main.py +33 -0
  43. requirements.txt +13 -0
  44. rsa_game.py +383 -0
  45. setup.py +24 -0
  46. submit_BC_TwentyQuestions.sh +25 -0
  47. submit_FBC_TwentyQuestions.sh +26 -0
  48. submit_OfflineArcher_RSA.sh +166 -0
  49. submit_OfflineArcher_TwentyQuestions.sh +37 -0
  50. test.py +34 -0
.gitattributes CHANGED
@@ -57,3 +57,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ checkpoints/archer_Llama3-8B-I_strategic/tokenizer.json filter=lfs diff=lfs merge=lfs -text
61
+ checkpoints/archer_Llama3-8B-I_word/tokenizer.json filter=lfs diff=lfs merge=lfs -text
62
+ wandb/run-20250914_072202-c40vakf9/run-c40vakf9.wandb filter=lfs diff=lfs merge=lfs -text
63
+ wandb/run-20250914_191149-a5wf2wi9/run-a5wf2wi9.wandb filter=lfs diff=lfs merge=lfs -text
64
+ wandb/run-20250915_014504-agz3jw75/run-agz3jw75.wandb filter=lfs diff=lfs merge=lfs -text
65
+ wandb/run-20250915_111603-tnh8ytpw/run-tnh8ytpw.wandb filter=lfs diff=lfs merge=lfs -text
66
+ wandb/run-20250915_170938-mlb6ufl5/run-mlb6ufl5.wandb filter=lfs diff=lfs merge=lfs -text
67
+ wandb/run-20250915_234149-solrky9k/run-solrky9k.wandb filter=lfs diff=lfs merge=lfs -text
68
+ wandb/run-20250916_001517-sdm2bc8f/run-sdm2bc8f.wandb filter=lfs diff=lfs merge=lfs -text
.vscode/launch.json ADDED
@@ -0,0 +1,61 @@
1
+ {
2
+ "version": "0.2.0",
3
+ "configurations": [
4
+ {
5
+ "name": "OfflineArcher RSA Training",
6
+ "type": "python",
7
+ "request": "launch",
8
+ "python": "/home/jiashuo/anaconda3/envs/archer/bin/python",
9
+ "program": "${workspaceFolder}/main.py",
10
+ "args": [
11
+ "fit",
12
+ "--data=WordTaboo",
13
+ "--data.batch_size=2",
14
+ "--data.n_traj_eval=4",
15
+ "--data.base_model=Qwen3-14B",
16
+ "--model=OfflineArcher",
17
+ "--model.optimize_critic=True",
18
+ "--model.actor_lr=1e-5",
19
+ "--model.critic_lr=1e-5",
20
+ "--model.discount_factor=0.99",
21
+ "--model.tau=0.05",
22
+ "--model.critic_expectile=0.9",
23
+ "--model.inv_temp=1.0",
24
+ "--model.accumulate_grad_batches=16",
25
+ "--model.model_name_or_path=/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_rsa/merged_model",
26
+ "--trainer.fast_dev_run=False",
27
+ "--trainer.max_epoch=1",
28
+ "--trainer.logger=WandbLogger",
29
+ "--trainer.logger.init_args.project=RSAGame-Official",
30
+ "--trainer.default_root_dir=checkpoints/archer_Qwen3-14B_rsa",
31
+ "--trainer.logger.init_args.name=Test-AC-critic_expectile_0.9-inv_temp_1.0",
32
+ "--trainer.strategy=deepspeed_stage_3",
33
+ "--trainer.devices=8",
34
+ "--trainer.accelerator=gpu",
35
+ "--trainer.precision=bf16",
36
+ "--trainer.limit_val_batches=0",
37
+ "--trainer.val_check_interval=null",
38
+ "--trainer.enable_model_summary=false"
39
+ ],
40
+ "env": {
41
+ "PYTORCH_CUDA_ALLOC_CONF": "expandable_segments:True",
42
+ "ACCELERATE_USE_DEEPSPEED": "true",
43
+ "MASTER_PORT": "29500",
44
+ "TMPDIR": "$HOME/tmp",
45
+ "NCCL_P2P_DISABLE": "1",
46
+ "CUDA_LAUNCH_BLOCKING": "1",
47
+ "CUDA_DEVICE_MAX_CONNECTIONS": "1",
48
+ "PYTHONUNBUFFERED": "1",
49
+ "NCCL_DEBUG": "INFO",
50
+ "NCCL_DEBUG_SUBSYS": "INIT,ENV"
51
+ },
52
+ "deepSpeed": {
53
+ "enable": true,
54
+ "configPath": "${env:HOME}/codes/ForesightOptim/configs/deepspeed_zero3.yaml"
55
+ },
56
+ "console": "integratedTerminal",
57
+ "justMyCode": false,
58
+ "cwd": "${workspaceFolder}"
59
+ }
60
+ ]
61
+ }
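The launch configuration above drives main.py with what appears to be Lightning-CLI-style arguments. As a rough illustration of how the --model.* flags line up with the OfflineArcher class added in Algorithms.py below, the same hyperparameters could be passed to the constructor directly (a minimal sketch, assuming the signature shown in this commit; the model path is taken verbatim from the launch config):

    from Algorithms import OfflineArcher

    # Hypothetical direct construction mirroring the --model.* flags above.
    agent = OfflineArcher(
        model_name_or_path="/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_rsa/merged_model",
        inv_temp=1.0,                # --model.inv_temp
        actor_lr=1e-5,               # --model.actor_lr
        critic_lr=1e-5,              # --model.critic_lr
        tau=0.05,                    # --model.tau
        accumulate_grad_batches=16,  # --model.accumulate_grad_batches
        discount_factor=0.99,        # --model.discount_factor
        critic_expectile=0.9,        # --model.critic_expectile
        optimize_critic=True,        # --model.optimize_critic
    )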
Algorithms.py ADDED
@@ -0,0 +1,755 @@
1
+ import lightning as L
2
+ from lightning.pytorch.utilities import rank_zero_only
3
+ import torch
4
+ import os
5
+ import gc
6
+ torch.set_float32_matmul_precision("high")
7
+
8
+ from SimulateOnEnv import batch_simulate_on_environment
9
+ from lightning.pytorch.callbacks import Callback
10
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
11
+ from typing import Optional
12
+ from peft import LoraConfig, TaskType, get_peft_model, PeftModel
13
+
14
+ from safetensors import safe_open
15
+ from safetensors.torch import save_file
16
+
17
+ def safe_load(path):
18
+ """安全加载权重,处理大小不匹配"""
19
+ result = {}
20
+ with safe_open(path, framework="pt") as f:
21
+ for key in f.keys():
22
+ try:
23
+ result[key] = f.get_tensor(key)
24
+ except Exception as e:
25
+ print(f"Error loading {key}: {str(e)}")
26
+ return result
27
+
28
+
29
+ def set_special_tokens(model, tokenizer):
30
+ if tokenizer.pad_token is None and tokenizer.pad_token_id is None:
31
+ print(f"[WARNING] the pad token of the tokenizer is None")  # print_rank_0 is not defined/imported in this file
32
+ # We do not resize the vocab embedding, since it ruins the KL value with the ref_model
33
+ tokenizer.pad_token_id = tokenizer.eos_token_id
34
+ tokenizer.pad_token = tokenizer.eos_token
35
+ # tokenizer.pad_token = tokenizer.decode(0)
36
+
37
+ model.config.pad_token_id = tokenizer.pad_token_id
38
+ model.config.bos_token_id = tokenizer.bos_token_id
39
+ model.config.eos_token_id = tokenizer.eos_token_id
40
+
41
+ return model, tokenizer
42
+
43
+ def load_model_and_tokenizer(model_name_or_path, actor_checkpoint=None):
44
+
45
+ model = AutoModelForCausalLM.from_pretrained(
46
+ model_name_or_path,
47
+ trust_remote_code=True,
48
+ use_cache=False,
49
+ torch_dtype=torch.bfloat16,
50
+ low_cpu_mem_usage=True,
51
+ )
52
+
53
+ if hasattr(model, "ref_model"):
54
+ del model.ref_model
55
+
56
+ lora_config = LoraConfig(
57
+ r=8,
58
+ lora_alpha=16,
59
+ target_modules=["q_proj", "v_proj"],
60
+ lora_dropout=0.05,
61
+ bias="none",
62
+ task_type=TaskType.CAUSAL_LM,
63
+ )
64
+ model = get_peft_model(model, lora_config)
65
+
66
+ if actor_checkpoint is not None:
67
+ weight_map = {}
68
+ with safe_open(actor_checkpoint, framework="pt") as f:
69
+ for key in f.keys():
70
+ new_key = key.replace("base_model.model.", "")
71
+ weight_map[new_key] = f.get_tensor(key)
72
+
73
+ # Apply the loaded weights
74
+ for name, param in model.named_parameters():
75
+ for key, tensor in weight_map.items():
76
+ if key in name and param.shape == tensor.shape:
77
+ param.data.copy_(tensor)
78
+ print(f"加载权重: {name} <- {key}")
79
+ break
80
+
81
+ tokenizer = AutoTokenizer.from_pretrained(
82
+ model_name_or_path,
83
+ padding_side="left", # for batch decode
84
+ truncation_side="left",
85
+ model_max_length=1024,
86
+ trust_remote_code=True,
87
+ )
88
+
89
+ model.gradient_checkpointing_enable()
90
+ model, tokenizer = set_special_tokens(model, tokenizer)
91
+
92
+ return model, tokenizer
93
+
94
+
95
+ class ActorModel(torch.nn.Module):
96
+ def __init__(self, get_device, model_name_or_path, actor_checkpoint=None):
97
+ super().__init__()
98
+ self.get_device = get_device
99
+ self.model, self.tokenizer = load_model_and_tokenizer(model_name_or_path, actor_checkpoint)
100
+
101
+ def forward(self, observation, do_sample=True):
102
+ obs_ids = self.tokenizer(
103
+ observation,
104
+ return_tensors="pt",
105
+ padding=True,
106
+ truncation=True,
107
+ max_length=512,
108
+ ).to(self.model.device)
109
+ obs_embeds = self.model.get_input_embeddings()(obs_ids["input_ids"])
110
+ outputs = self.model.generate(
111
+ inputs_embeds=obs_embeds,
112
+ attention_mask=obs_ids["attention_mask"],
113
+ max_new_tokens=32,
114
+ do_sample=do_sample,
115
+ pad_token_id=self.tokenizer.eos_token_id,
116
+ )
117
+ action = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
118
+ return action
119
+
120
+ def behavioral_cloning_loss(self, observation, action, **kwargs):
121
+ logsum_probs = self.get_logsum_prob(
122
+ observation, action
123
+ ) # this line has been refactored and not tested
124
+ loss = -logsum_probs.mean()
125
+ return loss, {"behavioral_cloning/loss": loss.detach()}
126
+
127
+ def get_logsum_prob(self, observation, action_from_dataloader, **kwargs):
128
+ action = [a + self.tokenizer.eos_token for a in action_from_dataloader]
129
+ alltext = [obs + a for obs, a in zip(observation, action)]
130
+ generated_probabilities = self.to_tokens_and_logprobs(alltext)
131
+ assert (
132
+ len(generated_probabilities)
133
+ == len(alltext)
134
+ == len(observation)
135
+ == len(action)
136
+ )
137
+ mask = torch.zeros_like(generated_probabilities.detach(), dtype=torch.bool)
138
+
139
+ for i, (obs, act, text) in enumerate(zip(observation, action, alltext)):
140
+ assert text == obs + act
141
+ act_ids = self.tokenizer(act, return_tensors="pt", padding=True)
142
+ txt_ids = self.tokenizer(text, return_tensors="pt", padding=True)
143
+ n_token_act = len(
144
+ act_ids["input_ids"][0]
145
+ ) # [0] because the batch size is one inside the for loop
146
+ n_token_txt = len(txt_ids["input_ids"][0])
147
+ mask[i, n_token_txt - n_token_act - 1 : n_token_txt - 1] = (
148
+ True # the -1 shift is due to the generated probabilities being shifted
149
+ )
150
+
151
+ generated_probabilities = torch.where(mask, generated_probabilities, 1.0)
152
+ log_probs = torch.where(
153
+ mask, torch.log(generated_probabilities), 0.0
154
+ ) # must be separate from the line above for numerical stability (cannot take log(0.0))
155
+ logsum_probs = torch.sum(log_probs, dim=1)
156
+ del act_ids, txt_ids, log_probs, generated_probabilities
157
+ return logsum_probs
158
+
159
+ def to_tokens_and_logprobs(self, input_texts):
160
+ input_ids = self.tokenizer(
161
+ input_texts, padding=True, truncation=True, return_tensors="pt"
162
+ ).input_ids.to(self.get_device())
163
+ outputs = self.model(input_ids)
164
+ probs = torch.softmax(outputs.logits, dim=-1)
165
+
166
+ # collect the probability of the generated token -- probability at index 0 corresponds to the token at index 1
167
+ probs = probs[:, :-1, :]
168
+ input_ids = input_ids[:, 1:]
169
+ gen_probs = torch.gather(probs, 2, input_ids[:, :, None]).squeeze(-1)
170
+ del outputs, probs
171
+ torch.cuda.empty_cache()
172
+ gc.collect()
173
+ torch.cuda.memory._set_allocator_settings('max_split_size_mb:32')
174
+ return gen_probs
175
+
176
+
177
+ class RobertaCritic(torch.nn.Module):
178
+ def __init__(
179
+ self,
180
+ get_device,
181
+ discount_factor: float,
182
+ tau: float,
183
+ expectile: float,
184
+ from_checkpoint=None,
185
+ ):
186
+ super().__init__()
187
+
188
+ self.get_device = get_device
189
+ self.discount_factor = discount_factor
190
+ self.tau = tau
191
+ self.expectile = expectile
192
+
193
+ ### Define the Critic
194
+ from ArcherCritic import ArcherDoubleCritic
195
+
196
+ self.critic = ArcherDoubleCritic(in_dim=768, out_dim=1)
197
+ self.target_critic = ArcherDoubleCritic(in_dim=768, out_dim=1)
198
+ self.soft_update_target_critic(1)
199
+
200
+ if from_checkpoint is not None:
201
+ checkpoint = torch.load(from_checkpoint, map_location=torch.device("cpu"))
202
+ weights = {
203
+ k.removeprefix("critic."): v
204
+ for k, v in checkpoint["state_dict"].items()
205
+ if k.startswith("critic.")
206
+ }
207
+ self.load_state_dict(weights)
208
+ print(
209
+ "I have initialized the critic from the checkpoint: ", from_checkpoint
210
+ )
211
+
212
+ ### Miscellaneous Shortcuts
213
+ self.softmax = torch.nn.Softmax(dim=-1)
214
+ self.td_criterion = torch.nn.MSELoss()
215
+ self.expectile_criterion = lambda diff: self.loss_value_diff(
216
+ diff=diff, expectile=self.expectile
217
+ )
218
+
219
+ def get_q(self, observation, action, detach_model=False):
220
+ return self.critic.get_q(observation, action, detach_model=detach_model)
221
+
222
+ def get_v(self, inputs, detach_model=False):
223
+ return self.critic.get_v(inputs, detach_model=detach_model)
224
+
225
+ def get_target_v(self, inputs, detach_model=False):
226
+ return self.target_critic.get_v(inputs, detach_model=detach_model)
227
+
228
+ def get_target_q(self, observation, action, detach_model=False):
229
+ return self.target_critic.get_q(observation, action, detach_model=detach_model)
230
+
231
+ def get_advantages(self, observation, action):
232
+ q1, q2 = self.get_q(observation, action)
233
+ v1, v2 = self.get_v(observation)
234
+ q = torch.minimum(q1, q2)
235
+ v = torch.minimum(v1, v2)
236
+ advantages = q - v
237
+ return advantages
238
+
239
+ def argmax_advantage(self, observation, get_available_actions):
240
+ argmax_actions = []
241
+ for obs in observation:
242
+ available_actions = get_available_actions(obs)
243
+ advantages = torch.as_tensor(
244
+ [self.get_advantages([obs], [action]) for action in available_actions]
245
+ )
246
+ action = available_actions[torch.argmax(advantages)]
247
+ argmax_actions.append(action)
248
+ return argmax_actions
249
+
250
+ def soft_update_target_critic(self, tau=None):
251
+ if tau == None:
252
+ tau = self.tau
253
+ for target_param, param in zip(
254
+ self.target_critic.parameters(), self.critic.parameters()
255
+ ):
256
+ target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
257
+
258
+ def iql_loss(self, observation, action, reward, next_observation, done, **kwargs):
259
+ ### Fitting the Q function
260
+ q1, q2 = self.get_q(observation, action, detach_model=False)
261
+ q1 = q1.flatten()
262
+ q2 = q2.flatten()
263
+
264
+ reward = torch.Tensor(reward) # .to(self.agent.device)
265
+ done = torch.Tensor(done) # .to(self.agent.device)
266
+
267
+ with torch.no_grad():
268
+ target_v1, target_v2 = self.get_target_v(next_observation)
269
+
270
+ target_v1 = (
271
+ reward
272
+ + torch.logical_not(done) * target_v1.flatten() * self.discount_factor
273
+ )
274
+ target_v2 = (
275
+ reward
276
+ + torch.logical_not(done) * target_v2.flatten() * self.discount_factor
277
+ )
278
+
279
+ q1_loss = self.td_criterion(q1, target_v1)
280
+ q2_loss = self.td_criterion(q2, target_v2)
281
+
282
+ ### Fitting the value function
283
+ with torch.no_grad():
284
+ target_q1, target_q2 = self.get_target_q(
285
+ observation, action, detach_model=False
286
+ )
287
+ target_q1 = target_q1.flatten()
288
+ target_q2 = target_q2.flatten()
289
+
290
+ v1, v2 = self.get_v(observation, detach_model=False)
291
+ v1 = v1.flatten()
292
+ v2 = v2.flatten()
293
+
294
+ v1_loss = self.expectile_criterion(diff=target_q1.detach() - v1)
295
+ v2_loss = self.expectile_criterion(diff=target_q2.detach() - v2)
296
+
297
+ loss = q1_loss + q2_loss + v1_loss + v2_loss
298
+
299
+ ### Log and print what's happening
300
+ log = self.get_log(
301
+ q1=q1,
302
+ q2=q2,
303
+ v1=v1,
304
+ v2=v2,
305
+ q1_loss=q1_loss,
306
+ q2_loss=q2_loss,
307
+ v1_loss=v1_loss,
308
+ v2_loss=v2_loss,
309
+ target_q1=target_q1,
310
+ target_q2=target_q2,
311
+ )
312
+ return loss, log
313
+
314
+ def loss_value_diff(self, diff, expectile):
315
+ """Loss function for iql expectile value difference."""
316
+ weight = torch.where(diff > 0, expectile, (1 - expectile))
317
+ return (weight * (diff**2)).mean()
318
+
319
+ def get_log(
320
+ self, q1, q2, v1, v2, q1_loss, q2_loss, v1_loss, v2_loss, target_q1, target_q2
321
+ ):
322
+ return {
323
+ "critic/q1.loss": q1_loss.detach(),
324
+ "critic/q2.loss": q2_loss.detach(),
325
+ "critic/v1.loss": v1_loss.detach(),
326
+ "critic/v2.loss": v2_loss.detach(),
327
+ "critic/q1.mean": torch.mean(q1).detach(),
328
+ "critic/q1.min": torch.min(q1).detach(),
329
+ "critic/q1.max": torch.max(q1).detach(),
330
+ "critic/q2.mean": torch.mean(q2).detach(),
331
+ "critic/q2.max": torch.max(q2).detach(),
332
+ "critic/q2.min": torch.min(q2).detach(),
333
+ "critic/v1.mean": torch.mean(v1).detach(),
334
+ "critic/v1.min": torch.min(v1).detach(),
335
+ "critic/v1.max": torch.max(v1).detach(),
336
+ "critic/v2.mean": torch.mean(v2).detach(),
337
+ "critic/v2.max": torch.max(v2).detach(),
338
+ "critic/v2.min": torch.min(v2).detach(),
339
+ "critic/target_q1.mean": torch.mean(target_q1).detach(),
340
+ "critic/target_q1.min": torch.min(target_q1).detach(),
341
+ "critic/target_q1.max": torch.max(target_q1).detach(),
342
+ "critic/target_q2.mean": torch.mean(target_q2).detach(),
343
+ "critic/target_q2.max": torch.max(target_q2).detach(),
344
+ "critic/target_q2.min": torch.min(target_q2).detach(),
345
+ }
346
+
347
+
348
+ class Agent(L.LightningModule):
349
+ def validation_step(self, batch, batch_idx):
350
+
351
+ # Perform evaluation on environment with stochastic policy
352
+ return None # the environment evaluation below is currently unreachable (disabled)
353
+ eval_dataset = batch_simulate_on_environment(
354
+ policy=lambda obs: self.forward(obs), env=None
355
+ )
356
+ self.log(
357
+ "eval/avg_return", eval_dataset.mean_trajectory_return(), sync_dist=True
358
+ )
359
+ self.log(
360
+ "eval/std_return", eval_dataset.std_trajectory_return(), sync_dist=True
361
+ )
362
+
363
+ # Perform evaluation on environment with deterministic policy
364
+ deterministic_eval_dataset = batch_simulate_on_environment(
365
+ policy=lambda obs: self.forward(obs, do_sample=False),
366
+ env=None,
367
+ )
368
+ self.log(
369
+ "eval/avg_return_deterministic",
370
+ deterministic_eval_dataset.mean_trajectory_return(),
371
+ sync_dist=True,
372
+ )
373
+ self.log(
374
+ "eval/std_return_deterministic",
375
+ deterministic_eval_dataset.std_trajectory_return(),
376
+ sync_dist=True,
377
+ )
378
+
379
+ return eval_dataset.mean_trajectory_return()
380
+
381
+
382
+ class BehaviouralCloning(Agent):
383
+ def __init__(self, lr: float):
384
+ super().__init__() # Initialize LLM base class
385
+ self.save_hyperparameters()
386
+
387
+ ### Config
388
+ self.lr = lr
389
+
390
+ ### Initialization
391
+ self.agent = ActorModel(get_device=lambda: self.device)
392
+
393
+ def forward(self, observation, **kwargs):
394
+ return self.agent.forward(observation, **kwargs)
395
+
396
+ def training_step(self, batch, batch_idx):
397
+ loss, log = self.agent.behavioral_cloning_loss(**batch)
398
+ self.log_dict(log, sync_dist=True)
399
+ return loss
400
+
401
+ def configure_optimizers(self):
402
+ from torch.optim import Adam
403
+
404
+ # Collect all parameters that need to be optimized
405
+ optimizer_params = [
406
+ {"params": self.actor.model.parameters(), "lr": self.actor_lr},
407
+ ]
408
+
409
+ # If the critic should be optimized, add its parameters as well
410
+ if self.optimize_critic:
411
+ optimizer_params.append({
412
+ "params": self.critic.critic.parameters(),
413
+ "lr": self.critic_lr
414
+ })
415
+
416
+ # Create a single optimizer
417
+ optimizer = Adam(optimizer_params)
418
+
419
+ return optimizer
420
+
421
+
422
+ class FilteredBehaviouralCloning(BehaviouralCloning):
423
+ def __init__(self, lr: float, filter: float):
424
+ super().__init__(lr)
425
+
426
+ self.filter = filter
427
+
428
+ def configure_callbacks(self):
429
+ return FilterDataset(filter=self.filter)
430
+
431
+
432
+ class FilterDataset(Callback):
433
+ def __init__(self, filter: float):
434
+ self.filter = filter
435
+
436
+ def on_fit_start(self, trainer, algorithm):
437
+ print("*** Filtering Dataset ***")
438
+ dataset = trainer.datamodule.dataset
439
+ print("Statistics of Input Dataset")
440
+ print("Number of Trajectories:", dataset.nTrajectories())
441
+ print("Number of Trajectories:", len(dataset))
442
+ dataset.keep_top_fraction_of_trajectories(fraction=self.filter)
443
+ trainer.datamodule.dataset = dataset
444
+ print("Statistics of Filtered Dataset")
445
+ print("Number of Trajectories:", dataset.nTrajectories())
446
+ print("Number of Trajectories:", len(dataset))
447
+
448
+
449
+ class ActorCritic(Agent):
450
+ def __init__(
451
+ self,
452
+ model_name_or_path: str,
453
+ actor_lr: float,
454
+ critic_lr: float,
455
+ tau: float,
456
+ accumulate_grad_batches: int,
457
+ discount_factor: float,
458
+ critic_expectile: float,
459
+ optimize_critic: bool,
460
+ actor_checkpoint=None,
461
+ critic_checkpoint=None,
462
+ **kwargs
463
+ ):
464
+ super().__init__() # Initialize LLM base class
465
+ self.example_input_array = (torch.zeros(1, 1, dtype=torch.long),)
466
+ self.save_hyperparameters()
467
+ ### Config
468
+ self.actor_lr = actor_lr
469
+ self.critic_lr = critic_lr
470
+ self.discount_factor = discount_factor
471
+ self.tau = tau
472
+
473
+ ### Manual Gradient Accumulation
474
+ self.accumulate_grad_batches = accumulate_grad_batches
475
+ self.automatic_optimization = False
476
+
477
+ ### Initialization
478
+ self.actor = ActorModel(
479
+ get_device=lambda: self.device, model_name_or_path=model_name_or_path, actor_checkpoint=actor_checkpoint
480
+ )
481
+ self.critic = RobertaCritic(
482
+ get_device=lambda: self.device,
483
+ discount_factor=discount_factor,
484
+ tau=tau,
485
+ expectile=critic_expectile,
486
+ from_checkpoint=critic_checkpoint,
487
+ )
488
+
489
+ self.actor_current_backward_step = 0
490
+ self.critic_current_backward_step = 0
491
+ self.critic_warmup_gradient_steps = 0
492
+
493
+ self.optimize_actor = lambda: (
494
+ True
495
+ if self.critic_current_backward_step // self.accumulate_grad_batches
496
+ >= self.critic_warmup_gradient_steps
497
+ else False
498
+ )
499
+ self.optimize_critic = lambda: optimize_critic
500
+
501
+ def forward(self, observation, **kwargs):
502
+ action = self.actor.forward(observation, **kwargs)
503
+ return action
504
+
505
+ def training_step(self, batch, batch_idx):
506
+ # if batch_idx == 3:
507
+ # return
508
+
509
+ optimizer = self.optimizers()
510
+ mem = torch.cuda.memory_allocated() / torch.cuda.get_device_properties(0).total_memory
511
+ if mem > 0.8:
512
+ gc.collect()
513
+ torch.cuda.empty_cache()
514
+
515
+ if self.optimize_critic():
516
+ # scale losses by 1/N (for N batches of gradient accumulation)
517
+ critic_loss, critic_log = self.critic_loss(batch)
518
+ critic_loss /= self.accumulate_grad_batches
519
+ self.manual_backward(critic_loss)
520
+ self.critic_current_backward_step += 1
521
+ self.log_dict(critic_log, sync_dist=True)
522
+
523
+ # accumulate gradients of N batches
524
+ if self.critic_current_backward_step % self.accumulate_grad_batches == 0:
525
+ optimizer.step()
526
+ optimizer.zero_grad()
527
+ self.critic.soft_update_target_critic(self.tau)
528
+
529
+
530
+ if self.optimize_actor():
531
+ # scale losses by 1/N (for N batches of gradient accumulation)
532
+
533
+ actor_loss, actor_log = self.actor_loss(batch)
534
+ actor_loss /= self.accumulate_grad_batches
535
+ self.manual_backward(actor_loss)
536
+ self.actor_current_backward_step += 1
537
+ self.log_dict(actor_log, sync_dist=True)
538
+
539
+ # accumulate gradients of N batches
540
+ if self.actor_current_backward_step % self.accumulate_grad_batches == 0:
541
+ optimizer.step()
542
+ optimizer.zero_grad()
543
+
544
+ def get_actor_log(self, loss, advantages, log_prob):
545
+ return {
546
+ "actor/loss": loss.detach(),
547
+ "actor/advantages.mean": advantages.detach().mean(),
548
+ "actor/advantages.max": torch.max(advantages.detach()),
549
+ "actor/advantages.min": torch.min(advantages.detach()),
550
+ "actor/log_prob.mean": torch.mean(log_prob.detach()),
551
+ "actor/log_prob.max": torch.max(log_prob.detach()),
552
+ "actor/log_prob.min": torch.min(log_prob.detach()),
553
+ }
554
+
555
+ def configure_optimizers(self):
556
+ from torch.optim import Adam
557
+
558
+ optimizer_params = []
559
+
560
+ if hasattr(self, 'actor') and hasattr(self.actor, 'parameters'):
561
+ optimizer_params.append({
562
+ "params": self.actor.parameters(),
563
+ "lr": self.actor_lr
564
+ })
565
+
566
+ if self.optimize_critic and hasattr(self, 'critic') and hasattr(self.critic, 'parameters'):
567
+ optimizer_params.append({
568
+ "params": self.critic.parameters(),
569
+ "lr": self.critic_lr
570
+ })
571
+
572
+ if not optimizer_params:
573
+ return None
574
+
575
+ optimizer = Adam(optimizer_params)
576
+ return optimizer
577
+
578
+
579
+ class OfflineArcher(ActorCritic):
580
+ def __init__(
581
+ self,
582
+ model_name_or_path: str,
583
+ inv_temp: float,
584
+ actor_lr: float,
585
+ critic_lr: float,
586
+ tau: float,
587
+ accumulate_grad_batches: int,
588
+ discount_factor: float,
589
+ critic_expectile: float,
590
+ optimize_critic: bool,
591
+ actor_checkpoint: Optional[str] = None,
592
+ critic_checkpoint: Optional[str] = None,
593
+ **kwargs
594
+ ):
595
+ super().__init__(
596
+ model_name_or_path=model_name_or_path,
597
+ actor_lr=actor_lr,
598
+ critic_lr=critic_lr,
599
+ tau=tau,
600
+ accumulate_grad_batches=accumulate_grad_batches,
601
+ discount_factor=discount_factor,
602
+ critic_expectile=critic_expectile,
603
+ optimize_critic=optimize_critic,
604
+ actor_checkpoint=actor_checkpoint,
605
+ critic_checkpoint=critic_checkpoint,
606
+ **kwargs
607
+ )
608
+
609
+ self.inv_temp = inv_temp
610
+
611
+ self.actor_loss = lambda batch: self.awr_loss(**batch)
612
+ self.critic_loss = lambda batch: self.critic.iql_loss(**batch)
613
+
614
+ def awr_loss(self, observation, action, **kwargs):
615
+ log_prob = self.actor.get_logsum_prob(observation, action)
616
+ with torch.no_grad():
617
+ advantages = self.critic.get_advantages(observation, action)
618
+
619
+ advantages = advantages.flatten()
620
+ log_prob = log_prob.flatten()
621
+ factor = torch.exp(self.inv_temp * advantages)
622
+ loss_batch = -factor * log_prob
623
+ loss = loss_batch.mean()
624
+
625
+ # ### Log and print what's happening
626
+ log = self.get_actor_log(loss=loss, advantages=advantages, log_prob=log_prob)
627
+ log = {
628
+ **log,
629
+ **{
630
+ "actor/factor.mean": factor.detach().mean(),
631
+ "actor/factor.max": torch.max(factor.detach()),
632
+ "actor/factor.min": torch.min(factor.detach()),
633
+ },
634
+ }
635
+
636
+ return loss, log
637
+
638
+ def configure_optimizers(self):
639
+ # Delegate directly to the base-class implementation
640
+ return super().configure_optimizers()
641
+
642
+ @rank_zero_only
643
+ def on_save_checkpoint(self, checkpoint):
644
+ """保存 LoRA 适配器权重"""
645
+ super().on_save_checkpoint(checkpoint)
646
+
647
+ save_dir = self.trainer.default_root_dir
648
+ os.makedirs(save_dir, exist_ok=True)
649
+
650
+ # Save the LoRA adapter
651
+ if hasattr(self.actor.model, "save_pretrained"):
652
+ self.actor.model.save_pretrained(save_dir)
653
+
654
+ # Save the tokenizer
655
+ if hasattr(self.actor, "tokenizer"):
656
+ self.actor.tokenizer.save_pretrained(save_dir)
657
+
658
+ print(f"✅ LoRA adapter saved to: {save_dir}")
659
+ self.merge_and_save_lora(os.path.join(save_dir, "merged_model"))
660
+
661
+ def merge_and_save_lora(self, save_dir):
662
+ """
663
+ Merge the LoRA adapter weights into the base model and save the merged model and tokenizer.
664
+ """
665
+ # Only proceed if the actor model has the correct method
666
+ try:
667
+ # Make sure the model is on CPU and in eval mode
668
+ original_device = next(self.actor.model.parameters()).device
669
+ self.actor.model.to('cpu')
670
+ self.actor.model.eval()
671
+
672
+ if hasattr(self.actor.model, "merge_and_unload"):
673
+ # Perform the merge
674
+ merged_model = self.actor.model.merge_and_unload()
675
+
676
+ # Check the merge result
677
+ from peft import PeftModel
678
+ if isinstance(merged_model, PeftModel):
679
+ print(">>> [Warning] Still a PeftModel after merge. Using base_model.model...")
680
+ merged_model = merged_model.base_model.model
681
+
682
+ # Save the merged model
683
+ merged_model.save_pretrained(os.path.join(save_dir, "merged_model"))
684
+ print(f"✅ Merged model saved to: {os.path.join(save_dir, 'merged_model')}")
685
+ else:
686
+ print("❌ merge_and_unload method not found in actor.model. Cannot merge LoRA weights.")
687
+ except Exception as e:
688
+ print(f"❌ Error merging LoRA weights: {e}")
689
+ import traceback
690
+ traceback.print_exc()
691
+ finally:
692
+ # Restore the original device
693
+ self.actor.model.to(original_device)
694
+
695
+
696
+ class IQLKL(ActorCritic):
697
+ def __init__(self, kl_coeff: float, reference_actor_path, **kwargs):
698
+ super().__init__(**kwargs)
699
+
700
+ self.kl_coeff = kl_coeff
701
+ self.reference_actor = ActorModel(
702
+ get_device=lambda: self.device, from_checkpoint=reference_actor_path
703
+ )
704
+
705
+ self.actor_loss = lambda batch: self.advantage_kl_loss(**batch)
706
+ self.critic_loss = lambda batch: self.critic.iql_loss(**batch)
707
+
708
+ def advantage_kl_loss(self, observation, **kwargs):
709
+ reinforce_loss, generated_output = self.reinforce_loss(observation=observation)
710
+ with torch.no_grad():
711
+ reference_log_prob = self.reference_actor.get_logsum_prob(
712
+ observation, generated_output["action"]
713
+ )
714
+
715
+ ratio = generated_output["log_prob"] - reference_log_prob
716
+ kl_loss = (ratio.detach() + 1.0) * generated_output["log_prob"]
717
+ loss = (1 - self.kl_coeff) * reinforce_loss + self.kl_coeff * kl_loss
718
+ log = generated_output["log"]
719
+ log = {
720
+ **log,
721
+ "reference_log_prob.mean": reference_log_prob.mean(),
722
+ "reference_log_prob.max": reference_log_prob.max(),
723
+ "reference_log_prob.min": reference_log_prob.min(),
724
+ }
725
+ log = {
726
+ **log,
727
+ "kl_loss.mean": kl_loss.mean(),
728
+ "kl_loss.max": kl_loss.max(),
729
+ "kl_loss.min": kl_loss.min(),
730
+ }
731
+ log = {**log, "actor_loss.mean": loss.mean(), "ratio": ratio.mean()}
732
+
733
+ return loss.mean(), log
734
+
735
+ def reinforce_loss(self, observation, **kwargs):
736
+ ### Reinforce Loss
737
+ action = self.actor.forward(observation)
738
+ log_prob = self.actor.get_logsum_prob(observation, action)
739
+
740
+ with torch.no_grad():
741
+ advantages = self.critic.get_advantages(observation, action)
742
+
743
+ loss = -advantages.flatten() * log_prob
744
+
745
+ ### Logging
746
+ log = self.get_actor_log(
747
+ loss=torch.mean(loss.detach()), advantages=advantages, log_prob=log_prob
748
+ )
749
+ # self.log_dict(log)
750
+ return loss, {
751
+ "log_prob": log_prob,
752
+ "advantages": advantages,
753
+ "action": action,
754
+ "log": log,
755
+ }
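Two of the losses added above are easy to sanity-check on toy tensors: RobertaCritic.loss_value_diff is the asymmetric expectile regression used by the IQL-style critic update, and OfflineArcher.awr_loss weights log-probabilities by exp(inv_temp * advantage). A minimal, model-free sketch (toy numbers, not taken from the dataset):

    import torch

    def loss_value_diff(diff, expectile):
        # Same expectile loss as RobertaCritic.loss_value_diff above.
        weight = torch.where(diff > 0, expectile, (1 - expectile))
        return (weight * (diff ** 2)).mean()

    diff = torch.tensor([2.0, -1.0, 0.5, -0.5])   # toy target_q - v residuals
    print(loss_value_diff(diff, expectile=0.9))    # positive residuals are penalized 9x more
    print(loss_value_diff(diff, expectile=0.5))    # expectile 0.5 is just half the plain MSE

    # AWR-style weighting as in OfflineArcher.awr_loss:
    advantages = torch.tensor([1.0, -1.0, 0.0])
    log_prob = torch.tensor([-2.0, -2.0, -2.0])
    inv_temp = 1.0
    factor = torch.exp(inv_temp * advantages)      # exp(beta * A) up-weights high-advantage actions
    loss = (-factor * log_prob).mean()
    print(factor, loss)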
ArcherCritic.py ADDED
@@ -0,0 +1,161 @@
1
+ import torch
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ from transformers import AutoTokenizer, RobertaModel
4
+ import torch.nn as nn
5
+ from transformers import RobertaTokenizer, RobertaModel
6
+ import logging
7
+ # A logger for this file
8
+ log = logging.getLogger(__name__)
9
+
10
+ class ArcherDoubleCritic(torch.nn.Module):
11
+ def __init__(self, in_dim, out_dim):
12
+ super(ArcherDoubleCritic, self).__init__()
13
+ self.base_lm = RobertaModel.from_pretrained('roberta-base', torch_dtype=torch.float16)
14
+
15
+ ################
16
+ print("*** Master Warning - Are these used? *** ")
17
+ # self.base_lm.pooler.dense.weight = None
18
+ # self.base_lm.pooler.dense.bias = None
19
+ ###############
20
+ self.base_tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
21
+ self.base_tokenizer.truncation_side = 'left'
22
+ self.critic1 = nn.Sequential(nn.Linear(in_dim*2, in_dim),\
23
+ nn.ReLU(),\
24
+ nn.Linear(in_dim, in_dim),\
25
+ nn.ReLU(),\
26
+ nn.Linear(in_dim, out_dim))#.to(device)
27
+ self.critic2 = nn.Sequential(nn.Linear(in_dim*2, in_dim),\
28
+ nn.ReLU(),\
29
+ nn.Linear(in_dim, in_dim),\
30
+ nn.ReLU(),\
31
+ nn.Linear(in_dim, out_dim))#.to(device)
32
+ self.v_critic1 = nn.Sequential(nn.Linear(in_dim, in_dim),\
33
+ nn.ReLU(),\
34
+ nn.Linear(in_dim, in_dim),\
35
+ nn.ReLU(),\
36
+ nn.Linear(in_dim, out_dim))#.to(device)
37
+ self.v_critic2 = nn.Sequential(nn.Linear(in_dim, in_dim),\
38
+ nn.ReLU(),\
39
+ nn.Linear(in_dim, in_dim),\
40
+ nn.ReLU(),\
41
+ nn.Linear(in_dim, out_dim))#.to(device)
42
+
43
+ def get_q(self, observation, action, detach_model=False):
44
+ state_actions = [o + a for o,a in zip(observation, action)]
45
+ obs_ids = self.base_tokenizer(observation, padding = True, return_tensors='pt', truncation=True, max_length=512).to(self.base_lm.device)
46
+ if detach_model:
47
+ with torch.no_grad():
48
+ lm_states = self.base_lm(**obs_ids).last_hidden_state[:,0]
49
+ else:
50
+ lm_states = self.base_lm(**obs_ids).last_hidden_state[:,0]
51
+ action_ids = self.base_tokenizer(action, padding = True, return_tensors='pt', truncation=True, max_length=512).to(self.base_lm.device)
52
+ if detach_model:
53
+ with torch.no_grad():
54
+ action_states = self.base_lm(**action_ids).last_hidden_state[:,0]
55
+ else:
56
+ action_states = self.base_lm(**action_ids).last_hidden_state[:,0]
57
+ lm_states = torch.cat([lm_states, action_states], dim = 1)
58
+ return self.critic1(lm_states), self.critic2(lm_states)
59
+
60
+ def get_v(self, observation,detach_model=False):
61
+ obs_ids = self.base_tokenizer(observation, padding = True, return_tensors='pt', truncation=True, max_length=512).to(self.base_lm.device)
62
+ if detach_model:
63
+ with torch.no_grad():
64
+ lm_states = self.base_lm(**obs_ids).last_hidden_state[:,0]
65
+ else:
66
+ lm_states = self.base_lm(**obs_ids).last_hidden_state[:,0]
67
+ # print(action.size())
68
+ return self.v_critic1(lm_states), self.v_critic2(lm_states)
69
+
70
+
71
+ class ArcherCritic(torch.nn.Module):
72
+ def __init__(self, in_dim=768, out_dim=4096, dropout = 0.5):
73
+ super(ArcherCritic, self).__init__()
74
+ self.model = AutoModelForCausalLM.from_pretrained('gpt2')
75
+ self.critic = ArcherDoubleCritic(in_dim = 768, out_dim = 1)
76
+ self.target_critic = ArcherDoubleCritic(in_dim = 768, out_dim = 1)
77
+ self.soft_update_target_critic(1)
78
+ self.tokenizer = AutoTokenizer.from_pretrained('gpt2', trust_remote_code=True)
79
+ self.tokenizer.truncation_side = 'left'
80
+ self.tokenizer.pad_token = self.tokenizer.eos_token
81
+ self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
82
+ # self.device = device
83
+ self.dropout = torch.nn.Dropout(p=dropout)
84
+ self.softmax = torch.nn.Softmax(dim= -1)
85
+
86
+ def get_action(self, observation):
87
+ obs_ids = self.tokenizer(observation, return_tensors='pt', padding=True, max_length=512).to(self.model.device)
88
+ obs_embeds = self.model.get_input_embeddings()(obs_ids["input_ids"])
89
+ outputs = self.model.generate(inputs_embeds=obs_embeds, attention_mask=obs_ids['attention_mask'],\
90
+ max_new_tokens=32, do_sample=True, \
91
+ pad_token_id = self.tokenizer.eos_token_id)#.cpu()
92
+ raw_action = self.tokenizer.batch_decode(outputs, skip_special_tokens = True)
93
+ return raw_action
94
+
95
+ def get_q(self, observation, action, detach_model=False):
96
+ return self.critic.get_q(observation, action, detach_model = detach_model)
97
+
98
+ def get_v(self, inputs, detach_model=False):
99
+ return self.critic.get_v(inputs, detach_model = detach_model)
100
+
101
+ def get_target_q(self, observation, action, detach_model=False):
102
+ return self.target_critic.get_q(observation, action, detach_model = detach_model)
103
+
104
+ def get_log_prob(self, observation, action):
105
+ obs_ids = self.tokenizer(observation, return_tensors='pt', padding=True, max_length=512).to(self.model.device)
106
+ action_ids = self.tokenizer(action, return_tensors='pt', padding=True, max_length=512).to(self.model.device)
107
+ action_embeds = self.model.get_input_embeddings()(action_ids["input_ids"]).detach()
108
+ obs_embeds = self.model.get_input_embeddings()(obs_ids["input_ids"]).detach()
109
+ input_embeds = torch.cat([obs_embeds, action_embeds], dim = 1)
110
+ attention_mask = torch.cat([obs_ids["attention_mask"], action_ids["attention_mask"]],\
111
+ dim = 1)
112
+ outputs = self.model(inputs_embeds = input_embeds, attention_mask = attention_mask)
113
+ prediction_probs = self.softmax(outputs.logits)
114
+ selected_prediction_probs = torch.take_along_dim(prediction_probs[:, obs_ids["attention_mask"].size(1)-1:-1],\
115
+ action_ids["input_ids"].unsqueeze(2), dim=2).squeeze(2)
116
+ logsum_probs = torch.sum(torch.log(selected_prediction_probs)*action_ids["attention_mask"], dim = 1)
117
+ return logsum_probs
118
+
119
+ def soft_update_target_critic(self, tau):
120
+ # for target_critic, critic in zip(self.target_critics, self.critics):
121
+ for target_param, param in zip(
122
+ self.target_critic.parameters(), self.critic.parameters()
123
+ ):
124
+ target_param.data.copy_(
125
+ target_param.data * (1.0 - tau) + param.data * tau
126
+ )
127
+
128
+ class DoubleCritic(torch.nn.Module):
129
+ """
130
+ a double critic without base lm
131
+ """
132
+ def __init__(self, in_dim, out_dim):
133
+ super(DoubleCritic, self).__init__()
134
+ # self.device = device
135
+ self.critic1 = nn.Sequential(nn.Linear(in_dim*2, in_dim),\
136
+ nn.ReLU(),\
137
+ nn.Linear(in_dim, in_dim),\
138
+ nn.ReLU(),\
139
+ nn.Linear(in_dim, out_dim))#.to(device)
140
+ self.critic2 = nn.Sequential(nn.Linear(in_dim*2, in_dim),\
141
+ nn.ReLU(),\
142
+ nn.Linear(in_dim, in_dim),\
143
+ nn.ReLU(),\
144
+ nn.Linear(in_dim, out_dim))#.to(device)
145
+ self.v_critic1 = nn.Sequential(nn.Linear(in_dim, in_dim),\
146
+ nn.ReLU(),\
147
+ nn.Linear(in_dim, in_dim),\
148
+ nn.ReLU(),\
149
+ nn.Linear(in_dim, out_dim))#.to(device)
150
+ self.v_critic2 = nn.Sequential(nn.Linear(in_dim, in_dim),\
151
+ nn.ReLU(),\
152
+ nn.Linear(in_dim, in_dim),\
153
+ nn.ReLU(),\
154
+ nn.Linear(in_dim, out_dim))#.to(device)
155
+
156
+ def get_q(self, observation, action, detach_model=False):
157
+ lm_states = torch.cat([observation, action], dim = 1)
158
+ return self.critic1(lm_states), self.critic2(lm_states)
159
+
160
+ def get_v(self, observation,detach_model=False):
161
+ return self.v_critic1(observation), self.v_critic2(observation)
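ArcherDoubleCritic operates directly on text: both heads score the RoBERTa [CLS] embedding of the observation (and, for Q, of the action as well). A small CPU-only usage sketch with made-up game strings, assuming roberta-base can be loaded; the backbone is cast to float32 here because the class loads it in float16 while the linear heads stay in float32:

    import torch
    from ArcherCritic import ArcherDoubleCritic

    critic = ArcherDoubleCritic(in_dim=768, out_dim=1)
    critic.base_lm = critic.base_lm.float()  # avoid fp16/fp32 mismatch outside mixed-precision training

    observation = ["You are the guesser in a word game."]  # hypothetical prompt
    action = [" Is it a kind of fruit?"]                   # hypothetical reply

    with torch.no_grad():
        q1, q2 = critic.get_q(observation, action)  # each has shape [batch, 1]
        v1, v2 = critic.get_v(observation)
    print(q1.shape, v1.shape)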
Dataset.py ADDED
@@ -0,0 +1,328 @@
1
+ from torch.utils.data import Dataset
2
+ import numpy as np
3
+ import copy
4
+ from torch.utils.data import Dataset
5
+
6
+ class DummyDataset(Dataset):
7
+ def __init__(self, buffer):
8
+ self.buffer = buffer
9
+
10
+ def __len__(self):
11
+ return len(self.buffer)
12
+
13
+ def __getitem__(self, idx):
14
+ return self.buffer[idx]
15
+
16
+
17
+ class Transition:
18
+ def __init__(self, observation, action, reward, next_observation, done, **kwargs):
19
+ self.observation = observation
20
+ self.action = action
21
+ self.reward = np.single(reward)
22
+ self.next_observation = next_observation
23
+
24
+ if isinstance(done, bool):
25
+ self.done = done
26
+ elif done == 'False':
27
+ self.done = False
28
+ elif done == 'True':
29
+ self.done = True
30
+ else:
31
+ raise ValueError
32
+
33
+ # internal, to see how many times a certain transition was sampled
34
+ self.times_was_sampled = 0
35
+ def as_dict(self, as_string = False):
36
+ return {
37
+ "observation": self.observation,
38
+ "action": self.action,
39
+ "reward": self.reward if as_string == False else str(self.reward),
40
+ "next_observation": self.next_observation,
41
+ "done": self.done if as_string == False else str(self.done)
42
+ }
43
+ def __str__(self):
44
+ printout = '\n'
45
+ for key in self.as_dict():
46
+ printout += "\n" + key + ':'
47
+ printout += '\n' + str(self.as_dict()[key])
48
+ return printout
49
+
50
+ class Trajectory:
51
+ def __init__(self):
52
+ self.transitions = []
53
+ self.info = {}
54
+ def __len__(self):
55
+ return len(self.transitions)
56
+ def check_consistency(self):
57
+ assert(any([transition.done for transition in self.transitions[:-1]]) == False) # should not be done until the end
58
+ assert(self.transitions[-1].done == True )
59
+ for t in range(1,len(self.transitions)):
60
+ prior_transition = self.transitions[t-1]
61
+ current_transition = self.transitions[t]
62
+ assert(prior_transition.next_observation == current_transition.observation)
63
+ def get_rewards(self):
64
+ return [transition.reward for transition in self.transitions]
65
+ def get_return(self):
66
+ return sum([transition.reward for transition in self.transitions])
67
+ def append(self, transition):
68
+ assert(self.transitions == [] or self.transitions[-1].done == False)
69
+ self.transitions.append(Transition(**transition))
70
+ def __str__(self):
71
+ printout = '\n*** Trajectory Begins *** \n'
72
+ printout += "\nTrajectory Length: " + str(len(self))
73
+ for idx, transition in enumerate(self.transitions):
74
+ printout += "\nTransition: " + str(idx)
75
+ printout += "\n" + transition.__str__()
76
+ if self.info != None:
77
+ printout += "\nFound Special Items"
78
+ printout += str(self.info)
79
+ printout += '\n *** Trajectory Ends **** \n'
80
+ return printout
81
+
82
+ class TrajectoryDataset(Dataset):
83
+ def __init__(self):
84
+ self.trajectories = []
85
+ self.samples = [] # pointer list for fast sampling
86
+ self._last_oar = None # Last (observation, action, reward) for sequential addition
87
+ def __len__(self):
88
+ return len(self.samples)
89
+ def __getitem__(self, idx):
90
+ self.samples[idx].times_was_sampled += 1
91
+ ### Must return a copy to avoid issues if further processing is done
92
+ return copy.deepcopy(self.samples[idx].as_dict())
93
+ def append_trajectory(self, trajectory: Trajectory):
94
+ trajectory.check_consistency()
95
+ assert(self.last_trajectory_reached_end())
96
+ for transition in trajectory.transitions:
97
+ self.append_sample_sequentially(copy.deepcopy(transition.as_dict()))
98
+ self.trajectories[-1].info = copy.deepcopy(trajectory.info)
99
+ self.trajectories[-1].check_consistency()
100
+ def append_observation_action_reward(self, observation, action, reward):
101
+ if self._last_oar != None:
102
+ self.append_sample_sequentially({"observation": self._last_oar["observation"],
103
+ "action": self._last_oar["action"],
104
+ "reward": self._last_oar["reward"],
105
+ "next_observation": observation,
106
+ "done": False })
107
+ self._last_oar = {"observation": observation,
108
+ "action": action,
109
+ "reward": reward}
110
+ def append_terminal_observation(self, observation, trajectory_info = None):
111
+ assert self._last_oar != None
112
+ self.append_sample_sequentially({"observation": self._last_oar["observation"],
113
+ "action": self._last_oar["action"],
114
+ "reward": self._last_oar["reward"],
115
+ "next_observation": observation,
116
+ "done": True })
117
+ self._last_oar = None
118
+ if trajectory_info != None:
119
+ self.trajectories[-1].info = trajectory_info
120
+ self.trajectories[-1].check_consistency()
121
+
122
+ def last_trajectory_reached_end(self):
123
+ return (self.trajectories == [] or self.trajectories[-1].transitions[-1].done)
124
+
125
+ def append_sample_sequentially(self, transition):
126
+ ### is the trajectory new?
127
+ if self.last_trajectory_reached_end():
128
+ self.trajectories.append(Trajectory())
129
+ self.trajectories[-1].transitions.append(Transition(**transition))
130
+ self.samples.append(self.trajectories[-1].transitions[-1])
131
+ def nTrajectories(self):
132
+ return len(self.trajectories)
133
+ def get_all_trajectory_returns(self):
134
+ return np.asarray([trajectory.get_return() for trajectory in self.trajectories])
135
+ def check_consistency(self):
136
+ assert (sum([len(trajectory) for trajectory in self.trajectories]) == len(self.samples))
137
+ for trajectory in self.trajectories:
138
+ trajectory.check_consistency()
139
+ def sample(self, batch_size=None):
140
+ if batch_size is None:
141
+ batch_size = self.batch_size
142
+ rand_indices = np.random.randint(0, len(self.samples), size=(batch_size,))
143
+ # rand_indices = [np.random.randint(0, len(self.samples)) for _ in range(batch_size)]
144
+ for idx in rand_indices:
145
+ self.samples[idx].times_was_sampled += 1
146
+ return {
147
+ "observation": [self.samples[idx].observation for idx in rand_indices],
148
+ "action": [self.samples[idx].action for idx in rand_indices],
149
+ "reward": [self.samples[idx].reward for idx in rand_indices],
150
+ "next_observation": [self.samples[idx].next_observation for idx in rand_indices],
151
+ "done": [self.samples[idx].done for idx in rand_indices],
152
+ }
153
+ def mean_trajectory_return(self):
154
+ return np.mean(self.get_all_trajectory_returns())
155
+ def std_trajectory_return(self):
156
+ return np.std(self.get_all_trajectory_returns())
157
+ def merge(self, dataset):
158
+ self.check_consistency()
159
+ dataset.check_consistency()
160
+ for trajectory in dataset.trajectories:
161
+ for transition in trajectory.transitions:
162
+ self.append_sample_sequentially(transition.as_dict())
163
+ self.trajectories[-1].info = copy.deepcopy(trajectory.info)
164
+ self.check_consistency()
165
+ # assert(self.batch_size == dataset.batch_size)
166
+ def __str__(self):
167
+ printout = '\n \n '
168
+ printout += '\n ************************ '
169
+ printout += '\n *** Printing Dataset *** '
170
+ printout += '\n ************************ '
171
+ printout += '\n \n '
172
+ printout += '\n Number of Samples : ' + str(len(self))
173
+ printout += '\n Dataset Trajectories : ' + str(self.nTrajectories()) + '\n'
174
+ for idx, trajectory in enumerate(self.trajectories):
175
+ printout += "\n >>> Trajectory id: " + str(idx) + '\n'
176
+ printout += trajectory.__str__()
177
+ if self._last_oar != None:
178
+ printout += "\n !!! Found incomplete transition !!! \n"
179
+ for key in self._last_oar:
180
+ printout += key + '\n'
181
+ printout += str(self._last_oar[key]) + "\n"
182
+ printout += '\n ************************ '
183
+ printout += '\n *** Dataset Printed *** '
184
+ printout += '\n ************************ '
185
+ return printout
186
+
187
+ def keep_top_fraction_of_trajectories(self, fraction: float, from_high_to_low = True):
188
+ self.sort(from_high_to_low=from_high_to_low)
189
+ trajectories = self.trajectories
190
+ import math
191
+ nTraj_to_keep = int(fraction * self.nTrajectories())
192
+ self.__init__()
193
+ for i in range(nTraj_to_keep):
194
+ self.append_trajectory(trajectories[i])
195
+ print("*** Kept ", self.nTrajectories(), " trajectories")
196
+
197
+ def keep_bottom_fraction_of_trajectories(self, fraction: float):
198
+ self.keep_top_fraction_of_trajectories(fraction=fraction, from_high_to_low=False)
199
+
200
+
201
+ def max_trajectory_return(self):
202
+ return max(self.get_all_trajectory_returns())
203
+
204
+ def argmax_trajectory_return(self):
205
+ return np.argmax(self.get_all_trajectory_returns())
206
+
207
+ def min_trajectory_return(self):
208
+ return min(self.get_all_trajectory_returns())
209
+
210
+ def argmin_trajectory_return(self):
211
+ return np.argmin(self.get_all_trajectory_returns())
212
+
213
+ def sort(self, from_high_to_low):
214
+ print("Warning: new dataset created!")
215
+ returns = [trajectory.get_return() for trajectory in self.trajectories]
216
+ sorted_trajectories = sort_list(self.trajectories, returns, from_high_to_low)
217
+ self.__init__()
218
+ for traj in sorted_trajectories:
219
+ self.append_trajectory(traj)
220
+
221
+ # useful to set all rewards to e.g. -1 and encourage reaching the goal faster
222
+ def set_all_rewards_to_value(self, value):
223
+ for sample in self.samples:
224
+ sample.reward = np.single(value)
225
+
226
+ def scale_all_rewards_by_value(self, value):
227
+ for sample in self.samples:
228
+ sample.reward *= np.single(value)
229
+
230
+ def add_value_to_all_rewards(self, value):
231
+ for sample in self.samples:
232
+ sample.reward += np.single(value)
233
+
234
+ def increase_final_reward_by_value(self, value):
235
+ for trajectory in self.trajectories:
236
+ trajectory.transitions[-1].reward += np.single(value)
237
+
238
+ def append_eos_token_to_all_actions(self, eos_token):
239
+ for sample in self.samples:
240
+ sample.action += eos_token
241
+
242
+ def push_all_rewards_at_the_end_of_the_trajectory(self):
243
+ for trajectory in self.trajectories:
244
+ trajectory.transitions[-1].reward = np.single(trajectory.get_return())
245
+ for transition in trajectory.transitions[:-1]:
246
+ transition.reward = np.single(0)
247
+ assert(- len(trajectory) == trajectory.get_return() == trajectory.transitions[-1].reward)
248
+
249
+ def save(self, filename):
250
+ import json
251
+ self.check_consistency()
252
+ with open(filename, "w") as final:
253
+ json.dump([sample.as_dict(as_string = True) for sample in self.samples], final)
254
+
255
+ def load(self, filename):
256
+ import json
257
+ with open(filename, "r") as final:
258
+ data = json.load(final)
259
+ for sample in data:
260
+ self.append_sample_sequentially(sample)
261
+
262
+ def times_was_sampled(self):
263
+ return [sample.times_was_sampled for sample in self.samples]
264
+
265
+ def keep_only_trajectories_with_exact_key_and_value(self, key, value):
266
+ trajectories = self.trajectories
267
+ new_dataset = TrajectoryDataset()
268
+ for trajectory in trajectories:
269
+ if trajectory.info[key] == value:
270
+ new_dataset.append_trajectory(trajectory)
271
+ return new_dataset
272
+
273
+ def construct_tabular_state_action_space(self):
274
+ self.state_space = Counter()
275
+ self.action_space = Counter()
276
+ self.state_action_space = Counter()
277
+ for sample in self.samples:
278
+ self.state_space.add(sample.observation)
279
+ self.action_space.add(sample.action)
280
+ self.state_action_space.add((sample.observation, sample.action))
281
+
282
+ def assert_deterministic(self):
283
+ successor_states = {}
284
+ rewards = {}
285
+ for sample in self.samples:
286
+ sa = (sample.observation, sample.action)
287
+
288
+ if sa not in rewards:
289
+ rewards[sa] = sample.reward
290
+
291
+ else:
292
+ assert(rewards[sa] == sample.reward)
293
+
294
+ if sample.done: # end transition may be ill-defined
295
+ continue
296
+
297
+ if sa not in successor_states:
298
+ successor_states[sa] = sample.next_observation
299
+ else:
300
+ assert(successor_states[sa] == sample.next_observation)
301
+
302
+ class Counter():
303
+ def __init__(self):
304
+ self.register = {}
305
+ def add(self, item):
306
+ if item not in self.register:
307
+ self.register[item] = 1
308
+ else:
309
+ self.register[item] += 1
310
+ def contains(self, item):
311
+ return item in self.register
312
+
313
+ def n_samples(self, item):
314
+ return self.register[item]
315
+
316
+ class EmptyDataset():
317
+ def __init__(self, length):
318
+ self.length = length
319
+
320
+ def __len__(self):
321
+ return self.length
322
+
323
+ def __getitem__(self, idx):
324
+ return [0]
325
+
326
+ def sort_list(list1, list2, from_high_to_low):
327
+ # Sorting the List1 based on List2
328
+ return [val for (_, val) in sorted(zip(list2, list1), key=lambda x: x[0], reverse=from_high_to_low)]
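Dataset.py keeps transitions as plain text records and lets trajectories be built either whole (append_trajectory) or step by step. A short self-contained sketch of the incremental path, using made-up observation/action strings:

    from Dataset import TrajectoryDataset

    ds = TrajectoryDataset()

    # Build one two-step trajectory incrementally (toy strings).
    ds.append_observation_action_reward("obs-0", "action-0", 0.0)
    ds.append_observation_action_reward("obs-1", "action-1", 0.0)
    ds.append_terminal_observation("obs-2", trajectory_info={"task": "demo"})

    ds.check_consistency()
    print(len(ds), ds.nTrajectories())           # 2 transitions in 1 trajectory
    print(ds.mean_trajectory_return())           # 0.0
    print(ds[0]["observation"], ds[0]["done"])   # dict copies, as consumed by the DataLoader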
Llama3-8B-I_Strategic_log.txt ADDED
The diff for this file is too large to render. See raw diff
 
Llama3-8B-I_Word_log.txt ADDED
The diff for this file is too large to render. See raw diff
 
Qwen3-14B_Strategic_log.txt ADDED
@@ -0,0 +1,12 @@
1
+ Traceback (most recent call last):
2
+ File "/home/jiashuo/anaconda3/envs/archer/bin/accelerate", line 7, in <module>
3
+ sys.exit(main())
4
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 50, in main
5
+ args.func(args)
6
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1213, in launch_command
7
+ args, defaults, mp_from_config_flag = _validate_launch_command(args)
8
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1043, in _validate_launch_command
9
+ defaults = load_config_from_file(args.config_file)
10
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/accelerate/commands/config/config_args.py", line 46, in load_config_from_file
11
+ raise FileNotFoundError(
12
+ FileNotFoundError: The passed configuration file `/home/jiashuo/codes/OfflineArcher/deepspeed_zero3.syaml` does not exist. Please pass an existing file to `accelerate launch`, or use the default one created through `accelerate config` and run `accelerate launch` without the `--config_file` argument.
Qwen3-14B_Word_log.txt ADDED
@@ -0,0 +1,285 @@
1
+ The following values were not passed to `accelerate launch` and had defaults used instead:
2
+ `--num_cpu_threads_per_process` was set to `16` to improve out-of-box performance when training on CPUs
3
+ To avoid this warning pass in values for each of the problematic parameters or run `accelerate config`.
4
+ [2025-09-16 23:00:41,059] [INFO] [real_accelerator.py:260:get_accelerator] Setting ds_accelerator to cuda (auto detect)
5
+ [2025-09-16 23:00:42,764] [INFO] [logging.py:107:log_dist] [Rank -1] [TorchCheckpointEngine] Initialized with serialization = False
6
+ W0916 23:00:42.933299 185693 site-packages/torch/distributed/run.py:774]
7
+ W0916 23:00:42.933299 185693 site-packages/torch/distributed/run.py:774] *****************************************
8
+ W0916 23:00:42.933299 185693 site-packages/torch/distributed/run.py:774] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
9
+ W0916 23:00:42.933299 185693 site-packages/torch/distributed/run.py:774] *****************************************
10
+ [rank: 0] Seed set to 42
11
+ [rank: 3] Seed set to 42
12
+ [rank: 2] Seed set to 42
13
+ [rank: 1] Seed set to 42
14
+ [rank: 3] Seed set to 42
15
+ [rank: 0] Seed set to 42
16
+ `torch_dtype` is deprecated! Use `dtype` instead!
17
+ [rank: 2] Seed set to 42
18
+
19
+
20
+ `torch_dtype` is deprecated! Use `dtype` instead!
21
+ [rank: 1] Seed set to 42
22
+
23
+
24
+ `torch_dtype` is deprecated! Use `dtype` instead!
25
+
26
+ `torch_dtype` is deprecated! Use `dtype` instead!
27
+ `torch_dtype` is deprecated! Use `dtype` instead!
28
+ `torch_dtype` is deprecated! Use `dtype` instead!
29
+ `torch_dtype` is deprecated! Use `dtype` instead!
30
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
31
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
32
+ *** Master Warning - Are these used? ***
33
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
34
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
35
+ *** Master Warning - Are these used? ***
36
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
37
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
38
+ *** Master Warning - Are these used? ***
39
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
40
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
41
+ *** Master Warning - Are these used? ***
42
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
43
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
44
+ *** Master Warning - Are these used? ***
45
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
46
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
47
+ *** Master Warning - Are these used? ***
48
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
49
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
50
+ *** Master Warning - Are these used? ***
51
+ Some weights of RobertaModel were not initialized from the model checkpoint at roberta-base and are newly initialized: ['pooler.dense.bias', 'pooler.dense.weight']
52
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
53
+ *** Master Warning - Are these used? ***
54
+ [2025-09-16 23:00:53,766] [INFO] [real_accelerator.py:260:get_accelerator] Setting ds_accelerator to cuda (auto detect)
55
+ [2025-09-16 23:00:53,921] [INFO] [real_accelerator.py:260:get_accelerator] Setting ds_accelerator to cuda (auto detect)
56
+ /home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/fabric/connector.py:571: `precision=bf16` is supported for historical reasons but its usage is discouraged. Please set your precision to bf16-mixed instead!
57
+ [2025-09-16 23:00:54,516] [INFO] [real_accelerator.py:260:get_accelerator] Setting ds_accelerator to cuda (auto detect)
58
+ [2025-09-16 23:00:54,670] [INFO] [real_accelerator.py:260:get_accelerator] Setting ds_accelerator to cuda (auto detect)
59
+ [2025-09-16 23:00:55,689] [INFO] [logging.py:107:log_dist] [Rank -1] [TorchCheckpointEngine] Initialized with serialization = False
60
+ [2025-09-16 23:00:55,761] [INFO] [logging.py:107:log_dist] [Rank -1] [TorchCheckpointEngine] Initialized with serialization = False
61
+ initializing deepspeed distributed: GLOBAL_RANK: 3, MEMBER: 4/4
62
+ initializing deepspeed distributed: GLOBAL_RANK: 2, MEMBER: 3/4
63
+ [2025-09-16 23:00:56,385] [INFO] [logging.py:107:log_dist] [Rank -1] [TorchCheckpointEngine] Initialized with serialization = False
64
+ GPU available: True (cuda), used: True
65
+ TPU available: False, using: 0 TPU cores
66
+ HPU available: False, using: 0 HPUs
67
+ initializing deepspeed distributed: GLOBAL_RANK: 0, MEMBER: 1/4
68
+ [2025-09-16 23:00:56,741] [INFO] [logging.py:107:log_dist] [Rank -1] [TorchCheckpointEngine] Initialized with serialization = False
69
+ initializing deepspeed distributed: GLOBAL_RANK: 1, MEMBER: 2/4
70
+ wandb: Currently logged in as: jessie-w to https://api.wandb.ai. Use `wandb login --relogin` to force relogin
71
+ wandb: creating run
72
+ wandb: Tracking run with wandb version 0.21.4
73
+ wandb: Run data is saved locally in ./wandb/run-20250916_230057-ao2fiuap
74
+ wandb: Run `wandb offline` to turn off syncing.
75
+ wandb: Syncing run Test-AC-critic_expectile_0.9-inv_temp_1.0
76
+ wandb: ⭐️ View project at https://wandb.ai/jessie-w/WordTaboo-Official
77
+ wandb: 🚀 View run at https://wandb.ai/jessie-w/WordTaboo-Official/runs/ao2fiuap
78
+ The length of the dataset is: 13220
79
+
80
+ *** Dataset Trimming Now Disabled. Please Called the Subroutine for triming
81
+ The length of the dataset is: 13220
82
+
83
+ *** Dataset Trimming Now Disabled. Please Called the Subroutine for triming
84
+ The length of the dataset is: 13220
85
+
86
+ *** Dataset Trimming Now Disabled. Please Called the Subroutine for triming
87
+ /home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/callbacks/model_checkpoint.py:751: Checkpoint directory /home/jiashuo/codes/OfflineArcher/models exists and is not empty.
88
+ The length of the dataset is: 13220
89
+
90
+ *** Dataset Trimming Now Disabled. Please Called the Subroutine for triming
91
+ LOCAL_RANK: 2 - CUDA_VISIBLE_DEVICES: [4,5,6,7]
92
+ LOCAL_RANK: 3 - CUDA_VISIBLE_DEVICES: [4,5,6,7]
93
+ LOCAL_RANK: 1 - CUDA_VISIBLE_DEVICES: [4,5,6,7]
94
+ Enabling DeepSpeed BF16. Model parameters and inputs will be cast to `bfloat16`.
95
+ LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [4,5,6,7]
96
+ Parameter Offload - Persistent parameters statistics: param_count = 601, numel = 5932040
97
+ /home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py:527: Found 1063 module(s) in eval mode at the start of training. This may lead to unexpected behavior during training. If this is intentional, you can ignore this warning.
98
+
99
+ [rank2]: File "/home/jiashuo/codes/OfflineArcher/main.py", line 33, in <module>
100
+ [rank2]: cli_main()
101
+ [rank2]: File "/home/jiashuo/codes/OfflineArcher/main.py", line 22, in cli_main
102
+ [rank2]: cli = LightningCLI(
103
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/cli.py", line 414, in __init__
104
+ [rank2]: self._run_subcommand(self.subcommand)
105
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/cli.py", line 747, in _run_subcommand
106
+ [rank2]: fn(**fn_kwargs)
107
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 560, in fit
108
+ [rank2]: call._call_and_handle_interrupt(
109
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 48, in _call_and_handle_interrupt
110
+ [rank2]: return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
111
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/strategies/launchers/subprocess_script.py", line 105, in launch
112
+ [rank2]: return function(*args, **kwargs)
113
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 598, in _fit_impl
114
+ [rank2]: self._run(model, ckpt_path=ckpt_path)
115
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 1011, in _run
116
+ [rank2]: results = self._run_stage()
117
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/trainer/trainer.py", line 1055, in _run_stage
118
+ [rank2]: self.fit_loop.run()
119
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py", line 216, in run
120
+ [rank2]: self.advance()
121
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/loops/fit_loop.py", line 458, in advance
122
+ [rank2]: self.epoch_loop.run(self._data_fetcher)
123
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py", line 152, in run
124
+ [rank2]: self.advance(data_fetcher)
125
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/loops/training_epoch_loop.py", line 350, in advance
126
+ [rank2]: batch_output = self.manual_optimization.run(kwargs)
127
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/manual.py", line 95, in run
128
+ [rank2]: self.advance(kwargs)
129
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/loops/optimization/manual.py", line 115, in advance
130
+ [rank2]: training_step_output = call._call_strategy_hook(trainer, "training_step", *kwargs.values())
131
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/trainer/call.py", line 329, in _call_strategy_hook
132
+ [rank2]: output = fn(*args, **kwargs)
133
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 390, in training_step
134
+ [rank2]: return self._forward_redirection(self.model, self.lightning_module, "training_step", *args, **kwargs)
135
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 641, in __call__
136
+ [rank2]: wrapper_output = wrapper_module(*args, **kwargs)
137
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
138
+ [rank2]: return self._call_impl(*args, **kwargs)
139
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1784, in _call_impl
140
+ [rank2]: return forward_call(*args, **kwargs)
141
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/utils/nvtx.py", line 20, in wrapped_fn
142
+ [rank2]: ret_val = func(*args, **kwargs)
143
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/runtime/engine.py", line 2131, in forward
144
+ [rank2]: loss = self.module(*inputs, **kwargs)
145
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
146
+ [rank2]: return self._call_impl(*args, **kwargs)
147
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1879, in _call_impl
148
+ [rank2]: return inner()
149
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1827, in inner
150
+ [rank2]: result = forward_call(*args, **kwargs)
151
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/lightning/pytorch/strategies/strategy.py", line 634, in wrapped_forward
152
+ [rank2]: out = method(*_args, **_kwargs)
153
+ [rank2]: File "/home/jiashuo/codes/OfflineArcher/Algorithms.py", line 533, in training_step
154
+ [rank2]: actor_loss, actor_log = self.actor_loss(batch)
155
+ [rank2]: File "/home/jiashuo/codes/OfflineArcher/Algorithms.py", line 611, in <lambda>
156
+ [rank2]: self.actor_loss = lambda batch: self.awr_loss(**batch)
157
+ [rank2]: File "/home/jiashuo/codes/OfflineArcher/Algorithms.py", line 615, in awr_loss
158
+ [rank2]: log_prob = self.actor.get_logsum_prob(observation, action)
159
+ [rank2]: File "/home/jiashuo/codes/OfflineArcher/Algorithms.py", line 130, in get_logsum_prob
160
+ [rank2]: generated_probabilities = self.to_tokens_and_logprobs(alltext)
161
+ [rank2]: File "/home/jiashuo/codes/OfflineArcher/Algorithms.py", line 163, in to_tokens_and_logprobs
162
+ [rank2]: outputs = self.model(input_ids)
163
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
164
+ [rank2]: return self._call_impl(*args, **kwargs)
165
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1879, in _call_impl
166
+ [rank2]: return inner()
167
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1827, in inner
168
+ [rank2]: result = forward_call(*args, **kwargs)
169
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/peft/peft_model.py", line 1850, in forward
170
+ [rank2]: return self.base_model(
171
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
172
+ [rank2]: return self._call_impl(*args, **kwargs)
173
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1879, in _call_impl
174
+ [rank2]: return inner()
175
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1827, in inner
176
+ [rank2]: result = forward_call(*args, **kwargs)
177
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/peft/tuners/tuners_utils.py", line 222, in forward
178
+ [rank2]: return self.model.forward(*args, **kwargs)
179
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/utils/generic.py", line 940, in wrapper
180
+ [rank2]: output = func(self, *args, **kwargs)
181
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/models/qwen3/modeling_qwen3.py", line 480, in forward
182
+ [rank2]: outputs: BaseModelOutputWithPast = self.model(
183
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
184
+ [rank2]: return self._call_impl(*args, **kwargs)
185
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1879, in _call_impl
186
+ [rank2]: return inner()
187
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1827, in inner
188
+ [rank2]: result = forward_call(*args, **kwargs)
189
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/utils/generic.py", line 1064, in wrapper
190
+ [rank2]: outputs = func(self, *args, **kwargs)
191
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/models/qwen3/modeling_qwen3.py", line 410, in forward
192
+ [rank2]: hidden_states = decoder_layer(
193
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/modeling_layers.py", line 94, in __call__
194
+ [rank2]: return super().__call__(*args, **kwargs)
195
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
196
+ [rank2]: return self._call_impl(*args, **kwargs)
197
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1879, in _call_impl
198
+ [rank2]: return inner()
199
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1827, in inner
200
+ [rank2]: result = forward_call(*args, **kwargs)
201
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/utils/deprecation.py", line 172, in wrapped_func
202
+ [rank2]: return func(*args, **kwargs)
203
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/models/qwen3/modeling_qwen3.py", line 275, in forward
204
+ [rank2]: hidden_states = self.mlp(hidden_states)
205
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
206
+ [rank2]: return self._call_impl(*args, **kwargs)
207
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1879, in _call_impl
208
+ [rank2]: return inner()
209
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1827, in inner
210
+ [rank2]: result = forward_call(*args, **kwargs)
211
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/transformers/models/qwen3/modeling_qwen3.py", line 82, in forward
212
+ [rank2]: down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
213
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1773, in _wrapped_call_impl
214
+ [rank2]: return self._call_impl(*args, **kwargs)
215
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1879, in _call_impl
216
+ [rank2]: return inner()
217
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1816, in inner
218
+ [rank2]: args_result = hook(self, args)
219
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 929, in _fn
220
+ [rank2]: return fn(*args, **kwargs)
221
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/runtime/zero/parameter_offload.py", line 298, in _pre_forward_module_hook
222
+ [rank2]: self.pre_sub_module_forward_function(module)
223
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 120, in decorate_context
224
+ [rank2]: return func(*args, **kwargs)
225
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/runtime/zero/parameter_offload.py", line 473, in pre_sub_module_forward_function
226
+ [rank2]: param_coordinator.fetch_sub_module(sub_module, forward=True)
227
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/_dynamo/eval_frame.py", line 929, in _fn
228
+ [rank2]: return fn(*args, **kwargs)
229
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/utils/nvtx.py", line 20, in wrapped_fn
230
+ [rank2]: ret_val = func(*args, **kwargs)
231
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 120, in decorate_context
232
+ [rank2]: return func(*args, **kwargs)
233
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_coordinator.py", line 415, in fetch_sub_module
234
+ [rank2]: self.__all_gather_params(params_to_prefetch, forward)
235
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/utils/nvtx.py", line 20, in wrapped_fn
236
+ [rank2]: ret_val = func(*args, **kwargs)
237
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_coordinator.py", line 475, in __all_gather_params
238
+ [rank2]: self.__all_gather_params_(nonquantized_params, forward, quantize=self.zero_quantized_weights)
239
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/runtime/zero/partitioned_param_coordinator.py", line 504, in __all_gather_params_
240
+ [rank2]: handle = param_group[0].all_gather_coalesced(param_group, quantize=quantize)
241
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/utils/nvtx.py", line 20, in wrapped_fn
242
+ [rank2]: ret_val = func(*args, **kwargs)
243
+ [rank2]: File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/deepspeed/runtime/zero/partition_parameters.py", line 1325, in all_gather_coalesced
244
+ [rank2]: param_buffer = torch.empty(
245
+ [rank2]: torch.AcceleratorError: CUDA error: out of memory
246
+ [rank2]: Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
247
+
248
+ W0916 23:03:30.410014 185693 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 185929 closing signal SIGTERM
249
+ W0916 23:03:30.412096 185693 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 185930 closing signal SIGTERM
250
+ W0916 23:03:30.414456 185693 site-packages/torch/distributed/elastic/multiprocessing/api.py:900] Sending process 185932 closing signal SIGTERM
251
+ W0916 23:04:00.415664 185693 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 185929 via Signals.SIGTERM, forcefully exiting via Signals.SIGKILL
252
+ W0916 23:04:00.757989 185693 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 185930 via Signals.SIGTERM, forcefully exiting via Signals.SIGKILL
253
+ W0916 23:04:01.076958 185693 site-packages/torch/distributed/elastic/multiprocessing/api.py:919] Unable to shutdown process 185932 via Signals.SIGTERM, forcefully exiting via Signals.SIGKILL
254
+ E0916 23:04:01.370846 185693 site-packages/torch/distributed/elastic/multiprocessing/api.py:874] failed (exitcode: 1) local_rank: 2 (pid: 185931) of binary: /home/jiashuo/anaconda3/envs/archer/bin/python3.10
255
+ Traceback (most recent call last):
256
+ File "/home/jiashuo/anaconda3/envs/archer/bin/accelerate", line 7, in <module>
257
+ sys.exit(main())
258
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py", line 50, in main
259
+ args.func(args)
260
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/accelerate/commands/launch.py", line 1220, in launch_command
261
+ deepspeed_launcher(args)
262
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/accelerate/commands/launch.py", line 906, in deepspeed_launcher
263
+ distrib_run.run(args)
264
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/distributed/run.py", line 892, in run
265
+ elastic_launch(
266
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 143, in __call__
267
+ return launch_agent(self._config, self._entrypoint, list(args))
268
+ File "/home/jiashuo/anaconda3/envs/archer/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 277, in launch_agent
269
+ raise ChildFailedError(
270
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
271
+ ============================================================
272
+ /home/jiashuo/codes/OfflineArcher/main.py FAILED
273
+ ------------------------------------------------------------
274
+ Failures:
275
+ <NO_OTHER_FAILURES>
276
+ ------------------------------------------------------------
277
+ Root Cause (first observed failure):
278
+ [0]:
279
+ time : 2025-09-16_23:03:30
280
+ host : somea6k.local
281
+ rank : 2 (local_rank: 2)
282
+ exitcode : 1 (pid: 185931)
283
+ error_file: <N/A>
284
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
285
+ ============================================================
README.md ADDED
@@ -0,0 +1,57 @@
1
+ # OfflineArcher
2
+ Research Code for the Offline Experiments of "ArCHer: Training Language Model Agents via Hierarchical Multi-Turn RL"
3
+
4
+ [Yifei Zhou](https://yifeizhou02.github.io/), [Andrea Zanette](https://azanette.com/), [Jiayi Pan](https://www.jiayipan.me/), [Aviral Kumar](https://aviralkumar2907.github.io/), [Sergey Levine](https://people.eecs.berkeley.edu/~svlevine/)
5
+
6
+ ![archer_diagram 001](https://github.com/YifeiZhou02/ArCHer/assets/83000332/b874432a-d330-49a5-906c-bba37e17f831)
7
+
8
+
9
+ This repo supports the following methods:
10
+
11
+ - [Offline ArCHer][1]
12
+ - Offline Filtered BC
13
+ - Offline BC
14
+
15
+ [1]: https://github.com/YifeiZhou02/ArCHer
16
+
17
+ And the following environments
18
+ - [Twenty Questions][2]
19
+
20
+ [2]: https://lmrl-gym.github.io/
21
+
22
+
23
+ ## Quick Start
24
+ ### 1. Install Dependencies
25
+ ```bash
26
+ conda create -n archer python==3.10
27
+ conda activate archer
28
+
29
+ git clone https://github.com/andreazanette/OfflineArcher
30
+ cd OfflineArcher
31
+ python -m pip install -e .
32
+ ```
33
+ ### 2. Download Datasets and Oracles
34
+ Offline datasets and Oracles checkpoints used in the paper can be found [here](https://drive.google.com/drive/folders/1pRocQI0Jv479G4vNMtQn1JOq8Shf2B6U?usp=sharing).
35
+ You will need to create "oracles" and "datasets" folders and put the oracle and dataset in those folders.
36
+ The oracle for Twenty Questions should be named 20q_t5_oracle.pt and the dataset should be called "twenty_questions.json".
37
+
38
+ ### 3. Run Experiments
39
+ You can directly run experiments by running the launch scripts. For example, in order to launch Offline ArCHer on Twenty Questions, simply run
40
+ ```bash
41
+ . submit_OfflineArcher_TwentyQuestions.sh
42
+ ```
43
+ The code uses the torch lightning framework. Please refer to the documentation of torch lightning (https://lightning.ai/docs/pytorch/stable/) for additional information, such as using different flags when launching the code.
44
+ For example, in order to run on GPU 0, add
45
+ `--trainer.devices=[0]` to the launch script.
46
+
47
+ ### 4. Citing Archer
48
+ ```
49
+ @misc{zhou2024archer,
50
+ title={ArCHer: Training Language Model Agents via Hierarchical Multi-Turn RL},
51
+ author={Yifei Zhou and Andrea Zanette and Jiayi Pan and Sergey Levine and Aviral Kumar},
52
+ year={2024},
53
+ eprint={2402.19446},
54
+ archivePrefix={arXiv},
55
+ primaryClass={cs.LG}
56
+ }
57
+ ```
SimulateOnEnv.py ADDED
@@ -0,0 +1,40 @@
1
+ import torch
2
+
3
+ def batch_simulate_on_environment(policy, env, verbose = True):
4
+ if verbose:
5
+ print("*** In batch_simulate_on_environment ***")
6
+
7
+ from Dataset import Trajectory, TrajectoryDataset
8
+ from math import ceil
9
+
10
+ dataset = TrajectoryDataset()
11
+
12
+ trajectories = [Trajectory() for _ in range(env.bsize)]
13
+ batch_obs = env.reset()
14
+ batch_done = [False,]*env.bsize
15
+ while not all(batch_done):
16
+ with torch.no_grad():
17
+ actions = policy(batch_obs)
18
+ batch_feedback = env.step(actions)
19
+ for i, feedback in zip(range(env.bsize), batch_feedback):
20
+ if feedback is None:
21
+ continue
22
+
23
+ next_obs, r, done = feedback
24
+
25
+ trajectories[i].append({"observation": batch_obs[i],
26
+ "action": actions[i],
27
+ "reward": r,
28
+ "next_observation": next_obs,
29
+ "done": done,
30
+ })
31
+ batch_obs[i] = next_obs
32
+ batch_done[i] = done
33
+ for trajectory in trajectories:
34
+ dataset.append_trajectory(trajectory)
35
+ print(trajectory.transitions[-1].next_observation)
36
+
37
+ dataset.check_consistency()
38
+ if verbose:
39
+ print("Data Collection is Complete. Returns: \n", dataset.get_all_trajectory_returns(), "\n with mean: ", dataset.mean_trajectory_return(), "\n")
40
+ return dataset
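A minimal editor's sketch of the interface `batch_simulate_on_environment` expects from a policy and a batched environment, assuming this repo's modules are importable; `EchoEnv` and `random_policy` are hypothetical stand-ins, not classes from the upload.
```python
# Editor's sketch: a toy batched environment and policy matching the contract
# used above (env.bsize, env.reset() -> list of observations,
# env.step(actions) -> list of (next_observation, reward, done) or None).
import random
from SimulateOnEnv import batch_simulate_on_environment  # assumes the repo is on PYTHONPATH

class EchoEnv:  # hypothetical toy environment
    def __init__(self, bsize=2, horizon=3):
        self.bsize, self.horizon, self.t = bsize, horizon, 0

    def reset(self):
        self.t = 0
        return ["obs-0"] * self.bsize

    def step(self, actions):
        self.t += 1
        done = self.t >= self.horizon
        return [(f"obs-{self.t}", 0.0, done) for _ in actions]

def random_policy(batch_obs):  # hypothetical stand-in for an LLM policy
    return [random.choice(["ask", "guess"]) for _ in batch_obs]

rollouts = batch_simulate_on_environment(random_policy, EchoEnv(), verbose=False)
```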
Tasks.py ADDED
@@ -0,0 +1,476 @@
1
+ from lightning import LightningDataModule
2
+ import torch.utils.data as data
3
+ from Dataset import TrajectoryDataset, EmptyDataset
4
+ from SimulateOnEnv import batch_simulate_on_environment
5
+ import numpy as np
6
+ from copy import deepcopy
7
+ import sys
8
+ import random
9
+
10
+
11
+ def rsa_reward(num_feature, min_turns, conv_turn, gamma=2.0):
12
+ """
13
+ Nonlinear normalization function, returns u ∈ [0, 1]
14
+ - num_feature = min_turns -> u = 1
15
+ - num_feature = conv_turn -> u = 0
16
+ - The closer to min_turns, the slower it approaches 1
17
+ """
18
+ if num_feature == min_turns:
19
+ return 1
20
+ # Normalize to [0,1]
21
+ u = (conv_turn - num_feature) / (min_turns - num_feature)
22
+ # Keep direction (support num_feature < min_turns)
23
+ return max(0, min(1, u**gamma))
24
+
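+ # Worked example (editor's note, not in the original file): with gamma=2.0,
+ # num_feature=8 (the turn budget, len(target)*2) and min_turns=4 (fastest win),
+ # conv_turn=4 -> u=1.0 -> reward 1.0; conv_turn=6 -> u=0.5 -> reward 0.25;
+ # conv_turn=8 -> u=0.0 -> reward 0.0, so slower wins receive sharply less credit.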
25
+
26
+ class Task(LightningDataModule):
27
+ def __init__(self, batch_size: int, n_traj_eval: int, **kwargs):
28
+ super().__init__(**kwargs)
29
+ self.batch_size = batch_size
30
+ self.eval_batch_size = self.batch_size
31
+ self.n_traj_eval = n_traj_eval
32
+
33
+ # Set Defaults
34
+ self.shuffle = True
35
+ self.drop_last = True # skips last batch to make sure gradient accumulation works as intended
36
+
37
+ def setup(self, stage: str):
38
+ raise NotImplementedError
39
+
40
+ def train_dataloader(self):
41
+ return data.DataLoader(
42
+ dataset=self.dataset,
43
+ batch_size=self.batch_size,
44
+ shuffle=self.shuffle,
45
+ drop_last=self.drop_last,
46
+ num_workers=8,
47
+ pin_memory=True,
48
+ persistent_workers=True,
49
+ )
50
+
51
+ def val_dataloader(self):
52
+ return data.DataLoader(
53
+ dataset=EmptyDataset(length=self.n_traj_eval),
54
+ batch_size=self.eval_batch_size,
55
+ pin_memory=True,
56
+ )
57
+
58
+ def get_eval_log(self, **kwargs):
59
+ pass
60
+
61
+ def teardown(self, stage: str):
62
+ # Used to clean-up when the run is finished
63
+ pass
64
+
65
+
66
+ class TwentyQuestions(Task):
67
+ def __init__(self, batch_size: int, n_traj_eval: int, word_list=None, **kwargs):
68
+ super().__init__(batch_size, n_traj_eval, **kwargs)
69
+
70
+ self.word_list = word_list
71
+ self.max_horizon = 20
72
+
73
+ def setup(self, stage: str):
74
+ self.dataset = self.read_data()
75
+ self.dataset.check_consistency()
76
+ print(
77
+ "\n *** Dataset Trimming Now Disabled. Please Call the Subroutine for Trimming"
78
+ )
79
+
80
+ def read_data(self):
81
+ import json
82
+ from Dataset import TrajectoryDataset
83
+
84
+ f = open("datasets/20q_train.json")
85
+ data = json.load(f)
86
+ dataset = TrajectoryDataset()
87
+
88
+ for game in data:
89
+ assert len(game["lines"]) <= 20
90
+ history = "Questions:\n" # assertion is checked with history = ''
91
+ for interaction in game["lines"]:
92
+ yesAnswer = interaction[-5:] == " Yes."
93
+ noAnswer = interaction[-4:] == " No."
94
+ assert yesAnswer or noAnswer
95
+ observation = history
96
+
97
+ done = (
98
+ True if interaction == game["lines"][-1] else False
99
+ ) # if the interaction is the last interaction we are done
100
+ reward = 0 if done and game["correct"] else -1
101
+
102
+ if yesAnswer:
103
+ action = interaction[:-5]
104
+ if noAnswer:
105
+ action = interaction[:-4]
106
+
107
+ history += interaction + "\n"
108
+ dataset.append_observation_action_reward(observation, action, reward)
109
+ dataset.append_terminal_observation(
110
+ history,
111
+ trajectory_info={"correct": game["correct"], "word": game["word"]},
112
+ )
113
+
114
+ dataset.check_consistency()
115
+ return dataset
116
+
117
+
118
+ class RSAGame(Task):
119
+ def __init__(
120
+ self,
121
+ base_model: str,
122
+ batch_size: int,
123
+ n_traj_eval: int,
124
+ word_list=None,
125
+ **kwargs,
126
+ ):
127
+ super().__init__(batch_size, n_traj_eval, **kwargs)
128
+ self.base_model = base_model
129
+
130
+ self.word_list = word_list
131
+ self.max_horizon = 20
132
+
133
+ def setup(self, stage: str):
134
+ self.dataset = self.read_data()
135
+ self.dataset.check_consistency()
136
+ print(
137
+ "\n *** Dataset Trimming Now Disabled. Please Call the Subroutine for Trimming"
138
+ )
139
+
140
+ def read_data(self):
141
+ import json
142
+ from Dataset import TrajectoryDataset
143
+ from rsa_game import get_game_outcome, randomly_convert_game_history_to_query
144
+
145
+ with open(
146
+ f"rsa/{self.base_model}_sampling_all_targets_results.json"
147
+ ) as f:
148
+ data = json.load(f)
149
+ with open(
150
+ "rsa/reasoning_dialogs.json"
151
+ ) as f:
152
+ for key, value in json.load(f).items():
153
+ instance = {}
154
+ instance["history"] = value["dialog"]
155
+ instance["target"] = value["target_referent"].split(" ")
156
+ instance["min_turns"] = len(value["dialog"])
157
+ instance["max_turns"] = len(instance["target"]) * 2
158
+ instance["object_list"] = value["referent_set"]
159
+ data.append(instance)
160
+
161
+ dataset = TrajectoryDataset()
162
+
163
+ for game in random.sample(data, 3200):
164
+ is_valid = True
165
+ for message in game["history"]:
166
+ if message["content"] == "":
167
+ is_valid = False
168
+ break
169
+ if not is_valid:
170
+ continue
171
+
172
+ outcome, history_length = get_game_outcome(
173
+ game["history"], game["target"], game["min_turns"]
174
+ )
175
+ if outcome == "game wins":
176
+ reward = rsa_reward(
177
+ len(game["target"]) * 2, game["min_turns"] * 2, history_length
178
+ )
179
+ else:
180
+ continue
181
+
182
+ if reward == 0:
183
+ continue
184
+
185
+ for idx, interaction in enumerate(game["history"][:history_length]):
186
+ query = randomly_convert_game_history_to_query(
187
+ game["history"][:idx],
188
+ target=game["target"],
189
+ min_turns=game["min_turns"],
190
+ object_list=game["object_list"],
191
+ )
192
+ target = interaction["content"]
193
+
194
+ done = (
195
+ True if idx >= history_length - 2 else False
196
+ ) # if the interaction is the last interaction we are done
197
+ reward = 0 if done else reward
198
+
199
+ dataset.append_observation_action_reward(query, target, reward)
200
+
201
+ history = randomly_convert_game_history_to_query(
202
+ game["history"],
203
+ target=game["target"],
204
+ min_turns=game["min_turns"],
205
+ object_list=game["object_list"],
206
+ )
207
+ dataset.append_terminal_observation(
208
+ history,
209
+ trajectory_info={
210
+ "object_list": game["object_list"],
211
+ "target": game["target"],
212
+ },
213
+ )
214
+
215
+ print("The length of the dataset is: ", len(dataset))
216
+ dataset.check_consistency()
217
+ return dataset
218
+
219
+
220
+ class WordTaboo(Task):
221
+ def __init__(
222
+ self,
223
+ base_model: str,
224
+ batch_size: int,
225
+ n_traj_eval: int,
226
+ word_list=None,
227
+ **kwargs,
228
+ ):
229
+ super().__init__(batch_size, n_traj_eval, **kwargs)
230
+
231
+ self.base_model = base_model
232
+ self.word_list = word_list
233
+ self.max_horizon = 20
234
+
235
+ def setup(self, stage: str):
236
+ self.dataset = self.read_data()
237
+ self.dataset.check_consistency()
238
+ print(
239
+ "\n *** Dataset Trimming Now Disabled. Please Call the Subroutine for Trimming"
240
+ )
241
+
242
+ def read_data(self):
243
+ import json
244
+ from Dataset import TrajectoryDataset
245
+ from word_taboo import get_game_outcome, randomly_convert_game_history_to_query
246
+
247
+ with open(
248
+ f"wordtaboo/{self.base_model}_sampling_all_targets_results.json", "r"
249
+ ) as f:
250
+ data = json.load(f)
251
+ with open(
252
+ "wordtaboo/llm_game_top_test_results.json", "r"
253
+ ) as f:
254
+ data.extend(json.load(f))
255
+
256
+ dataset = TrajectoryDataset()
257
+
258
+ for game in data:
259
+ is_valid = True
260
+ for message in game["history"]:
261
+ if message["content"] == "":
262
+ is_valid = False
263
+ break
264
+ if not is_valid:
265
+ continue
266
+
267
+ outcome, history_length = get_game_outcome(
268
+ game["history"], game["target"], game["max_turns"]
269
+ )
270
+
271
+ if outcome == "defender wins":
272
+ winner = "defender"
273
+
274
+ elif outcome == "attacker wins":
275
+ if self.base_model == "Qwen3-14B":
276
+ if random.random() < 0.85: # 0.85 for qwen3; 0.9 for llama3
277
+ continue
278
+ else:
279
+ if random.random() < 0.9: # 0.85 for qwen3; 0.9 for llama3
280
+ continue
281
+ winner = "attacker"
282
+
283
+ else:
284
+ continue
285
+
286
+ for idx, interaction in enumerate(game["history"][:history_length]):
287
+ if interaction["role"] != winner:
288
+ continue
289
+
290
+ query = randomly_convert_game_history_to_query(
291
+ game["history"][:idx],
292
+ target=game["target"],
293
+ max_turns=game["max_turns"],
294
+ )
295
+
296
+ target = interaction["content"]
297
+
298
+ done = (
299
+ True if idx >= history_length - 2 else False
300
+ ) # if the interaction is the last interaction we are done
301
+ reward = 0 if done else 1
302
+
303
+ dataset.append_observation_action_reward(query, target, reward)
304
+
305
+ history = randomly_convert_game_history_to_query(
306
+ game["history"], game["target"], game["max_turns"]
307
+ )
308
+ dataset.append_terminal_observation(
309
+ history, trajectory_info={"target": game["target"]}
310
+ )
311
+ print("The length of the dataset is: ", len(dataset))
312
+ dataset.check_consistency()
313
+ return dataset
314
+
315
+
316
+ class StrategicDialogue(Task):
317
+ def __init__(
318
+ self,
319
+ base_model: str,
320
+ batch_size: int,
321
+ n_traj_eval: int,
322
+ word_list=None,
323
+ **kwargs,
324
+ ):
325
+ super().__init__(batch_size, n_traj_eval, **kwargs)
326
+ self.base_model = base_model
327
+
328
+ self.word_list = word_list
329
+ self.max_horizon = 20
330
+
331
+ def setup(self, stage: str):
332
+ self.dataset = self.read_data()
333
+ self.dataset.check_consistency()
334
+ print(
335
+ "\n *** Dataset Trimming Now Disabled. Please Call the Subroutine for Trimming"
336
+ )
337
+
338
+ def read_data(self):
339
+ import json
340
+ from Dataset import TrajectoryDataset
341
+ from word_taboo import get_game_outcome, randomly_convert_game_history_to_query
342
+
343
+ with open(
344
+ f"wordtaboo/{self.base_model}_sampling_all_targets_results.json", "r"
345
+ ) as f:
346
+ data = json.load(f)
347
+ with open(
348
+ "wordtaboo/llm_game_top_test_results.json", "r"
349
+ ) as f:
350
+ data.extend(json.load(f))
351
+
352
+ dataset = TrajectoryDataset()
353
+
354
+ for game in data:
355
+ is_valid = True
356
+ for message in game["history"]:
357
+ if message["content"] == "":
358
+ is_valid = False
359
+ break
360
+ if not is_valid:
361
+ continue
362
+
363
+ outcome, history_length = get_game_outcome(
364
+ game["history"], game["target"], game["max_turns"]
365
+ )
366
+
367
+ if outcome == "defender wins":
368
+ winner = "defender"
369
+
370
+ elif outcome == "attacker wins":
371
+ if self.base_model == "Qwen3-14B":
372
+ if random.random() < 0.85: # 0.85 for qwen3; 0.9 for llama3
373
+ continue
374
+ else:
375
+ if random.random() < 0.9: # 0.85 for qwen3; 0.9 for llama3
376
+ continue
377
+ winner = "attacker"
378
+
379
+ else:
380
+ continue
381
+
382
+ for idx, interaction in enumerate(game["history"][:history_length]):
383
+ if interaction["role"] != winner:
384
+ continue
385
+
386
+ query = randomly_convert_game_history_to_query(
387
+ game["history"][:idx],
388
+ target=game["target"],
389
+ max_turns=game["max_turns"],
390
+ )
391
+
392
+ target = interaction["content"]
393
+
394
+ done = (
395
+ True if idx >= history_length - 2 else False
396
+ ) # if the interaction is the last interaction we are done
397
+ reward = 0 if done else 1
398
+
399
+ dataset.append_observation_action_reward(query, target, reward)
400
+
401
+ history = randomly_convert_game_history_to_query(
402
+ game["history"], game["target"], game["max_turns"]
403
+ )
404
+ dataset.append_terminal_observation(
405
+ history, trajectory_info={"target": game["target"]}
406
+ )
407
+
408
+ from rsa_game import get_game_outcome, randomly_convert_game_history_to_query
409
+ with open(
410
+ f"rsa/{self.base_model}_sampling_all_targets_results.json"
411
+ ) as f:
412
+ data = json.load(f)
413
+ with open(
414
+ "rsa/reasoning_dialogs.json"
415
+ ) as f:
416
+ for key, value in json.load(f).items():
417
+ instance = {}
418
+ instance["history"] = value["dialog"]
419
+ instance["target"] = value["target_referent"].split(" ")
420
+ instance["min_turns"] = len(value["dialog"])
421
+ instance["max_turns"] = len(instance["target"]) * 2
422
+ instance["object_list"] = value["referent_set"]
423
+ data.append(instance)
424
+
425
+ for game in random.sample(data, 3200):
426
+ is_valid = True
427
+ for message in game["history"]:
428
+ if message["content"] == "":
429
+ is_valid = False
430
+ break
431
+ if not is_valid:
432
+ continue
433
+
434
+ outcome, history_length = get_game_outcome(
435
+ game["history"], game["target"], game["min_turns"]
436
+ )
437
+ if outcome == "game wins":
438
+ reward = rsa_reward(
439
+ len(game["target"]) * 2, game["min_turns"] * 2, history_length
440
+ )
441
+ else:
442
+ continue
443
+
444
+ for idx, interaction in enumerate(game["history"][:history_length]):
445
+ query = randomly_convert_game_history_to_query(
446
+ game["history"][:idx],
447
+ target=game["target"],
448
+ min_turns=game["min_turns"],
449
+ object_list=game["object_list"],
450
+ )
451
+ target = interaction["content"]
452
+
453
+ done = (
454
+ True if idx >= history_length - 2 else False
455
+ ) # if the interaction is the last interaction we are done
456
+ reward = 0 if done else reward
457
+
458
+ dataset.append_observation_action_reward(query, target, reward)
459
+
460
+ history = randomly_convert_game_history_to_query(
461
+ game["history"],
462
+ target=game["target"],
463
+ min_turns=game["min_turns"],
464
+ object_list=game["object_list"],
465
+ )
466
+ dataset.append_terminal_observation(
467
+ history,
468
+ trajectory_info={
469
+ "object_list": game["object_list"],
470
+ "target": game["target"],
471
+ },
472
+ )
473
+
474
+ print("The length of the dataset is: ", len(dataset))
475
+ dataset.check_consistency()
476
+ return dataset
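As a quick orientation for the data modules above, a minimal editor's sketch of driving one of them outside the Lightning CLI; it assumes `Tasks.py` is importable and that the offline file `datasets/20q_train.json` is present locally.
```python
# Editor's sketch: exercising a Task data module directly.
from Tasks import TwentyQuestions

task = TwentyQuestions(batch_size=4, n_traj_eval=8)
task.setup(stage="fit")           # loads datasets/20q_train.json and checks consistency
loader = task.train_dataloader()  # shuffled DataLoader over the offline transitions
print(len(task.dataset), "transitions,", len(loader), "batches per epoch")
```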
__pycache__/Algorithms.cpython-310.pyc ADDED
Binary file (22.3 kB). View file
 
__pycache__/ArcherCritic.cpython-310.pyc ADDED
Binary file (6.34 kB). View file
 
__pycache__/Dataset.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
__pycache__/SimulateOnEnv.cpython-310.pyc ADDED
Binary file (1.4 kB). View file
 
__pycache__/Tasks.cpython-310.pyc ADDED
Binary file (9.6 kB). View file
 
__pycache__/rsa_game.cpython-310.pyc ADDED
Binary file (18.9 kB). View file
 
__pycache__/twenty_questions.cpython-310.pyc ADDED
Binary file (8.53 kB). View file
 
__pycache__/word_taboo.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
archer.egg-info/PKG-INFO ADDED
@@ -0,0 +1,95 @@
1
+ Metadata-Version: 2.4
2
+ Name: archer
3
+ Version: 0.1.0
4
+ Summary: Research code for Offline ArCHer (Actor Critic Framework with Hierarchical Structures)
5
+ Home-page: https://github.com/andreazanette/OfflineArcher.git
6
+ Author: Andrea Zanette
7
+ License: MIT
8
+ Keywords: ArCHer
9
+ Classifier: Intended Audience :: Science/Research
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
13
+ Requires-Python: >=3.7
14
+ Description-Content-Type: text/markdown
15
+ Requires-Dist: lightning
16
+ Requires-Dist: jsonargparse
17
+ Requires-Dist: lightning-utilities
18
+ Requires-Dist: numpy
19
+ Requires-Dist: pytorch-lightning
20
+ Requires-Dist: safetensors
21
+ Requires-Dist: sentencepiece
22
+ Requires-Dist: torch
23
+ Requires-Dist: torchmetrics
24
+ Requires-Dist: tqdm
25
+ Requires-Dist: transformers
26
+ Requires-Dist: wandb
27
+ Requires-Dist: jsonargparse[signatures]>=4.26.1
28
+ Dynamic: author
29
+ Dynamic: classifier
30
+ Dynamic: description
31
+ Dynamic: description-content-type
32
+ Dynamic: home-page
33
+ Dynamic: keywords
34
+ Dynamic: license
35
+ Dynamic: requires-dist
36
+ Dynamic: requires-python
37
+ Dynamic: summary
38
+
39
+ # OfflineArcher
40
+ Research Code for the Offline Experiments of "ArCHer: Training Language Model Agents via Hierarchical Multi-Turn RL"
41
+
42
+ [Yifei Zhou](https://yifeizhou02.github.io/), [Andrea Zanette](https://azanette.com/), [Jiayi Pan](https://www.jiayipan.me/), [Aviral Kumar](https://aviralkumar2907.github.io/), [Sergey Levine](https://people.eecs.berkeley.edu/~svlevine/)
43
+
44
+ ![archer_diagram 001](https://github.com/YifeiZhou02/ArCHer/assets/83000332/b874432a-d330-49a5-906c-bba37e17f831)
45
+
46
+
47
+ This repo supports the following methods:
48
+
49
+ - [Offline ArCHer][1]
50
+ - Offline Filtered BC
51
+ - Offline BC
52
+
53
+ [1]: https://github.com/YifeiZhou02/ArCHer
54
+
55
+ And the following environments
56
+ - [Twenty Questions][2]
57
+
58
+ [2]: https://lmrl-gym.github.io/
59
+
60
+
61
+ ## Quick Start
62
+ ### 1. Install Dependencies
63
+ ```bash
64
+ conda create -n archer python==3.10
65
+ conda activate archer
66
+
67
+ git clone https://github.com/andreazanette/OfflineArcher
68
+ cd OfflineArcher
69
+ python -m pip install -e .
70
+ ```
71
+ ### 2. Download Datasets and Oracles
72
+ Offline datasets and Oracles checkpoints used in the paper can be found [here](https://drive.google.com/drive/folders/1pRocQI0Jv479G4vNMtQn1JOq8Shf2B6U?usp=sharing).
73
+ You will need to create "oracles" and "datasets" folders and put the oracle and dataset in those folders.
74
+ The oracle for Twenty Questions should be named 20q_t5_oracle.pt and the dataset should be called "twenty_questions.json".
75
+
76
+ ### 3. Run Experiments
77
+ You can directly run experiments by running the launch scripts. For example, in order to launch Offline ArCHer on Twenty Questions, simply run
78
+ ```bash
79
+ . submit_OfflineArcher_TwentyQuestions.sh
80
+ ```
81
+ The code uses the torch lightning framework. Please refer to the documentation of torch lightning (https://lightning.ai/docs/pytorch/stable/) for additional information, such as using different flags when launching the code.
82
+ For example, in order to run on GPU 0, add
83
+ `--trainer.devices=[0]` to the launch script.
84
+
85
+ ### 4. Citing Archer
86
+ ```
87
+ @misc{zhou2024archer,
88
+ title={ArCHer: Training Language Model Agents via Hierarchical Multi-Turn RL},
89
+ author={Yifei Zhou and Andrea Zanette and Jiayi Pan and Sergey Levine and Aviral Kumar},
90
+ year={2024},
91
+ eprint={2402.19446},
92
+ archivePrefix={arXiv},
93
+ primaryClass={cs.LG}
94
+ }
95
+ ```
archer.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,7 @@
1
+ README.md
2
+ setup.py
3
+ archer.egg-info/PKG-INFO
4
+ archer.egg-info/SOURCES.txt
5
+ archer.egg-info/dependency_links.txt
6
+ archer.egg-info/requires.txt
7
+ archer.egg-info/top_level.txt
archer.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
1
+
archer.egg-info/requires.txt ADDED
@@ -0,0 +1,13 @@
1
+ lightning
2
+ jsonargparse
3
+ lightning-utilities
4
+ numpy
5
+ pytorch-lightning
6
+ safetensors
7
+ sentencepiece
8
+ torch
9
+ torchmetrics
10
+ tqdm
11
+ transformers
12
+ wandb
13
+ jsonargparse[signatures]>=4.26.1
archer.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+
archer.tar.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56ce34aac81f6fa571d4d52496dff623e49463700b9f3506e0c12a467d56f16e
3
+ size 4223243374
checkpoints/archer_Llama3-8B-I_strategic/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /home/jiashuo/codes/ForesightOptim/checkpoints/im_Llama3-8B-I_strategic/merged_model
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/home/jiashuo/codes/ForesightOptim/checkpoints/im_Llama3-8B-I_strategic/merged_model
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.17.1
checkpoints/archer_Llama3-8B-I_strategic/adapter_config.json ADDED
@@ -0,0 +1,37 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "/home/jiashuo/codes/ForesightOptim/checkpoints/im_Llama3-8B-I_strategic/merged_model",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.05,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "qalora_group_size": 16,
24
+ "r": 8,
25
+ "rank_pattern": {},
26
+ "revision": null,
27
+ "target_modules": [
28
+ "q_proj",
29
+ "v_proj"
30
+ ],
31
+ "target_parameters": null,
32
+ "task_type": "CAUSAL_LM",
33
+ "trainable_token_indices": null,
34
+ "use_dora": false,
35
+ "use_qalora": false,
36
+ "use_rslora": false
37
+ }
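This adapter_config.json describes a standard PEFT LoRA setup: rank 8, alpha 16, dropout 0.05, adapters on the q_proj and v_proj attention projections, task type CAUSAL_LM, on top of a merged Llama3-8B-Instruct base model. A minimal loading sketch, assuming the base-model path from the config is available locally (otherwise substitute a Hub ID) and that the adapter directory is this checkpoint folder:

```python
# Sketch: attach the LoRA adapter in this checkpoint to its base model with PEFT.
# Both paths are assumptions taken from the repo layout / adapter_config.json.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_path = "/home/jiashuo/codes/ForesightOptim/checkpoints/im_Llama3-8B-I_strategic/merged_model"
adapter_path = "checkpoints/archer_Llama3-8B-I_strategic"

base = AutoModelForCausalLM.from_pretrained(base_path, torch_dtype="auto", device_map="auto")
model = PeftModel.from_pretrained(base, adapter_path)    # applies the r=8 LoRA weights
tokenizer = AutoTokenizer.from_pretrained(adapter_path)  # tokenizer files ship with the adapter
model.eval()
```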
checkpoints/archer_Llama3-8B-I_strategic/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb3d93de581e4e6b207ed65fa6970bb0ef8ca2a52e8bd9edebc8ab70945f5039
3
+ size 6832728
checkpoints/archer_Llama3-8B-I_strategic/chat_template.jinja ADDED
@@ -0,0 +1,5 @@
1
+ {% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
2
+
3
+ '+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
4
+
5
+ ' }}{% endif %}
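The chat template renders each message in the Llama-3 header format (`<|start_header_id|>role<|end_header_id|>`, a blank line, the trimmed content, then `<|eot_id|>`), prepends the BOS token to the first message, and appends an empty assistant header when a generation prompt is requested. A small sketch of exercising it through the tokenizer; the checkpoint path and messages are illustrative:

```python
# Sketch: render a two-turn conversation with the bundled Llama-3 chat template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoints/archer_Llama3-8B-I_strategic")  # assumed path
messages = [
    {"role": "system", "content": "You are playing a word-guessing game."},
    {"role": "user", "content": "Ask your first question."},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)  # ends with the assistant header, ready for generation
```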
checkpoints/archer_Llama3-8B-I_strategic/special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<|begin_of_text|>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|eot_id|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|eot_id|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
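Note that the pad token is mapped to the same `<|eot_id|>` token as EOS, a common choice for Llama-3 checkpoints that lack a dedicated padding token; batched generation then has to pass the pad id explicitly so it is not mistaken for a missing setting. A hedged sketch, with the checkpoint path assumed:

```python
# Sketch: pad and eos both resolve to <|eot_id|> in this checkpoint.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoints/archer_Llama3-8B-I_strategic")  # assumed path
assert tok.pad_token == tok.eos_token == "<|eot_id|>"

batch = tok(["short prompt", "a somewhat longer prompt"], padding=True, return_tensors="pt")
print(batch["attention_mask"])  # padded positions are masked out
# later: model.generate(**batch, pad_token_id=tok.eos_token_id)
```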
checkpoints/archer_Llama3-8B-I_strategic/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fc5ed64d17c57f61c0ef996ac8b3a8918e7d406866cc4a0292d362a31a217e4
3
+ size 17210125
checkpoints/archer_Llama3-8B-I_strategic/tokenizer_config.json ADDED
@@ -0,0 +1,2065 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "128000": {
4
+ "content": "<|begin_of_text|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "128001": {
12
+ "content": "<|end_of_text|>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "128002": {
20
+ "content": "<|reserved_special_token_0|>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "128003": {
28
+ "content": "<|reserved_special_token_1|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "128004": {
36
+ "content": "<|reserved_special_token_2|>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "128005": {
44
+ "content": "<|reserved_special_token_3|>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "128006": {
52
+ "content": "<|start_header_id|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "128007": {
60
+ "content": "<|end_header_id|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "128008": {
68
+ "content": "<|reserved_special_token_4|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "128009": {
76
+ "content": "<|eot_id|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "128010": {
84
+ "content": "<|reserved_special_token_5|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "128011": {
92
+ "content": "<|reserved_special_token_6|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "128012": {
100
+ "content": "<|reserved_special_token_7|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "128013": {
108
+ "content": "<|reserved_special_token_8|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "128014": {
116
+ "content": "<|reserved_special_token_9|>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "128015": {
124
+ "content": "<|reserved_special_token_10|>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "128016": {
132
+ "content": "<|reserved_special_token_11|>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "128017": {
140
+ "content": "<|reserved_special_token_12|>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "128018": {
148
+ "content": "<|reserved_special_token_13|>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "128019": {
156
+ "content": "<|reserved_special_token_14|>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "128020": {
164
+ "content": "<|reserved_special_token_15|>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "128021": {
172
+ "content": "<|reserved_special_token_16|>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "128022": {
180
+ "content": "<|reserved_special_token_17|>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "128023": {
188
+ "content": "<|reserved_special_token_18|>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "128024": {
196
+ "content": "<|reserved_special_token_19|>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "128025": {
204
+ "content": "<|reserved_special_token_20|>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "128026": {
212
+ "content": "<|reserved_special_token_21|>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "128027": {
220
+ "content": "<|reserved_special_token_22|>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "128028": {
228
+ "content": "<|reserved_special_token_23|>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "128029": {
236
+ "content": "<|reserved_special_token_24|>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "128030": {
244
+ "content": "<|reserved_special_token_25|>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "128031": {
252
+ "content": "<|reserved_special_token_26|>",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "128032": {
260
+ "content": "<|reserved_special_token_27|>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "128033": {
268
+ "content": "<|reserved_special_token_28|>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "128034": {
276
+ "content": "<|reserved_special_token_29|>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "128035": {
284
+ "content": "<|reserved_special_token_30|>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "128036": {
292
+ "content": "<|reserved_special_token_31|>",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "128037": {
300
+ "content": "<|reserved_special_token_32|>",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "128038": {
308
+ "content": "<|reserved_special_token_33|>",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "128039": {
316
+ "content": "<|reserved_special_token_34|>",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "128040": {
324
+ "content": "<|reserved_special_token_35|>",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "128041": {
332
+ "content": "<|reserved_special_token_36|>",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "128042": {
340
+ "content": "<|reserved_special_token_37|>",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "128043": {
348
+ "content": "<|reserved_special_token_38|>",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "128044": {
356
+ "content": "<|reserved_special_token_39|>",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "128045": {
364
+ "content": "<|reserved_special_token_40|>",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "128046": {
372
+ "content": "<|reserved_special_token_41|>",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "128047": {
380
+ "content": "<|reserved_special_token_42|>",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "128048": {
388
+ "content": "<|reserved_special_token_43|>",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "128049": {
396
+ "content": "<|reserved_special_token_44|>",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "128050": {
404
+ "content": "<|reserved_special_token_45|>",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "128051": {
412
+ "content": "<|reserved_special_token_46|>",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "128052": {
420
+ "content": "<|reserved_special_token_47|>",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "128053": {
428
+ "content": "<|reserved_special_token_48|>",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "128054": {
436
+ "content": "<|reserved_special_token_49|>",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "128055": {
444
+ "content": "<|reserved_special_token_50|>",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "128056": {
452
+ "content": "<|reserved_special_token_51|>",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "128057": {
460
+ "content": "<|reserved_special_token_52|>",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "128058": {
468
+ "content": "<|reserved_special_token_53|>",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "128059": {
476
+ "content": "<|reserved_special_token_54|>",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "128060": {
484
+ "content": "<|reserved_special_token_55|>",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "128061": {
492
+ "content": "<|reserved_special_token_56|>",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "128062": {
500
+ "content": "<|reserved_special_token_57|>",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "128063": {
508
+ "content": "<|reserved_special_token_58|>",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "128064": {
516
+ "content": "<|reserved_special_token_59|>",
517
+ "lstrip": false,
518
+ "normalized": false,
519
+ "rstrip": false,
520
+ "single_word": false,
521
+ "special": true
522
+ },
523
+ "128065": {
524
+ "content": "<|reserved_special_token_60|>",
525
+ "lstrip": false,
526
+ "normalized": false,
527
+ "rstrip": false,
528
+ "single_word": false,
529
+ "special": true
530
+ },
531
+ "128066": {
532
+ "content": "<|reserved_special_token_61|>",
533
+ "lstrip": false,
534
+ "normalized": false,
535
+ "rstrip": false,
536
+ "single_word": false,
537
+ "special": true
538
+ },
539
+ "128067": {
540
+ "content": "<|reserved_special_token_62|>",
541
+ "lstrip": false,
542
+ "normalized": false,
543
+ "rstrip": false,
544
+ "single_word": false,
545
+ "special": true
546
+ },
547
+ "128068": {
548
+ "content": "<|reserved_special_token_63|>",
549
+ "lstrip": false,
550
+ "normalized": false,
551
+ "rstrip": false,
552
+ "single_word": false,
553
+ "special": true
554
+ },
555
+ "128069": {
556
+ "content": "<|reserved_special_token_64|>",
557
+ "lstrip": false,
558
+ "normalized": false,
559
+ "rstrip": false,
560
+ "single_word": false,
561
+ "special": true
562
+ },
563
+ "128070": {
564
+ "content": "<|reserved_special_token_65|>",
565
+ "lstrip": false,
566
+ "normalized": false,
567
+ "rstrip": false,
568
+ "single_word": false,
569
+ "special": true
570
+ },
571
+ "128071": {
572
+ "content": "<|reserved_special_token_66|>",
573
+ "lstrip": false,
574
+ "normalized": false,
575
+ "rstrip": false,
576
+ "single_word": false,
577
+ "special": true
578
+ },
579
+ "128072": {
580
+ "content": "<|reserved_special_token_67|>",
581
+ "lstrip": false,
582
+ "normalized": false,
583
+ "rstrip": false,
584
+ "single_word": false,
585
+ "special": true
586
+ },
587
+ "128073": {
588
+ "content": "<|reserved_special_token_68|>",
589
+ "lstrip": false,
590
+ "normalized": false,
591
+ "rstrip": false,
592
+ "single_word": false,
593
+ "special": true
594
+ },
595
+ "128074": {
596
+ "content": "<|reserved_special_token_69|>",
597
+ "lstrip": false,
598
+ "normalized": false,
599
+ "rstrip": false,
600
+ "single_word": false,
601
+ "special": true
602
+ },
603
+ "128075": {
604
+ "content": "<|reserved_special_token_70|>",
605
+ "lstrip": false,
606
+ "normalized": false,
607
+ "rstrip": false,
608
+ "single_word": false,
609
+ "special": true
610
+ },
611
+ "128076": {
612
+ "content": "<|reserved_special_token_71|>",
613
+ "lstrip": false,
614
+ "normalized": false,
615
+ "rstrip": false,
616
+ "single_word": false,
617
+ "special": true
618
+ },
619
+ "128077": {
620
+ "content": "<|reserved_special_token_72|>",
621
+ "lstrip": false,
622
+ "normalized": false,
623
+ "rstrip": false,
624
+ "single_word": false,
625
+ "special": true
626
+ },
627
+ "128078": {
628
+ "content": "<|reserved_special_token_73|>",
629
+ "lstrip": false,
630
+ "normalized": false,
631
+ "rstrip": false,
632
+ "single_word": false,
633
+ "special": true
634
+ },
635
+ "128079": {
636
+ "content": "<|reserved_special_token_74|>",
637
+ "lstrip": false,
638
+ "normalized": false,
639
+ "rstrip": false,
640
+ "single_word": false,
641
+ "special": true
642
+ },
643
+ "128080": {
644
+ "content": "<|reserved_special_token_75|>",
645
+ "lstrip": false,
646
+ "normalized": false,
647
+ "rstrip": false,
648
+ "single_word": false,
649
+ "special": true
650
+ },
651
+ "128081": {
652
+ "content": "<|reserved_special_token_76|>",
653
+ "lstrip": false,
654
+ "normalized": false,
655
+ "rstrip": false,
656
+ "single_word": false,
657
+ "special": true
658
+ },
659
+ "128082": {
660
+ "content": "<|reserved_special_token_77|>",
661
+ "lstrip": false,
662
+ "normalized": false,
663
+ "rstrip": false,
664
+ "single_word": false,
665
+ "special": true
666
+ },
667
+ "128083": {
668
+ "content": "<|reserved_special_token_78|>",
669
+ "lstrip": false,
670
+ "normalized": false,
671
+ "rstrip": false,
672
+ "single_word": false,
673
+ "special": true
674
+ },
675
+ "128084": {
676
+ "content": "<|reserved_special_token_79|>",
677
+ "lstrip": false,
678
+ "normalized": false,
679
+ "rstrip": false,
680
+ "single_word": false,
681
+ "special": true
682
+ },
683
+ "128085": {
684
+ "content": "<|reserved_special_token_80|>",
685
+ "lstrip": false,
686
+ "normalized": false,
687
+ "rstrip": false,
688
+ "single_word": false,
689
+ "special": true
690
+ },
691
+ "128086": {
692
+ "content": "<|reserved_special_token_81|>",
693
+ "lstrip": false,
694
+ "normalized": false,
695
+ "rstrip": false,
696
+ "single_word": false,
697
+ "special": true
698
+ },
699
+ "128087": {
700
+ "content": "<|reserved_special_token_82|>",
701
+ "lstrip": false,
702
+ "normalized": false,
703
+ "rstrip": false,
704
+ "single_word": false,
705
+ "special": true
706
+ },
707
+ "128088": {
708
+ "content": "<|reserved_special_token_83|>",
709
+ "lstrip": false,
710
+ "normalized": false,
711
+ "rstrip": false,
712
+ "single_word": false,
713
+ "special": true
714
+ },
715
+ "128089": {
716
+ "content": "<|reserved_special_token_84|>",
717
+ "lstrip": false,
718
+ "normalized": false,
719
+ "rstrip": false,
720
+ "single_word": false,
721
+ "special": true
722
+ },
723
+ "128090": {
724
+ "content": "<|reserved_special_token_85|>",
725
+ "lstrip": false,
726
+ "normalized": false,
727
+ "rstrip": false,
728
+ "single_word": false,
729
+ "special": true
730
+ },
731
+ "128091": {
732
+ "content": "<|reserved_special_token_86|>",
733
+ "lstrip": false,
734
+ "normalized": false,
735
+ "rstrip": false,
736
+ "single_word": false,
737
+ "special": true
738
+ },
739
+ "128092": {
740
+ "content": "<|reserved_special_token_87|>",
741
+ "lstrip": false,
742
+ "normalized": false,
743
+ "rstrip": false,
744
+ "single_word": false,
745
+ "special": true
746
+ },
747
+ "128093": {
748
+ "content": "<|reserved_special_token_88|>",
749
+ "lstrip": false,
750
+ "normalized": false,
751
+ "rstrip": false,
752
+ "single_word": false,
753
+ "special": true
754
+ },
755
+ "128094": {
756
+ "content": "<|reserved_special_token_89|>",
757
+ "lstrip": false,
758
+ "normalized": false,
759
+ "rstrip": false,
760
+ "single_word": false,
761
+ "special": true
762
+ },
763
+ "128095": {
764
+ "content": "<|reserved_special_token_90|>",
765
+ "lstrip": false,
766
+ "normalized": false,
767
+ "rstrip": false,
768
+ "single_word": false,
769
+ "special": true
770
+ },
771
+ "128096": {
772
+ "content": "<|reserved_special_token_91|>",
773
+ "lstrip": false,
774
+ "normalized": false,
775
+ "rstrip": false,
776
+ "single_word": false,
777
+ "special": true
778
+ },
779
+ "128097": {
780
+ "content": "<|reserved_special_token_92|>",
781
+ "lstrip": false,
782
+ "normalized": false,
783
+ "rstrip": false,
784
+ "single_word": false,
785
+ "special": true
786
+ },
787
+ "128098": {
788
+ "content": "<|reserved_special_token_93|>",
789
+ "lstrip": false,
790
+ "normalized": false,
791
+ "rstrip": false,
792
+ "single_word": false,
793
+ "special": true
794
+ },
795
+ "128099": {
796
+ "content": "<|reserved_special_token_94|>",
797
+ "lstrip": false,
798
+ "normalized": false,
799
+ "rstrip": false,
800
+ "single_word": false,
801
+ "special": true
802
+ },
803
+ "128100": {
804
+ "content": "<|reserved_special_token_95|>",
805
+ "lstrip": false,
806
+ "normalized": false,
807
+ "rstrip": false,
808
+ "single_word": false,
809
+ "special": true
810
+ },
811
+ "128101": {
812
+ "content": "<|reserved_special_token_96|>",
813
+ "lstrip": false,
814
+ "normalized": false,
815
+ "rstrip": false,
816
+ "single_word": false,
817
+ "special": true
818
+ },
819
+ "128102": {
820
+ "content": "<|reserved_special_token_97|>",
821
+ "lstrip": false,
822
+ "normalized": false,
823
+ "rstrip": false,
824
+ "single_word": false,
825
+ "special": true
826
+ },
827
+ "128103": {
828
+ "content": "<|reserved_special_token_98|>",
829
+ "lstrip": false,
830
+ "normalized": false,
831
+ "rstrip": false,
832
+ "single_word": false,
833
+ "special": true
834
+ },
835
+ "128104": {
836
+ "content": "<|reserved_special_token_99|>",
837
+ "lstrip": false,
838
+ "normalized": false,
839
+ "rstrip": false,
840
+ "single_word": false,
841
+ "special": true
842
+ },
843
+ "128105": {
844
+ "content": "<|reserved_special_token_100|>",
845
+ "lstrip": false,
846
+ "normalized": false,
847
+ "rstrip": false,
848
+ "single_word": false,
849
+ "special": true
850
+ },
851
+ "128106": {
852
+ "content": "<|reserved_special_token_101|>",
853
+ "lstrip": false,
854
+ "normalized": false,
855
+ "rstrip": false,
856
+ "single_word": false,
857
+ "special": true
858
+ },
859
+ "128107": {
860
+ "content": "<|reserved_special_token_102|>",
861
+ "lstrip": false,
862
+ "normalized": false,
863
+ "rstrip": false,
864
+ "single_word": false,
865
+ "special": true
866
+ },
867
+ "128108": {
868
+ "content": "<|reserved_special_token_103|>",
869
+ "lstrip": false,
870
+ "normalized": false,
871
+ "rstrip": false,
872
+ "single_word": false,
873
+ "special": true
874
+ },
875
+ "128109": {
876
+ "content": "<|reserved_special_token_104|>",
877
+ "lstrip": false,
878
+ "normalized": false,
879
+ "rstrip": false,
880
+ "single_word": false,
881
+ "special": true
882
+ },
883
+ "128110": {
884
+ "content": "<|reserved_special_token_105|>",
885
+ "lstrip": false,
886
+ "normalized": false,
887
+ "rstrip": false,
888
+ "single_word": false,
889
+ "special": true
890
+ },
891
+ "128111": {
892
+ "content": "<|reserved_special_token_106|>",
893
+ "lstrip": false,
894
+ "normalized": false,
895
+ "rstrip": false,
896
+ "single_word": false,
897
+ "special": true
898
+ },
899
+ "128112": {
900
+ "content": "<|reserved_special_token_107|>",
901
+ "lstrip": false,
902
+ "normalized": false,
903
+ "rstrip": false,
904
+ "single_word": false,
905
+ "special": true
906
+ },
907
+ "128113": {
908
+ "content": "<|reserved_special_token_108|>",
909
+ "lstrip": false,
910
+ "normalized": false,
911
+ "rstrip": false,
912
+ "single_word": false,
913
+ "special": true
914
+ },
915
+ "128114": {
916
+ "content": "<|reserved_special_token_109|>",
917
+ "lstrip": false,
918
+ "normalized": false,
919
+ "rstrip": false,
920
+ "single_word": false,
921
+ "special": true
922
+ },
923
+ "128115": {
924
+ "content": "<|reserved_special_token_110|>",
925
+ "lstrip": false,
926
+ "normalized": false,
927
+ "rstrip": false,
928
+ "single_word": false,
929
+ "special": true
930
+ },
931
+ "128116": {
932
+ "content": "<|reserved_special_token_111|>",
933
+ "lstrip": false,
934
+ "normalized": false,
935
+ "rstrip": false,
936
+ "single_word": false,
937
+ "special": true
938
+ },
939
+ "128117": {
940
+ "content": "<|reserved_special_token_112|>",
941
+ "lstrip": false,
942
+ "normalized": false,
943
+ "rstrip": false,
944
+ "single_word": false,
945
+ "special": true
946
+ },
947
+ "128118": {
948
+ "content": "<|reserved_special_token_113|>",
949
+ "lstrip": false,
950
+ "normalized": false,
951
+ "rstrip": false,
952
+ "single_word": false,
953
+ "special": true
954
+ },
955
+ "128119": {
956
+ "content": "<|reserved_special_token_114|>",
957
+ "lstrip": false,
958
+ "normalized": false,
959
+ "rstrip": false,
960
+ "single_word": false,
961
+ "special": true
962
+ },
963
+ "128120": {
964
+ "content": "<|reserved_special_token_115|>",
965
+ "lstrip": false,
966
+ "normalized": false,
967
+ "rstrip": false,
968
+ "single_word": false,
969
+ "special": true
970
+ },
971
+ "128121": {
972
+ "content": "<|reserved_special_token_116|>",
973
+ "lstrip": false,
974
+ "normalized": false,
975
+ "rstrip": false,
976
+ "single_word": false,
977
+ "special": true
978
+ },
979
+ "128122": {
980
+ "content": "<|reserved_special_token_117|>",
981
+ "lstrip": false,
982
+ "normalized": false,
983
+ "rstrip": false,
984
+ "single_word": false,
985
+ "special": true
986
+ },
987
+ "128123": {
988
+ "content": "<|reserved_special_token_118|>",
989
+ "lstrip": false,
990
+ "normalized": false,
991
+ "rstrip": false,
992
+ "single_word": false,
993
+ "special": true
994
+ },
995
+ "128124": {
996
+ "content": "<|reserved_special_token_119|>",
997
+ "lstrip": false,
998
+ "normalized": false,
999
+ "rstrip": false,
1000
+ "single_word": false,
1001
+ "special": true
1002
+ },
1003
+ "128125": {
1004
+ "content": "<|reserved_special_token_120|>",
1005
+ "lstrip": false,
1006
+ "normalized": false,
1007
+ "rstrip": false,
1008
+ "single_word": false,
1009
+ "special": true
1010
+ },
1011
+ "128126": {
1012
+ "content": "<|reserved_special_token_121|>",
1013
+ "lstrip": false,
1014
+ "normalized": false,
1015
+ "rstrip": false,
1016
+ "single_word": false,
1017
+ "special": true
1018
+ },
1019
+ "128127": {
1020
+ "content": "<|reserved_special_token_122|>",
1021
+ "lstrip": false,
1022
+ "normalized": false,
1023
+ "rstrip": false,
1024
+ "single_word": false,
1025
+ "special": true
1026
+ },
1027
+ "128128": {
1028
+ "content": "<|reserved_special_token_123|>",
1029
+ "lstrip": false,
1030
+ "normalized": false,
1031
+ "rstrip": false,
1032
+ "single_word": false,
1033
+ "special": true
1034
+ },
1035
+ "128129": {
1036
+ "content": "<|reserved_special_token_124|>",
1037
+ "lstrip": false,
1038
+ "normalized": false,
1039
+ "rstrip": false,
1040
+ "single_word": false,
1041
+ "special": true
1042
+ },
1043
+ "128130": {
1044
+ "content": "<|reserved_special_token_125|>",
1045
+ "lstrip": false,
1046
+ "normalized": false,
1047
+ "rstrip": false,
1048
+ "single_word": false,
1049
+ "special": true
1050
+ },
1051
+ "128131": {
1052
+ "content": "<|reserved_special_token_126|>",
1053
+ "lstrip": false,
1054
+ "normalized": false,
1055
+ "rstrip": false,
1056
+ "single_word": false,
1057
+ "special": true
1058
+ },
1059
+ "128132": {
1060
+ "content": "<|reserved_special_token_127|>",
1061
+ "lstrip": false,
1062
+ "normalized": false,
1063
+ "rstrip": false,
1064
+ "single_word": false,
1065
+ "special": true
1066
+ },
1067
+ "128133": {
1068
+ "content": "<|reserved_special_token_128|>",
1069
+ "lstrip": false,
1070
+ "normalized": false,
1071
+ "rstrip": false,
1072
+ "single_word": false,
1073
+ "special": true
1074
+ },
1075
+ "128134": {
1076
+ "content": "<|reserved_special_token_129|>",
1077
+ "lstrip": false,
1078
+ "normalized": false,
1079
+ "rstrip": false,
1080
+ "single_word": false,
1081
+ "special": true
1082
+ },
1083
+ "128135": {
1084
+ "content": "<|reserved_special_token_130|>",
1085
+ "lstrip": false,
1086
+ "normalized": false,
1087
+ "rstrip": false,
1088
+ "single_word": false,
1089
+ "special": true
1090
+ },
1091
+ "128136": {
1092
+ "content": "<|reserved_special_token_131|>",
1093
+ "lstrip": false,
1094
+ "normalized": false,
1095
+ "rstrip": false,
1096
+ "single_word": false,
1097
+ "special": true
1098
+ },
1099
+ "128137": {
1100
+ "content": "<|reserved_special_token_132|>",
1101
+ "lstrip": false,
1102
+ "normalized": false,
1103
+ "rstrip": false,
1104
+ "single_word": false,
1105
+ "special": true
1106
+ },
1107
+ "128138": {
1108
+ "content": "<|reserved_special_token_133|>",
1109
+ "lstrip": false,
1110
+ "normalized": false,
1111
+ "rstrip": false,
1112
+ "single_word": false,
1113
+ "special": true
1114
+ },
1115
+ "128139": {
1116
+ "content": "<|reserved_special_token_134|>",
1117
+ "lstrip": false,
1118
+ "normalized": false,
1119
+ "rstrip": false,
1120
+ "single_word": false,
1121
+ "special": true
1122
+ },
1123
+ "128140": {
1124
+ "content": "<|reserved_special_token_135|>",
1125
+ "lstrip": false,
1126
+ "normalized": false,
1127
+ "rstrip": false,
1128
+ "single_word": false,
1129
+ "special": true
1130
+ },
1131
+ "128141": {
1132
+ "content": "<|reserved_special_token_136|>",
1133
+ "lstrip": false,
1134
+ "normalized": false,
1135
+ "rstrip": false,
1136
+ "single_word": false,
1137
+ "special": true
1138
+ },
1139
+ "128142": {
1140
+ "content": "<|reserved_special_token_137|>",
1141
+ "lstrip": false,
1142
+ "normalized": false,
1143
+ "rstrip": false,
1144
+ "single_word": false,
1145
+ "special": true
1146
+ },
1147
+ "128143": {
1148
+ "content": "<|reserved_special_token_138|>",
1149
+ "lstrip": false,
1150
+ "normalized": false,
1151
+ "rstrip": false,
1152
+ "single_word": false,
1153
+ "special": true
1154
+ },
1155
+ "128144": {
1156
+ "content": "<|reserved_special_token_139|>",
1157
+ "lstrip": false,
1158
+ "normalized": false,
1159
+ "rstrip": false,
1160
+ "single_word": false,
1161
+ "special": true
1162
+ },
1163
+ "128145": {
1164
+ "content": "<|reserved_special_token_140|>",
1165
+ "lstrip": false,
1166
+ "normalized": false,
1167
+ "rstrip": false,
1168
+ "single_word": false,
1169
+ "special": true
1170
+ },
1171
+ "128146": {
1172
+ "content": "<|reserved_special_token_141|>",
1173
+ "lstrip": false,
1174
+ "normalized": false,
1175
+ "rstrip": false,
1176
+ "single_word": false,
1177
+ "special": true
1178
+ },
1179
+ "128147": {
1180
+ "content": "<|reserved_special_token_142|>",
1181
+ "lstrip": false,
1182
+ "normalized": false,
1183
+ "rstrip": false,
1184
+ "single_word": false,
1185
+ "special": true
1186
+ },
1187
+ "128148": {
1188
+ "content": "<|reserved_special_token_143|>",
1189
+ "lstrip": false,
1190
+ "normalized": false,
1191
+ "rstrip": false,
1192
+ "single_word": false,
1193
+ "special": true
1194
+ },
1195
+ "128149": {
1196
+ "content": "<|reserved_special_token_144|>",
1197
+ "lstrip": false,
1198
+ "normalized": false,
1199
+ "rstrip": false,
1200
+ "single_word": false,
1201
+ "special": true
1202
+ },
1203
+ "128150": {
1204
+ "content": "<|reserved_special_token_145|>",
1205
+ "lstrip": false,
1206
+ "normalized": false,
1207
+ "rstrip": false,
1208
+ "single_word": false,
1209
+ "special": true
1210
+ },
1211
+ "128151": {
1212
+ "content": "<|reserved_special_token_146|>",
1213
+ "lstrip": false,
1214
+ "normalized": false,
1215
+ "rstrip": false,
1216
+ "single_word": false,
1217
+ "special": true
1218
+ },
1219
+ "128152": {
1220
+ "content": "<|reserved_special_token_147|>",
1221
+ "lstrip": false,
1222
+ "normalized": false,
1223
+ "rstrip": false,
1224
+ "single_word": false,
1225
+ "special": true
1226
+ },
1227
+ "128153": {
1228
+ "content": "<|reserved_special_token_148|>",
1229
+ "lstrip": false,
1230
+ "normalized": false,
1231
+ "rstrip": false,
1232
+ "single_word": false,
1233
+ "special": true
1234
+ },
1235
+ "128154": {
1236
+ "content": "<|reserved_special_token_149|>",
1237
+ "lstrip": false,
1238
+ "normalized": false,
1239
+ "rstrip": false,
1240
+ "single_word": false,
1241
+ "special": true
1242
+ },
1243
+ "128155": {
1244
+ "content": "<|reserved_special_token_150|>",
1245
+ "lstrip": false,
1246
+ "normalized": false,
1247
+ "rstrip": false,
1248
+ "single_word": false,
1249
+ "special": true
1250
+ },
1251
+ "128156": {
1252
+ "content": "<|reserved_special_token_151|>",
1253
+ "lstrip": false,
1254
+ "normalized": false,
1255
+ "rstrip": false,
1256
+ "single_word": false,
1257
+ "special": true
1258
+ },
1259
+ "128157": {
1260
+ "content": "<|reserved_special_token_152|>",
1261
+ "lstrip": false,
1262
+ "normalized": false,
1263
+ "rstrip": false,
1264
+ "single_word": false,
1265
+ "special": true
1266
+ },
1267
+ "128158": {
1268
+ "content": "<|reserved_special_token_153|>",
1269
+ "lstrip": false,
1270
+ "normalized": false,
1271
+ "rstrip": false,
1272
+ "single_word": false,
1273
+ "special": true
1274
+ },
1275
+ "128159": {
1276
+ "content": "<|reserved_special_token_154|>",
1277
+ "lstrip": false,
1278
+ "normalized": false,
1279
+ "rstrip": false,
1280
+ "single_word": false,
1281
+ "special": true
1282
+ },
1283
+ "128160": {
1284
+ "content": "<|reserved_special_token_155|>",
1285
+ "lstrip": false,
1286
+ "normalized": false,
1287
+ "rstrip": false,
1288
+ "single_word": false,
1289
+ "special": true
1290
+ },
1291
+ "128161": {
1292
+ "content": "<|reserved_special_token_156|>",
1293
+ "lstrip": false,
1294
+ "normalized": false,
1295
+ "rstrip": false,
1296
+ "single_word": false,
1297
+ "special": true
1298
+ },
1299
+ "128162": {
1300
+ "content": "<|reserved_special_token_157|>",
1301
+ "lstrip": false,
1302
+ "normalized": false,
1303
+ "rstrip": false,
1304
+ "single_word": false,
1305
+ "special": true
1306
+ },
1307
+ "128163": {
1308
+ "content": "<|reserved_special_token_158|>",
1309
+ "lstrip": false,
1310
+ "normalized": false,
1311
+ "rstrip": false,
1312
+ "single_word": false,
1313
+ "special": true
1314
+ },
1315
+ "128164": {
1316
+ "content": "<|reserved_special_token_159|>",
1317
+ "lstrip": false,
1318
+ "normalized": false,
1319
+ "rstrip": false,
1320
+ "single_word": false,
1321
+ "special": true
1322
+ },
1323
+ "128165": {
1324
+ "content": "<|reserved_special_token_160|>",
1325
+ "lstrip": false,
1326
+ "normalized": false,
1327
+ "rstrip": false,
1328
+ "single_word": false,
1329
+ "special": true
1330
+ },
1331
+ "128166": {
1332
+ "content": "<|reserved_special_token_161|>",
1333
+ "lstrip": false,
1334
+ "normalized": false,
1335
+ "rstrip": false,
1336
+ "single_word": false,
1337
+ "special": true
1338
+ },
1339
+ "128167": {
1340
+ "content": "<|reserved_special_token_162|>",
1341
+ "lstrip": false,
1342
+ "normalized": false,
1343
+ "rstrip": false,
1344
+ "single_word": false,
1345
+ "special": true
1346
+ },
1347
+ "128168": {
1348
+ "content": "<|reserved_special_token_163|>",
1349
+ "lstrip": false,
1350
+ "normalized": false,
1351
+ "rstrip": false,
1352
+ "single_word": false,
1353
+ "special": true
1354
+ },
1355
+ "128169": {
1356
+ "content": "<|reserved_special_token_164|>",
1357
+ "lstrip": false,
1358
+ "normalized": false,
1359
+ "rstrip": false,
1360
+ "single_word": false,
1361
+ "special": true
1362
+ },
1363
+ "128170": {
1364
+ "content": "<|reserved_special_token_165|>",
1365
+ "lstrip": false,
1366
+ "normalized": false,
1367
+ "rstrip": false,
1368
+ "single_word": false,
1369
+ "special": true
1370
+ },
1371
+ "128171": {
1372
+ "content": "<|reserved_special_token_166|>",
1373
+ "lstrip": false,
1374
+ "normalized": false,
1375
+ "rstrip": false,
1376
+ "single_word": false,
1377
+ "special": true
1378
+ },
1379
+ "128172": {
1380
+ "content": "<|reserved_special_token_167|>",
1381
+ "lstrip": false,
1382
+ "normalized": false,
1383
+ "rstrip": false,
1384
+ "single_word": false,
1385
+ "special": true
1386
+ },
1387
+ "128173": {
1388
+ "content": "<|reserved_special_token_168|>",
1389
+ "lstrip": false,
1390
+ "normalized": false,
1391
+ "rstrip": false,
1392
+ "single_word": false,
1393
+ "special": true
1394
+ },
1395
+ "128174": {
1396
+ "content": "<|reserved_special_token_169|>",
1397
+ "lstrip": false,
1398
+ "normalized": false,
1399
+ "rstrip": false,
1400
+ "single_word": false,
1401
+ "special": true
1402
+ },
1403
+ "128175": {
1404
+ "content": "<|reserved_special_token_170|>",
1405
+ "lstrip": false,
1406
+ "normalized": false,
1407
+ "rstrip": false,
1408
+ "single_word": false,
1409
+ "special": true
1410
+ },
1411
+ "128176": {
1412
+ "content": "<|reserved_special_token_171|>",
1413
+ "lstrip": false,
1414
+ "normalized": false,
1415
+ "rstrip": false,
1416
+ "single_word": false,
1417
+ "special": true
1418
+ },
1419
+ "128177": {
1420
+ "content": "<|reserved_special_token_172|>",
1421
+ "lstrip": false,
1422
+ "normalized": false,
1423
+ "rstrip": false,
1424
+ "single_word": false,
1425
+ "special": true
1426
+ },
1427
+ "128178": {
1428
+ "content": "<|reserved_special_token_173|>",
1429
+ "lstrip": false,
1430
+ "normalized": false,
1431
+ "rstrip": false,
1432
+ "single_word": false,
1433
+ "special": true
1434
+ },
1435
+ "128179": {
1436
+ "content": "<|reserved_special_token_174|>",
1437
+ "lstrip": false,
1438
+ "normalized": false,
1439
+ "rstrip": false,
1440
+ "single_word": false,
1441
+ "special": true
1442
+ },
1443
+ "128180": {
1444
+ "content": "<|reserved_special_token_175|>",
1445
+ "lstrip": false,
1446
+ "normalized": false,
1447
+ "rstrip": false,
1448
+ "single_word": false,
1449
+ "special": true
1450
+ },
1451
+ "128181": {
1452
+ "content": "<|reserved_special_token_176|>",
1453
+ "lstrip": false,
1454
+ "normalized": false,
1455
+ "rstrip": false,
1456
+ "single_word": false,
1457
+ "special": true
1458
+ },
1459
+ "128182": {
1460
+ "content": "<|reserved_special_token_177|>",
1461
+ "lstrip": false,
1462
+ "normalized": false,
1463
+ "rstrip": false,
1464
+ "single_word": false,
1465
+ "special": true
1466
+ },
1467
+ "128183": {
1468
+ "content": "<|reserved_special_token_178|>",
1469
+ "lstrip": false,
1470
+ "normalized": false,
1471
+ "rstrip": false,
1472
+ "single_word": false,
1473
+ "special": true
1474
+ },
1475
+ "128184": {
1476
+ "content": "<|reserved_special_token_179|>",
1477
+ "lstrip": false,
1478
+ "normalized": false,
1479
+ "rstrip": false,
1480
+ "single_word": false,
1481
+ "special": true
1482
+ },
1483
+ "128185": {
1484
+ "content": "<|reserved_special_token_180|>",
1485
+ "lstrip": false,
1486
+ "normalized": false,
1487
+ "rstrip": false,
1488
+ "single_word": false,
1489
+ "special": true
1490
+ },
1491
+ "128186": {
1492
+ "content": "<|reserved_special_token_181|>",
1493
+ "lstrip": false,
1494
+ "normalized": false,
1495
+ "rstrip": false,
1496
+ "single_word": false,
1497
+ "special": true
1498
+ },
1499
+ "128187": {
1500
+ "content": "<|reserved_special_token_182|>",
1501
+ "lstrip": false,
1502
+ "normalized": false,
1503
+ "rstrip": false,
1504
+ "single_word": false,
1505
+ "special": true
1506
+ },
1507
+ "128188": {
1508
+ "content": "<|reserved_special_token_183|>",
1509
+ "lstrip": false,
1510
+ "normalized": false,
1511
+ "rstrip": false,
1512
+ "single_word": false,
1513
+ "special": true
1514
+ },
1515
+ "128189": {
1516
+ "content": "<|reserved_special_token_184|>",
1517
+ "lstrip": false,
1518
+ "normalized": false,
1519
+ "rstrip": false,
1520
+ "single_word": false,
1521
+ "special": true
1522
+ },
1523
+ "128190": {
1524
+ "content": "<|reserved_special_token_185|>",
1525
+ "lstrip": false,
1526
+ "normalized": false,
1527
+ "rstrip": false,
1528
+ "single_word": false,
1529
+ "special": true
1530
+ },
1531
+ "128191": {
1532
+ "content": "<|reserved_special_token_186|>",
1533
+ "lstrip": false,
1534
+ "normalized": false,
1535
+ "rstrip": false,
1536
+ "single_word": false,
1537
+ "special": true
1538
+ },
1539
+ "128192": {
1540
+ "content": "<|reserved_special_token_187|>",
1541
+ "lstrip": false,
1542
+ "normalized": false,
1543
+ "rstrip": false,
1544
+ "single_word": false,
1545
+ "special": true
1546
+ },
1547
+ "128193": {
1548
+ "content": "<|reserved_special_token_188|>",
1549
+ "lstrip": false,
1550
+ "normalized": false,
1551
+ "rstrip": false,
1552
+ "single_word": false,
1553
+ "special": true
1554
+ },
1555
+ "128194": {
1556
+ "content": "<|reserved_special_token_189|>",
1557
+ "lstrip": false,
1558
+ "normalized": false,
1559
+ "rstrip": false,
1560
+ "single_word": false,
1561
+ "special": true
1562
+ },
1563
+ "128195": {
1564
+ "content": "<|reserved_special_token_190|>",
1565
+ "lstrip": false,
1566
+ "normalized": false,
1567
+ "rstrip": false,
1568
+ "single_word": false,
1569
+ "special": true
1570
+ },
1571
+ "128196": {
1572
+ "content": "<|reserved_special_token_191|>",
1573
+ "lstrip": false,
1574
+ "normalized": false,
1575
+ "rstrip": false,
1576
+ "single_word": false,
1577
+ "special": true
1578
+ },
1579
+ "128197": {
1580
+ "content": "<|reserved_special_token_192|>",
1581
+ "lstrip": false,
1582
+ "normalized": false,
1583
+ "rstrip": false,
1584
+ "single_word": false,
1585
+ "special": true
1586
+ },
1587
+ "128198": {
1588
+ "content": "<|reserved_special_token_193|>",
1589
+ "lstrip": false,
1590
+ "normalized": false,
1591
+ "rstrip": false,
1592
+ "single_word": false,
1593
+ "special": true
1594
+ },
1595
+ "128199": {
1596
+ "content": "<|reserved_special_token_194|>",
1597
+ "lstrip": false,
1598
+ "normalized": false,
1599
+ "rstrip": false,
1600
+ "single_word": false,
1601
+ "special": true
1602
+ },
1603
+ "128200": {
1604
+ "content": "<|reserved_special_token_195|>",
1605
+ "lstrip": false,
1606
+ "normalized": false,
1607
+ "rstrip": false,
1608
+ "single_word": false,
1609
+ "special": true
1610
+ },
1611
+ "128201": {
1612
+ "content": "<|reserved_special_token_196|>",
1613
+ "lstrip": false,
1614
+ "normalized": false,
1615
+ "rstrip": false,
1616
+ "single_word": false,
1617
+ "special": true
1618
+ },
1619
+ "128202": {
1620
+ "content": "<|reserved_special_token_197|>",
1621
+ "lstrip": false,
1622
+ "normalized": false,
1623
+ "rstrip": false,
1624
+ "single_word": false,
1625
+ "special": true
1626
+ },
1627
+ "128203": {
1628
+ "content": "<|reserved_special_token_198|>",
1629
+ "lstrip": false,
1630
+ "normalized": false,
1631
+ "rstrip": false,
1632
+ "single_word": false,
1633
+ "special": true
1634
+ },
1635
+ "128204": {
1636
+ "content": "<|reserved_special_token_199|>",
1637
+ "lstrip": false,
1638
+ "normalized": false,
1639
+ "rstrip": false,
1640
+ "single_word": false,
1641
+ "special": true
1642
+ },
1643
+ "128205": {
1644
+ "content": "<|reserved_special_token_200|>",
1645
+ "lstrip": false,
1646
+ "normalized": false,
1647
+ "rstrip": false,
1648
+ "single_word": false,
1649
+ "special": true
1650
+ },
1651
+ "128206": {
1652
+ "content": "<|reserved_special_token_201|>",
1653
+ "lstrip": false,
1654
+ "normalized": false,
1655
+ "rstrip": false,
1656
+ "single_word": false,
1657
+ "special": true
1658
+ },
1659
+ "128207": {
1660
+ "content": "<|reserved_special_token_202|>",
1661
+ "lstrip": false,
1662
+ "normalized": false,
1663
+ "rstrip": false,
1664
+ "single_word": false,
1665
+ "special": true
1666
+ },
1667
+ "128208": {
1668
+ "content": "<|reserved_special_token_203|>",
1669
+ "lstrip": false,
1670
+ "normalized": false,
1671
+ "rstrip": false,
1672
+ "single_word": false,
1673
+ "special": true
1674
+ },
1675
+ "128209": {
1676
+ "content": "<|reserved_special_token_204|>",
1677
+ "lstrip": false,
1678
+ "normalized": false,
1679
+ "rstrip": false,
1680
+ "single_word": false,
1681
+ "special": true
1682
+ },
1683
+ "128210": {
1684
+ "content": "<|reserved_special_token_205|>",
1685
+ "lstrip": false,
1686
+ "normalized": false,
1687
+ "rstrip": false,
1688
+ "single_word": false,
1689
+ "special": true
1690
+ },
1691
+ "128211": {
1692
+ "content": "<|reserved_special_token_206|>",
1693
+ "lstrip": false,
1694
+ "normalized": false,
1695
+ "rstrip": false,
1696
+ "single_word": false,
1697
+ "special": true
1698
+ },
1699
+ "128212": {
1700
+ "content": "<|reserved_special_token_207|>",
1701
+ "lstrip": false,
1702
+ "normalized": false,
1703
+ "rstrip": false,
1704
+ "single_word": false,
1705
+ "special": true
1706
+ },
1707
+ "128213": {
1708
+ "content": "<|reserved_special_token_208|>",
1709
+ "lstrip": false,
1710
+ "normalized": false,
1711
+ "rstrip": false,
1712
+ "single_word": false,
1713
+ "special": true
1714
+ },
1715
+ "128214": {
1716
+ "content": "<|reserved_special_token_209|>",
1717
+ "lstrip": false,
1718
+ "normalized": false,
1719
+ "rstrip": false,
1720
+ "single_word": false,
1721
+ "special": true
1722
+ },
1723
+ "128215": {
1724
+ "content": "<|reserved_special_token_210|>",
1725
+ "lstrip": false,
1726
+ "normalized": false,
1727
+ "rstrip": false,
1728
+ "single_word": false,
1729
+ "special": true
1730
+ },
1731
+ "128216": {
1732
+ "content": "<|reserved_special_token_211|>",
1733
+ "lstrip": false,
1734
+ "normalized": false,
1735
+ "rstrip": false,
1736
+ "single_word": false,
1737
+ "special": true
1738
+ },
1739
+ "128217": {
1740
+ "content": "<|reserved_special_token_212|>",
1741
+ "lstrip": false,
1742
+ "normalized": false,
1743
+ "rstrip": false,
1744
+ "single_word": false,
1745
+ "special": true
1746
+ },
1747
+ "128218": {
1748
+ "content": "<|reserved_special_token_213|>",
1749
+ "lstrip": false,
1750
+ "normalized": false,
1751
+ "rstrip": false,
1752
+ "single_word": false,
1753
+ "special": true
1754
+ },
1755
+ "128219": {
1756
+ "content": "<|reserved_special_token_214|>",
1757
+ "lstrip": false,
1758
+ "normalized": false,
1759
+ "rstrip": false,
1760
+ "single_word": false,
1761
+ "special": true
1762
+ },
1763
+ "128220": {
1764
+ "content": "<|reserved_special_token_215|>",
1765
+ "lstrip": false,
1766
+ "normalized": false,
1767
+ "rstrip": false,
1768
+ "single_word": false,
1769
+ "special": true
1770
+ },
1771
+ "128221": {
1772
+ "content": "<|reserved_special_token_216|>",
1773
+ "lstrip": false,
1774
+ "normalized": false,
1775
+ "rstrip": false,
1776
+ "single_word": false,
1777
+ "special": true
1778
+ },
1779
+ "128222": {
1780
+ "content": "<|reserved_special_token_217|>",
1781
+ "lstrip": false,
1782
+ "normalized": false,
1783
+ "rstrip": false,
1784
+ "single_word": false,
1785
+ "special": true
1786
+ },
1787
+ "128223": {
1788
+ "content": "<|reserved_special_token_218|>",
1789
+ "lstrip": false,
1790
+ "normalized": false,
1791
+ "rstrip": false,
1792
+ "single_word": false,
1793
+ "special": true
1794
+ },
1795
+ "128224": {
1796
+ "content": "<|reserved_special_token_219|>",
1797
+ "lstrip": false,
1798
+ "normalized": false,
1799
+ "rstrip": false,
1800
+ "single_word": false,
1801
+ "special": true
1802
+ },
1803
+ "128225": {
1804
+ "content": "<|reserved_special_token_220|>",
1805
+ "lstrip": false,
1806
+ "normalized": false,
1807
+ "rstrip": false,
1808
+ "single_word": false,
1809
+ "special": true
1810
+ },
1811
+ "128226": {
1812
+ "content": "<|reserved_special_token_221|>",
1813
+ "lstrip": false,
1814
+ "normalized": false,
1815
+ "rstrip": false,
1816
+ "single_word": false,
1817
+ "special": true
1818
+ },
1819
+ "128227": {
1820
+ "content": "<|reserved_special_token_222|>",
1821
+ "lstrip": false,
1822
+ "normalized": false,
1823
+ "rstrip": false,
1824
+ "single_word": false,
1825
+ "special": true
1826
+ },
1827
+ "128228": {
1828
+ "content": "<|reserved_special_token_223|>",
1829
+ "lstrip": false,
1830
+ "normalized": false,
1831
+ "rstrip": false,
1832
+ "single_word": false,
1833
+ "special": true
1834
+ },
1835
+ "128229": {
1836
+ "content": "<|reserved_special_token_224|>",
1837
+ "lstrip": false,
1838
+ "normalized": false,
1839
+ "rstrip": false,
1840
+ "single_word": false,
1841
+ "special": true
1842
+ },
1843
+ "128230": {
1844
+ "content": "<|reserved_special_token_225|>",
1845
+ "lstrip": false,
1846
+ "normalized": false,
1847
+ "rstrip": false,
1848
+ "single_word": false,
1849
+ "special": true
1850
+ },
1851
+ "128231": {
1852
+ "content": "<|reserved_special_token_226|>",
1853
+ "lstrip": false,
1854
+ "normalized": false,
1855
+ "rstrip": false,
1856
+ "single_word": false,
1857
+ "special": true
1858
+ },
1859
+ "128232": {
1860
+ "content": "<|reserved_special_token_227|>",
1861
+ "lstrip": false,
1862
+ "normalized": false,
1863
+ "rstrip": false,
1864
+ "single_word": false,
1865
+ "special": true
1866
+ },
1867
+ "128233": {
1868
+ "content": "<|reserved_special_token_228|>",
1869
+ "lstrip": false,
1870
+ "normalized": false,
1871
+ "rstrip": false,
1872
+ "single_word": false,
1873
+ "special": true
1874
+ },
1875
+ "128234": {
1876
+ "content": "<|reserved_special_token_229|>",
1877
+ "lstrip": false,
1878
+ "normalized": false,
1879
+ "rstrip": false,
1880
+ "single_word": false,
1881
+ "special": true
1882
+ },
1883
+ "128235": {
1884
+ "content": "<|reserved_special_token_230|>",
1885
+ "lstrip": false,
1886
+ "normalized": false,
1887
+ "rstrip": false,
1888
+ "single_word": false,
1889
+ "special": true
1890
+ },
1891
+ "128236": {
1892
+ "content": "<|reserved_special_token_231|>",
1893
+ "lstrip": false,
1894
+ "normalized": false,
1895
+ "rstrip": false,
1896
+ "single_word": false,
1897
+ "special": true
1898
+ },
1899
+ "128237": {
1900
+ "content": "<|reserved_special_token_232|>",
1901
+ "lstrip": false,
1902
+ "normalized": false,
1903
+ "rstrip": false,
1904
+ "single_word": false,
1905
+ "special": true
1906
+ },
1907
+ "128238": {
1908
+ "content": "<|reserved_special_token_233|>",
1909
+ "lstrip": false,
1910
+ "normalized": false,
1911
+ "rstrip": false,
1912
+ "single_word": false,
1913
+ "special": true
1914
+ },
1915
+ "128239": {
1916
+ "content": "<|reserved_special_token_234|>",
1917
+ "lstrip": false,
1918
+ "normalized": false,
1919
+ "rstrip": false,
1920
+ "single_word": false,
1921
+ "special": true
1922
+ },
1923
+ "128240": {
1924
+ "content": "<|reserved_special_token_235|>",
1925
+ "lstrip": false,
1926
+ "normalized": false,
1927
+ "rstrip": false,
1928
+ "single_word": false,
1929
+ "special": true
1930
+ },
1931
+ "128241": {
1932
+ "content": "<|reserved_special_token_236|>",
1933
+ "lstrip": false,
1934
+ "normalized": false,
1935
+ "rstrip": false,
1936
+ "single_word": false,
1937
+ "special": true
1938
+ },
1939
+ "128242": {
1940
+ "content": "<|reserved_special_token_237|>",
1941
+ "lstrip": false,
1942
+ "normalized": false,
1943
+ "rstrip": false,
1944
+ "single_word": false,
1945
+ "special": true
1946
+ },
1947
+ "128243": {
1948
+ "content": "<|reserved_special_token_238|>",
1949
+ "lstrip": false,
1950
+ "normalized": false,
1951
+ "rstrip": false,
1952
+ "single_word": false,
1953
+ "special": true
1954
+ },
1955
+ "128244": {
1956
+ "content": "<|reserved_special_token_239|>",
1957
+ "lstrip": false,
1958
+ "normalized": false,
1959
+ "rstrip": false,
1960
+ "single_word": false,
1961
+ "special": true
1962
+ },
1963
+ "128245": {
1964
+ "content": "<|reserved_special_token_240|>",
1965
+ "lstrip": false,
1966
+ "normalized": false,
1967
+ "rstrip": false,
1968
+ "single_word": false,
1969
+ "special": true
1970
+ },
1971
+ "128246": {
1972
+ "content": "<|reserved_special_token_241|>",
1973
+ "lstrip": false,
1974
+ "normalized": false,
1975
+ "rstrip": false,
1976
+ "single_word": false,
1977
+ "special": true
1978
+ },
1979
+ "128247": {
1980
+ "content": "<|reserved_special_token_242|>",
1981
+ "lstrip": false,
1982
+ "normalized": false,
1983
+ "rstrip": false,
1984
+ "single_word": false,
1985
+ "special": true
1986
+ },
1987
+ "128248": {
1988
+ "content": "<|reserved_special_token_243|>",
1989
+ "lstrip": false,
1990
+ "normalized": false,
1991
+ "rstrip": false,
1992
+ "single_word": false,
1993
+ "special": true
1994
+ },
1995
+ "128249": {
1996
+ "content": "<|reserved_special_token_244|>",
1997
+ "lstrip": false,
1998
+ "normalized": false,
1999
+ "rstrip": false,
2000
+ "single_word": false,
2001
+ "special": true
2002
+ },
2003
+ "128250": {
2004
+ "content": "<|reserved_special_token_245|>",
2005
+ "lstrip": false,
2006
+ "normalized": false,
2007
+ "rstrip": false,
2008
+ "single_word": false,
2009
+ "special": true
2010
+ },
2011
+ "128251": {
2012
+ "content": "<|reserved_special_token_246|>",
2013
+ "lstrip": false,
2014
+ "normalized": false,
2015
+ "rstrip": false,
2016
+ "single_word": false,
2017
+ "special": true
2018
+ },
2019
+ "128252": {
2020
+ "content": "<|reserved_special_token_247|>",
2021
+ "lstrip": false,
2022
+ "normalized": false,
2023
+ "rstrip": false,
2024
+ "single_word": false,
2025
+ "special": true
2026
+ },
2027
+ "128253": {
2028
+ "content": "<|reserved_special_token_248|>",
2029
+ "lstrip": false,
2030
+ "normalized": false,
2031
+ "rstrip": false,
2032
+ "single_word": false,
2033
+ "special": true
2034
+ },
2035
+ "128254": {
2036
+ "content": "<|reserved_special_token_249|>",
2037
+ "lstrip": false,
2038
+ "normalized": false,
2039
+ "rstrip": false,
2040
+ "single_word": false,
2041
+ "special": true
2042
+ },
2043
+ "128255": {
2044
+ "content": "<|reserved_special_token_250|>",
2045
+ "lstrip": false,
2046
+ "normalized": false,
2047
+ "rstrip": false,
2048
+ "single_word": false,
2049
+ "special": true
2050
+ }
2051
+ },
2052
+ "bos_token": "<|begin_of_text|>",
2053
+ "clean_up_tokenization_spaces": true,
2054
+ "eos_token": "<|eot_id|>",
2055
+ "extra_special_tokens": {},
2056
+ "model_input_names": [
2057
+ "input_ids",
2058
+ "attention_mask"
2059
+ ],
2060
+ "model_max_length": 1024,
2061
+ "pad_token": "<|eot_id|>",
2062
+ "padding_side": "left",
2063
+ "tokenizer_class": "PreTrainedTokenizerFast",
2064
+ "truncation_side": "left"
2065
+ }
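
The closing block of this tokenizer_config.json fixes the runtime behaviour of the tokenizer: `<|eot_id|>` doubles as both eos and pad token, sequences are capped at 1024 tokens, and padding/truncation are applied on the left. A minimal sketch of loading it, assuming the checkpoint directory from this commit is checked out locally (paths and prompts are illustrative, not taken from the repository's own scripts):

```python
from transformers import AutoTokenizer

# Assumed local path; adjust to wherever this commit is checked out.
tokenizer = AutoTokenizer.from_pretrained("checkpoints/archer_Llama3-8B-I_strategic")

# These values are pinned by the tokenizer_config.json above.
assert tokenizer.model_max_length == 1024
assert tokenizer.padding_side == "left"
assert tokenizer.pad_token == tokenizer.eos_token == "<|eot_id|>"

# Left padding/truncation keeps the most recent turns adjacent to the generation
# position, the usual setup for batched decoding with decoder-only models.
batch = tokenizer(
    ["Hello", "A somewhat longer illustrative prompt"],
    padding=True, truncation=True, max_length=1024, return_tensors="pt",
)
```
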
checkpoints/archer_Llama3-8B-I_word/README.md ADDED
@@ -0,0 +1,207 @@
1
+ ---
2
+ base_model: /home/jiashuo/codes/ForesightOptim/checkpoints/im_Llama3-8B-I_word/merged_model
3
+ library_name: peft
4
+ pipeline_tag: text-generation
5
+ tags:
6
+ - base_model:adapter:/home/jiashuo/codes/ForesightOptim/checkpoints/im_Llama3-8B-I_word/merged_model
7
+ - lora
8
+ - transformers
9
+ ---
10
+
11
+ # Model Card for Model ID
12
+
13
+ <!-- Provide a quick summary of what the model is/does. -->
14
+
15
+
16
+
17
+ ## Model Details
18
+
19
+ ### Model Description
20
+
21
+ <!-- Provide a longer summary of what this model is. -->
22
+
23
+
24
+
25
+ - **Developed by:** [More Information Needed]
26
+ - **Funded by [optional]:** [More Information Needed]
27
+ - **Shared by [optional]:** [More Information Needed]
28
+ - **Model type:** [More Information Needed]
29
+ - **Language(s) (NLP):** [More Information Needed]
30
+ - **License:** [More Information Needed]
31
+ - **Finetuned from model [optional]:** [More Information Needed]
32
+
33
+ ### Model Sources [optional]
34
+
35
+ <!-- Provide the basic links for the model. -->
36
+
37
+ - **Repository:** [More Information Needed]
38
+ - **Paper [optional]:** [More Information Needed]
39
+ - **Demo [optional]:** [More Information Needed]
40
+
41
+ ## Uses
42
+
43
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
44
+
45
+ ### Direct Use
46
+
47
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
48
+
49
+ [More Information Needed]
50
+
51
+ ### Downstream Use [optional]
52
+
53
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
54
+
55
+ [More Information Needed]
56
+
57
+ ### Out-of-Scope Use
58
+
59
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
60
+
61
+ [More Information Needed]
62
+
63
+ ## Bias, Risks, and Limitations
64
+
65
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
66
+
67
+ [More Information Needed]
68
+
69
+ ### Recommendations
70
+
71
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
72
+
73
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
74
+
75
+ ## How to Get Started with the Model
76
+
77
+ Use the code below to get started with the model.
78
+
79
+ [More Information Needed]
80
+
81
+ ## Training Details
82
+
83
+ ### Training Data
84
+
85
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
86
+
87
+ [More Information Needed]
88
+
89
+ ### Training Procedure
90
+
91
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
92
+
93
+ #### Preprocessing [optional]
94
+
95
+ [More Information Needed]
96
+
97
+
98
+ #### Training Hyperparameters
99
+
100
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
101
+
102
+ #### Speeds, Sizes, Times [optional]
103
+
104
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
105
+
106
+ [More Information Needed]
107
+
108
+ ## Evaluation
109
+
110
+ <!-- This section describes the evaluation protocols and provides the results. -->
111
+
112
+ ### Testing Data, Factors & Metrics
113
+
114
+ #### Testing Data
115
+
116
+ <!-- This should link to a Dataset Card if possible. -->
117
+
118
+ [More Information Needed]
119
+
120
+ #### Factors
121
+
122
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
123
+
124
+ [More Information Needed]
125
+
126
+ #### Metrics
127
+
128
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
129
+
130
+ [More Information Needed]
131
+
132
+ ### Results
133
+
134
+ [More Information Needed]
135
+
136
+ #### Summary
137
+
138
+
139
+
140
+ ## Model Examination [optional]
141
+
142
+ <!-- Relevant interpretability work for the model goes here -->
143
+
144
+ [More Information Needed]
145
+
146
+ ## Environmental Impact
147
+
148
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
149
+
150
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
151
+
152
+ - **Hardware Type:** [More Information Needed]
153
+ - **Hours used:** [More Information Needed]
154
+ - **Cloud Provider:** [More Information Needed]
155
+ - **Compute Region:** [More Information Needed]
156
+ - **Carbon Emitted:** [More Information Needed]
157
+
158
+ ## Technical Specifications [optional]
159
+
160
+ ### Model Architecture and Objective
161
+
162
+ [More Information Needed]
163
+
164
+ ### Compute Infrastructure
165
+
166
+ [More Information Needed]
167
+
168
+ #### Hardware
169
+
170
+ [More Information Needed]
171
+
172
+ #### Software
173
+
174
+ [More Information Needed]
175
+
176
+ ## Citation [optional]
177
+
178
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
179
+
180
+ **BibTeX:**
181
+
182
+ [More Information Needed]
183
+
184
+ **APA:**
185
+
186
+ [More Information Needed]
187
+
188
+ ## Glossary [optional]
189
+
190
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
191
+
192
+ [More Information Needed]
193
+
194
+ ## More Information [optional]
195
+
196
+ [More Information Needed]
197
+
198
+ ## Model Card Authors [optional]
199
+
200
+ [More Information Needed]
201
+
202
+ ## Model Card Contact
203
+
204
+ [More Information Needed]
205
+ ### Framework versions
206
+
207
+ - PEFT 0.17.1
checkpoints/archer_Llama3-8B-I_word/adapter_config.json ADDED
@@ -0,0 +1,37 @@
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": null,
4
+ "base_model_name_or_path": "/home/jiashuo/codes/ForesightOptim/checkpoints/im_Llama3-8B-I_word/merged_model",
5
+ "bias": "none",
6
+ "corda_config": null,
7
+ "eva_config": null,
8
+ "exclude_modules": null,
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layer_replication": null,
13
+ "layers_pattern": null,
14
+ "layers_to_transform": null,
15
+ "loftq_config": {},
16
+ "lora_alpha": 16,
17
+ "lora_bias": false,
18
+ "lora_dropout": 0.05,
19
+ "megatron_config": null,
20
+ "megatron_core": "megatron.core",
21
+ "modules_to_save": null,
22
+ "peft_type": "LORA",
23
+ "qalora_group_size": 16,
24
+ "r": 8,
25
+ "rank_pattern": {},
26
+ "revision": null,
27
+ "target_modules": [
28
+ "v_proj",
29
+ "q_proj"
30
+ ],
31
+ "target_parameters": null,
32
+ "task_type": "CAUSAL_LM",
33
+ "trainable_token_indices": null,
34
+ "use_dora": false,
35
+ "use_qalora": false,
36
+ "use_rslora": false
37
+ }
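
The adapter_config.json above describes a small LoRA adapter: rank 8, alpha 16, dropout 0.05, applied only to the `q_proj` and `v_proj` projections of a causal-LM base. The referenced base model (`.../im_Llama3-8B-I_word/merged_model`) is a local path that is not part of this upload, so loading the adapter requires supplying that merged base yourself. A minimal sketch with PEFT (paths are placeholders, not the repository's own loading code):

```python
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL = "path/to/im_Llama3-8B-I_word/merged_model"   # placeholder, not shipped here
ADAPTER_DIR = "checkpoints/archer_Llama3-8B-I_word"

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.bfloat16)

# Attaches the rank-8 LoRA updates from adapter_model.safetensors to q_proj / v_proj.
model = PeftModel.from_pretrained(base, ADAPTER_DIR)

# Optionally fold the adapter into the base weights for plain transformers inference.
model = model.merge_and_unload()
```
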
checkpoints/archer_Llama3-8B-I_word/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4718a43b261c26cfc0d18d7ce3f147b5010ceeee38d74f1aa3c1f163c9203e6
3
+ size 6832728
checkpoints/archer_Llama3-8B-I_word/chat_template.jinja ADDED
@@ -0,0 +1,5 @@
1
+ {% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
2
+
3
+ '+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>
4
+
5
+ ' }}{% endif %}
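
This Jinja file is the standard Llama-3 chat template: each message is wrapped in `<|start_header_id|>role<|end_header_id|>` / `<|eot_id|>` markers, the first message is prefixed with the BOS token, and an assistant header is appended when a generation prompt is requested. A minimal sketch of rendering it through the tokenizer (the messages are illustrative only):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoints/archer_Llama3-8B-I_word")

messages = [
    {"role": "system", "content": "You are playing a word taboo game."},
    {"role": "user", "content": "Let's begin."},
]

# tokenize=False returns the rendered string; add_generation_prompt=True appends
# the trailing '<|start_header_id|>assistant<|end_header_id|>' block for decoding.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```
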
checkpoints/archer_Llama3-8B-I_word/special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<|begin_of_text|>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|eot_id|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|eot_id|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ }
23
+ }
checkpoints/archer_Llama3-8B-I_word/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8fc5ed64d17c57f61c0ef996ac8b3a8918e7d406866cc4a0292d362a31a217e4
3
+ size 17210125
checkpoints/archer_Llama3-8B-I_word/tokenizer_config.json ADDED
@@ -0,0 +1,2065 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "128000": {
4
+ "content": "<|begin_of_text|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "128001": {
12
+ "content": "<|end_of_text|>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "128002": {
20
+ "content": "<|reserved_special_token_0|>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "128003": {
28
+ "content": "<|reserved_special_token_1|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "128004": {
36
+ "content": "<|reserved_special_token_2|>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "128005": {
44
+ "content": "<|reserved_special_token_3|>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "128006": {
52
+ "content": "<|start_header_id|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "128007": {
60
+ "content": "<|end_header_id|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "128008": {
68
+ "content": "<|reserved_special_token_4|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "128009": {
76
+ "content": "<|eot_id|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "128010": {
84
+ "content": "<|reserved_special_token_5|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "128011": {
92
+ "content": "<|reserved_special_token_6|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "128012": {
100
+ "content": "<|reserved_special_token_7|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "128013": {
108
+ "content": "<|reserved_special_token_8|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "128014": {
116
+ "content": "<|reserved_special_token_9|>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "128015": {
124
+ "content": "<|reserved_special_token_10|>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "128016": {
132
+ "content": "<|reserved_special_token_11|>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "128017": {
140
+ "content": "<|reserved_special_token_12|>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "128018": {
148
+ "content": "<|reserved_special_token_13|>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "128019": {
156
+ "content": "<|reserved_special_token_14|>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "128020": {
164
+ "content": "<|reserved_special_token_15|>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "128021": {
172
+ "content": "<|reserved_special_token_16|>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "128022": {
180
+ "content": "<|reserved_special_token_17|>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "128023": {
188
+ "content": "<|reserved_special_token_18|>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "128024": {
196
+ "content": "<|reserved_special_token_19|>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "128025": {
204
+ "content": "<|reserved_special_token_20|>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "128026": {
212
+ "content": "<|reserved_special_token_21|>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "128027": {
220
+ "content": "<|reserved_special_token_22|>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "128028": {
228
+ "content": "<|reserved_special_token_23|>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "128029": {
236
+ "content": "<|reserved_special_token_24|>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "128030": {
244
+ "content": "<|reserved_special_token_25|>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "128031": {
252
+ "content": "<|reserved_special_token_26|>",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "128032": {
260
+ "content": "<|reserved_special_token_27|>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "128033": {
268
+ "content": "<|reserved_special_token_28|>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "128034": {
276
+ "content": "<|reserved_special_token_29|>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "128035": {
284
+ "content": "<|reserved_special_token_30|>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "128036": {
292
+ "content": "<|reserved_special_token_31|>",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "128037": {
300
+ "content": "<|reserved_special_token_32|>",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "128038": {
308
+ "content": "<|reserved_special_token_33|>",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "128039": {
316
+ "content": "<|reserved_special_token_34|>",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "128040": {
324
+ "content": "<|reserved_special_token_35|>",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "128041": {
332
+ "content": "<|reserved_special_token_36|>",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "128042": {
340
+ "content": "<|reserved_special_token_37|>",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "128043": {
348
+ "content": "<|reserved_special_token_38|>",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "128044": {
356
+ "content": "<|reserved_special_token_39|>",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "128045": {
364
+ "content": "<|reserved_special_token_40|>",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "128046": {
372
+ "content": "<|reserved_special_token_41|>",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "128047": {
380
+ "content": "<|reserved_special_token_42|>",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "128048": {
388
+ "content": "<|reserved_special_token_43|>",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "128049": {
396
+ "content": "<|reserved_special_token_44|>",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "128050": {
404
+ "content": "<|reserved_special_token_45|>",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "128051": {
412
+ "content": "<|reserved_special_token_46|>",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "128052": {
420
+ "content": "<|reserved_special_token_47|>",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "128053": {
428
+ "content": "<|reserved_special_token_48|>",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "128054": {
436
+ "content": "<|reserved_special_token_49|>",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "128055": {
444
+ "content": "<|reserved_special_token_50|>",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "128056": {
452
+ "content": "<|reserved_special_token_51|>",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "128057": {
460
+ "content": "<|reserved_special_token_52|>",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "128058": {
468
+ "content": "<|reserved_special_token_53|>",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "128059": {
476
+ "content": "<|reserved_special_token_54|>",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "128060": {
484
+ "content": "<|reserved_special_token_55|>",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "128061": {
492
+ "content": "<|reserved_special_token_56|>",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "128062": {
500
+ "content": "<|reserved_special_token_57|>",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "128063": {
508
+ "content": "<|reserved_special_token_58|>",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "128064": {
516
+ "content": "<|reserved_special_token_59|>",
517
+ "lstrip": false,
518
+ "normalized": false,
519
+ "rstrip": false,
520
+ "single_word": false,
521
+ "special": true
522
+ },
523
+ "128065": {
524
+ "content": "<|reserved_special_token_60|>",
525
+ "lstrip": false,
526
+ "normalized": false,
527
+ "rstrip": false,
528
+ "single_word": false,
529
+ "special": true
530
+ },
531
+ "128066": {
532
+ "content": "<|reserved_special_token_61|>",
533
+ "lstrip": false,
534
+ "normalized": false,
535
+ "rstrip": false,
536
+ "single_word": false,
537
+ "special": true
538
+ },
539
+ "128067": {
540
+ "content": "<|reserved_special_token_62|>",
541
+ "lstrip": false,
542
+ "normalized": false,
543
+ "rstrip": false,
544
+ "single_word": false,
545
+ "special": true
546
+ },
547
+ "128068": {
548
+ "content": "<|reserved_special_token_63|>",
549
+ "lstrip": false,
550
+ "normalized": false,
551
+ "rstrip": false,
552
+ "single_word": false,
553
+ "special": true
554
+ },
555
+ "128069": {
556
+ "content": "<|reserved_special_token_64|>",
557
+ "lstrip": false,
558
+ "normalized": false,
559
+ "rstrip": false,
560
+ "single_word": false,
561
+ "special": true
562
+ },
563
+ "128070": {
564
+ "content": "<|reserved_special_token_65|>",
565
+ "lstrip": false,
566
+ "normalized": false,
567
+ "rstrip": false,
568
+ "single_word": false,
569
+ "special": true
570
+ },
571
+ "128071": {
572
+ "content": "<|reserved_special_token_66|>",
573
+ "lstrip": false,
574
+ "normalized": false,
575
+ "rstrip": false,
576
+ "single_word": false,
577
+ "special": true
578
+ },
579
+ "128072": {
580
+ "content": "<|reserved_special_token_67|>",
581
+ "lstrip": false,
582
+ "normalized": false,
583
+ "rstrip": false,
584
+ "single_word": false,
585
+ "special": true
586
+ },
587
+ "128073": {
588
+ "content": "<|reserved_special_token_68|>",
589
+ "lstrip": false,
590
+ "normalized": false,
591
+ "rstrip": false,
592
+ "single_word": false,
593
+ "special": true
594
+ },
595
+ "128074": {
596
+ "content": "<|reserved_special_token_69|>",
597
+ "lstrip": false,
598
+ "normalized": false,
599
+ "rstrip": false,
600
+ "single_word": false,
601
+ "special": true
602
+ },
603
+ "128075": {
604
+ "content": "<|reserved_special_token_70|>",
605
+ "lstrip": false,
606
+ "normalized": false,
607
+ "rstrip": false,
608
+ "single_word": false,
609
+ "special": true
610
+ },
611
+ "128076": {
612
+ "content": "<|reserved_special_token_71|>",
613
+ "lstrip": false,
614
+ "normalized": false,
615
+ "rstrip": false,
616
+ "single_word": false,
617
+ "special": true
618
+ },
619
+ "128077": {
620
+ "content": "<|reserved_special_token_72|>",
621
+ "lstrip": false,
622
+ "normalized": false,
623
+ "rstrip": false,
624
+ "single_word": false,
625
+ "special": true
626
+ },
627
+ "128078": {
628
+ "content": "<|reserved_special_token_73|>",
629
+ "lstrip": false,
630
+ "normalized": false,
631
+ "rstrip": false,
632
+ "single_word": false,
633
+ "special": true
634
+ },
635
+ "128079": {
636
+ "content": "<|reserved_special_token_74|>",
637
+ "lstrip": false,
638
+ "normalized": false,
639
+ "rstrip": false,
640
+ "single_word": false,
641
+ "special": true
642
+ },
643
+ "128080": {
644
+ "content": "<|reserved_special_token_75|>",
645
+ "lstrip": false,
646
+ "normalized": false,
647
+ "rstrip": false,
648
+ "single_word": false,
649
+ "special": true
650
+ },
651
+ "128081": {
652
+ "content": "<|reserved_special_token_76|>",
653
+ "lstrip": false,
654
+ "normalized": false,
655
+ "rstrip": false,
656
+ "single_word": false,
657
+ "special": true
658
+ },
659
+ "128082": {
660
+ "content": "<|reserved_special_token_77|>",
661
+ "lstrip": false,
662
+ "normalized": false,
663
+ "rstrip": false,
664
+ "single_word": false,
665
+ "special": true
666
+ },
667
+ "128083": {
668
+ "content": "<|reserved_special_token_78|>",
669
+ "lstrip": false,
670
+ "normalized": false,
671
+ "rstrip": false,
672
+ "single_word": false,
673
+ "special": true
674
+ },
675
+ "128084": {
676
+ "content": "<|reserved_special_token_79|>",
677
+ "lstrip": false,
678
+ "normalized": false,
679
+ "rstrip": false,
680
+ "single_word": false,
681
+ "special": true
682
+ },
683
+ "128085": {
684
+ "content": "<|reserved_special_token_80|>",
685
+ "lstrip": false,
686
+ "normalized": false,
687
+ "rstrip": false,
688
+ "single_word": false,
689
+ "special": true
690
+ },
691
+ "128086": {
692
+ "content": "<|reserved_special_token_81|>",
693
+ "lstrip": false,
694
+ "normalized": false,
695
+ "rstrip": false,
696
+ "single_word": false,
697
+ "special": true
698
+ },
699
+ "128087": {
700
+ "content": "<|reserved_special_token_82|>",
701
+ "lstrip": false,
702
+ "normalized": false,
703
+ "rstrip": false,
704
+ "single_word": false,
705
+ "special": true
706
+ },
707
+ "128088": {
708
+ "content": "<|reserved_special_token_83|>",
709
+ "lstrip": false,
710
+ "normalized": false,
711
+ "rstrip": false,
712
+ "single_word": false,
713
+ "special": true
714
+ },
715
+ "128089": {
716
+ "content": "<|reserved_special_token_84|>",
717
+ "lstrip": false,
718
+ "normalized": false,
719
+ "rstrip": false,
720
+ "single_word": false,
721
+ "special": true
722
+ },
723
+ "128090": {
724
+ "content": "<|reserved_special_token_85|>",
725
+ "lstrip": false,
726
+ "normalized": false,
727
+ "rstrip": false,
728
+ "single_word": false,
729
+ "special": true
730
+ },
731
+ "128091": {
732
+ "content": "<|reserved_special_token_86|>",
733
+ "lstrip": false,
734
+ "normalized": false,
735
+ "rstrip": false,
736
+ "single_word": false,
737
+ "special": true
738
+ },
739
+ "128092": {
740
+ "content": "<|reserved_special_token_87|>",
741
+ "lstrip": false,
742
+ "normalized": false,
743
+ "rstrip": false,
744
+ "single_word": false,
745
+ "special": true
746
+ },
747
+ "128093": {
748
+ "content": "<|reserved_special_token_88|>",
749
+ "lstrip": false,
750
+ "normalized": false,
751
+ "rstrip": false,
752
+ "single_word": false,
753
+ "special": true
754
+ },
755
+ "128094": {
756
+ "content": "<|reserved_special_token_89|>",
757
+ "lstrip": false,
758
+ "normalized": false,
759
+ "rstrip": false,
760
+ "single_word": false,
761
+ "special": true
762
+ },
763
+ "128095": {
764
+ "content": "<|reserved_special_token_90|>",
765
+ "lstrip": false,
766
+ "normalized": false,
767
+ "rstrip": false,
768
+ "single_word": false,
769
+ "special": true
770
+ },
771
+ "128096": {
772
+ "content": "<|reserved_special_token_91|>",
773
+ "lstrip": false,
774
+ "normalized": false,
775
+ "rstrip": false,
776
+ "single_word": false,
777
+ "special": true
778
+ },
779
+ "128097": {
780
+ "content": "<|reserved_special_token_92|>",
781
+ "lstrip": false,
782
+ "normalized": false,
783
+ "rstrip": false,
784
+ "single_word": false,
785
+ "special": true
786
+ },
787
+ "128098": {
788
+ "content": "<|reserved_special_token_93|>",
789
+ "lstrip": false,
790
+ "normalized": false,
791
+ "rstrip": false,
792
+ "single_word": false,
793
+ "special": true
794
+ },
795
+ "128099": {
796
+ "content": "<|reserved_special_token_94|>",
797
+ "lstrip": false,
798
+ "normalized": false,
799
+ "rstrip": false,
800
+ "single_word": false,
801
+ "special": true
802
+ },
803
+ "128100": {
804
+ "content": "<|reserved_special_token_95|>",
805
+ "lstrip": false,
806
+ "normalized": false,
807
+ "rstrip": false,
808
+ "single_word": false,
809
+ "special": true
810
+ },
811
+ "128101": {
812
+ "content": "<|reserved_special_token_96|>",
813
+ "lstrip": false,
814
+ "normalized": false,
815
+ "rstrip": false,
816
+ "single_word": false,
817
+ "special": true
818
+ },
819
+ "128102": {
820
+ "content": "<|reserved_special_token_97|>",
821
+ "lstrip": false,
822
+ "normalized": false,
823
+ "rstrip": false,
824
+ "single_word": false,
825
+ "special": true
826
+ },
827
+ "128103": {
828
+ "content": "<|reserved_special_token_98|>",
829
+ "lstrip": false,
830
+ "normalized": false,
831
+ "rstrip": false,
832
+ "single_word": false,
833
+ "special": true
834
+ },
835
+ "128104": {
836
+ "content": "<|reserved_special_token_99|>",
837
+ "lstrip": false,
838
+ "normalized": false,
839
+ "rstrip": false,
840
+ "single_word": false,
841
+ "special": true
842
+ },
843
+ "128105": {
844
+ "content": "<|reserved_special_token_100|>",
845
+ "lstrip": false,
846
+ "normalized": false,
847
+ "rstrip": false,
848
+ "single_word": false,
849
+ "special": true
850
+ },
851
+ "128106": {
852
+ "content": "<|reserved_special_token_101|>",
853
+ "lstrip": false,
854
+ "normalized": false,
855
+ "rstrip": false,
856
+ "single_word": false,
857
+ "special": true
858
+ },
859
+ "128107": {
860
+ "content": "<|reserved_special_token_102|>",
861
+ "lstrip": false,
862
+ "normalized": false,
863
+ "rstrip": false,
864
+ "single_word": false,
865
+ "special": true
866
+ },
867
+ "128108": {
868
+ "content": "<|reserved_special_token_103|>",
869
+ "lstrip": false,
870
+ "normalized": false,
871
+ "rstrip": false,
872
+ "single_word": false,
873
+ "special": true
874
+ },
875
+ "128109": {
876
+ "content": "<|reserved_special_token_104|>",
877
+ "lstrip": false,
878
+ "normalized": false,
879
+ "rstrip": false,
880
+ "single_word": false,
881
+ "special": true
882
+ },
883
+ "128110": {
884
+ "content": "<|reserved_special_token_105|>",
885
+ "lstrip": false,
886
+ "normalized": false,
887
+ "rstrip": false,
888
+ "single_word": false,
889
+ "special": true
890
+ },
891
+ "128111": {
892
+ "content": "<|reserved_special_token_106|>",
893
+ "lstrip": false,
894
+ "normalized": false,
895
+ "rstrip": false,
896
+ "single_word": false,
897
+ "special": true
898
+ },
899
+ "128112": {
900
+ "content": "<|reserved_special_token_107|>",
901
+ "lstrip": false,
902
+ "normalized": false,
903
+ "rstrip": false,
904
+ "single_word": false,
905
+ "special": true
906
+ },
907
+ "128113": {
908
+ "content": "<|reserved_special_token_108|>",
909
+ "lstrip": false,
910
+ "normalized": false,
911
+ "rstrip": false,
912
+ "single_word": false,
913
+ "special": true
914
+ },
915
+ "128114": {
916
+ "content": "<|reserved_special_token_109|>",
917
+ "lstrip": false,
918
+ "normalized": false,
919
+ "rstrip": false,
920
+ "single_word": false,
921
+ "special": true
922
+ },
923
+ "128115": {
924
+ "content": "<|reserved_special_token_110|>",
925
+ "lstrip": false,
926
+ "normalized": false,
927
+ "rstrip": false,
928
+ "single_word": false,
929
+ "special": true
930
+ },
931
+ "128116": {
932
+ "content": "<|reserved_special_token_111|>",
933
+ "lstrip": false,
934
+ "normalized": false,
935
+ "rstrip": false,
936
+ "single_word": false,
937
+ "special": true
938
+ },
939
+ "128117": {
940
+ "content": "<|reserved_special_token_112|>",
941
+ "lstrip": false,
942
+ "normalized": false,
943
+ "rstrip": false,
944
+ "single_word": false,
945
+ "special": true
946
+ },
947
+ "128118": {
948
+ "content": "<|reserved_special_token_113|>",
949
+ "lstrip": false,
950
+ "normalized": false,
951
+ "rstrip": false,
952
+ "single_word": false,
953
+ "special": true
954
+ },
955
+ "128119": {
956
+ "content": "<|reserved_special_token_114|>",
957
+ "lstrip": false,
958
+ "normalized": false,
959
+ "rstrip": false,
960
+ "single_word": false,
961
+ "special": true
962
+ },
963
+ "128120": {
964
+ "content": "<|reserved_special_token_115|>",
965
+ "lstrip": false,
966
+ "normalized": false,
967
+ "rstrip": false,
968
+ "single_word": false,
969
+ "special": true
970
+ },
971
+ "128121": {
972
+ "content": "<|reserved_special_token_116|>",
973
+ "lstrip": false,
974
+ "normalized": false,
975
+ "rstrip": false,
976
+ "single_word": false,
977
+ "special": true
978
+ },
979
+ "128122": {
980
+ "content": "<|reserved_special_token_117|>",
981
+ "lstrip": false,
982
+ "normalized": false,
983
+ "rstrip": false,
984
+ "single_word": false,
985
+ "special": true
986
+ },
987
+ "128123": {
988
+ "content": "<|reserved_special_token_118|>",
989
+ "lstrip": false,
990
+ "normalized": false,
991
+ "rstrip": false,
992
+ "single_word": false,
993
+ "special": true
994
+ },
995
+ "128124": {
996
+ "content": "<|reserved_special_token_119|>",
997
+ "lstrip": false,
998
+ "normalized": false,
999
+ "rstrip": false,
1000
+ "single_word": false,
1001
+ "special": true
1002
+ },
1003
+ "128125": {
1004
+ "content": "<|reserved_special_token_120|>",
1005
+ "lstrip": false,
1006
+ "normalized": false,
1007
+ "rstrip": false,
1008
+ "single_word": false,
1009
+ "special": true
1010
+ },
1011
+ "128126": {
1012
+ "content": "<|reserved_special_token_121|>",
1013
+ "lstrip": false,
1014
+ "normalized": false,
1015
+ "rstrip": false,
1016
+ "single_word": false,
1017
+ "special": true
1018
+ },
1019
+ "128127": {
1020
+ "content": "<|reserved_special_token_122|>",
1021
+ "lstrip": false,
1022
+ "normalized": false,
1023
+ "rstrip": false,
1024
+ "single_word": false,
1025
+ "special": true
1026
+ },
1027
+ "128128": {
1028
+ "content": "<|reserved_special_token_123|>",
1029
+ "lstrip": false,
1030
+ "normalized": false,
1031
+ "rstrip": false,
1032
+ "single_word": false,
1033
+ "special": true
1034
+ },
1035
+ "128129": {
1036
+ "content": "<|reserved_special_token_124|>",
1037
+ "lstrip": false,
1038
+ "normalized": false,
1039
+ "rstrip": false,
1040
+ "single_word": false,
1041
+ "special": true
1042
+ },
1043
+ "128130": {
1044
+ "content": "<|reserved_special_token_125|>",
1045
+ "lstrip": false,
1046
+ "normalized": false,
1047
+ "rstrip": false,
1048
+ "single_word": false,
1049
+ "special": true
1050
+ },
1051
+ "128131": {
1052
+ "content": "<|reserved_special_token_126|>",
1053
+ "lstrip": false,
1054
+ "normalized": false,
1055
+ "rstrip": false,
1056
+ "single_word": false,
1057
+ "special": true
1058
+ },
1059
+ "128132": {
1060
+ "content": "<|reserved_special_token_127|>",
1061
+ "lstrip": false,
1062
+ "normalized": false,
1063
+ "rstrip": false,
1064
+ "single_word": false,
1065
+ "special": true
1066
+ },
1067
+ "128133": {
1068
+ "content": "<|reserved_special_token_128|>",
1069
+ "lstrip": false,
1070
+ "normalized": false,
1071
+ "rstrip": false,
1072
+ "single_word": false,
1073
+ "special": true
1074
+ },
1075
+ "128134": {
1076
+ "content": "<|reserved_special_token_129|>",
1077
+ "lstrip": false,
1078
+ "normalized": false,
1079
+ "rstrip": false,
1080
+ "single_word": false,
1081
+ "special": true
1082
+ },
1083
+ "128135": {
1084
+ "content": "<|reserved_special_token_130|>",
1085
+ "lstrip": false,
1086
+ "normalized": false,
1087
+ "rstrip": false,
1088
+ "single_word": false,
1089
+ "special": true
1090
+ },
1091
+ "128136": {
1092
+ "content": "<|reserved_special_token_131|>",
1093
+ "lstrip": false,
1094
+ "normalized": false,
1095
+ "rstrip": false,
1096
+ "single_word": false,
1097
+ "special": true
1098
+ },
1099
+ "128137": {
1100
+ "content": "<|reserved_special_token_132|>",
1101
+ "lstrip": false,
1102
+ "normalized": false,
1103
+ "rstrip": false,
1104
+ "single_word": false,
1105
+ "special": true
1106
+ },
1107
+ "128138": {
1108
+ "content": "<|reserved_special_token_133|>",
1109
+ "lstrip": false,
1110
+ "normalized": false,
1111
+ "rstrip": false,
1112
+ "single_word": false,
1113
+ "special": true
1114
+ },
1115
+ "128139": {
1116
+ "content": "<|reserved_special_token_134|>",
1117
+ "lstrip": false,
1118
+ "normalized": false,
1119
+ "rstrip": false,
1120
+ "single_word": false,
1121
+ "special": true
1122
+ },
1123
+ "128140": {
1124
+ "content": "<|reserved_special_token_135|>",
1125
+ "lstrip": false,
1126
+ "normalized": false,
1127
+ "rstrip": false,
1128
+ "single_word": false,
1129
+ "special": true
1130
+ },
1131
+ "128141": {
1132
+ "content": "<|reserved_special_token_136|>",
1133
+ "lstrip": false,
1134
+ "normalized": false,
1135
+ "rstrip": false,
1136
+ "single_word": false,
1137
+ "special": true
1138
+ },
1139
+ "128142": {
1140
+ "content": "<|reserved_special_token_137|>",
1141
+ "lstrip": false,
1142
+ "normalized": false,
1143
+ "rstrip": false,
1144
+ "single_word": false,
1145
+ "special": true
1146
+ },
1147
+ "128143": {
1148
+ "content": "<|reserved_special_token_138|>",
1149
+ "lstrip": false,
1150
+ "normalized": false,
1151
+ "rstrip": false,
1152
+ "single_word": false,
1153
+ "special": true
1154
+ },
1155
+ "128144": {
1156
+ "content": "<|reserved_special_token_139|>",
1157
+ "lstrip": false,
1158
+ "normalized": false,
1159
+ "rstrip": false,
1160
+ "single_word": false,
1161
+ "special": true
1162
+ },
1163
+ "128145": {
1164
+ "content": "<|reserved_special_token_140|>",
1165
+ "lstrip": false,
1166
+ "normalized": false,
1167
+ "rstrip": false,
1168
+ "single_word": false,
1169
+ "special": true
1170
+ },
1171
+ "128146": {
1172
+ "content": "<|reserved_special_token_141|>",
1173
+ "lstrip": false,
1174
+ "normalized": false,
1175
+ "rstrip": false,
1176
+ "single_word": false,
1177
+ "special": true
1178
+ },
1179
+ "128147": {
1180
+ "content": "<|reserved_special_token_142|>",
1181
+ "lstrip": false,
1182
+ "normalized": false,
1183
+ "rstrip": false,
1184
+ "single_word": false,
1185
+ "special": true
1186
+ },
1187
+ "128148": {
1188
+ "content": "<|reserved_special_token_143|>",
1189
+ "lstrip": false,
1190
+ "normalized": false,
1191
+ "rstrip": false,
1192
+ "single_word": false,
1193
+ "special": true
1194
+ },
1195
+ "128149": {
1196
+ "content": "<|reserved_special_token_144|>",
1197
+ "lstrip": false,
1198
+ "normalized": false,
1199
+ "rstrip": false,
1200
+ "single_word": false,
1201
+ "special": true
1202
+ },
1203
+ "128150": {
1204
+ "content": "<|reserved_special_token_145|>",
1205
+ "lstrip": false,
1206
+ "normalized": false,
1207
+ "rstrip": false,
1208
+ "single_word": false,
1209
+ "special": true
1210
+ },
1211
+ "128151": {
1212
+ "content": "<|reserved_special_token_146|>",
1213
+ "lstrip": false,
1214
+ "normalized": false,
1215
+ "rstrip": false,
1216
+ "single_word": false,
1217
+ "special": true
1218
+ },
1219
+ "128152": {
1220
+ "content": "<|reserved_special_token_147|>",
1221
+ "lstrip": false,
1222
+ "normalized": false,
1223
+ "rstrip": false,
1224
+ "single_word": false,
1225
+ "special": true
1226
+ },
1227
+ "128153": {
1228
+ "content": "<|reserved_special_token_148|>",
1229
+ "lstrip": false,
1230
+ "normalized": false,
1231
+ "rstrip": false,
1232
+ "single_word": false,
1233
+ "special": true
1234
+ },
1235
+ "128154": {
1236
+ "content": "<|reserved_special_token_149|>",
1237
+ "lstrip": false,
1238
+ "normalized": false,
1239
+ "rstrip": false,
1240
+ "single_word": false,
1241
+ "special": true
1242
+ },
1243
+ "128155": {
1244
+ "content": "<|reserved_special_token_150|>",
1245
+ "lstrip": false,
1246
+ "normalized": false,
1247
+ "rstrip": false,
1248
+ "single_word": false,
1249
+ "special": true
1250
+ },
1251
+ "128156": {
1252
+ "content": "<|reserved_special_token_151|>",
1253
+ "lstrip": false,
1254
+ "normalized": false,
1255
+ "rstrip": false,
1256
+ "single_word": false,
1257
+ "special": true
1258
+ },
1259
+ "128157": {
1260
+ "content": "<|reserved_special_token_152|>",
1261
+ "lstrip": false,
1262
+ "normalized": false,
1263
+ "rstrip": false,
1264
+ "single_word": false,
1265
+ "special": true
1266
+ },
1267
+ "128158": {
1268
+ "content": "<|reserved_special_token_153|>",
1269
+ "lstrip": false,
1270
+ "normalized": false,
1271
+ "rstrip": false,
1272
+ "single_word": false,
1273
+ "special": true
1274
+ },
1275
+ "128159": {
1276
+ "content": "<|reserved_special_token_154|>",
1277
+ "lstrip": false,
1278
+ "normalized": false,
1279
+ "rstrip": false,
1280
+ "single_word": false,
1281
+ "special": true
1282
+ },
1283
+ "128160": {
1284
+ "content": "<|reserved_special_token_155|>",
1285
+ "lstrip": false,
1286
+ "normalized": false,
1287
+ "rstrip": false,
1288
+ "single_word": false,
1289
+ "special": true
1290
+ },
1291
+ "128161": {
1292
+ "content": "<|reserved_special_token_156|>",
1293
+ "lstrip": false,
1294
+ "normalized": false,
1295
+ "rstrip": false,
1296
+ "single_word": false,
1297
+ "special": true
1298
+ },
1299
+ "128162": {
1300
+ "content": "<|reserved_special_token_157|>",
1301
+ "lstrip": false,
1302
+ "normalized": false,
1303
+ "rstrip": false,
1304
+ "single_word": false,
1305
+ "special": true
1306
+ },
1307
+ "128163": {
1308
+ "content": "<|reserved_special_token_158|>",
1309
+ "lstrip": false,
1310
+ "normalized": false,
1311
+ "rstrip": false,
1312
+ "single_word": false,
1313
+ "special": true
1314
+ },
1315
+ "128164": {
1316
+ "content": "<|reserved_special_token_159|>",
1317
+ "lstrip": false,
1318
+ "normalized": false,
1319
+ "rstrip": false,
1320
+ "single_word": false,
1321
+ "special": true
1322
+ },
1323
+ "128165": {
1324
+ "content": "<|reserved_special_token_160|>",
1325
+ "lstrip": false,
1326
+ "normalized": false,
1327
+ "rstrip": false,
1328
+ "single_word": false,
1329
+ "special": true
1330
+ },
1331
+ "128166": {
1332
+ "content": "<|reserved_special_token_161|>",
1333
+ "lstrip": false,
1334
+ "normalized": false,
1335
+ "rstrip": false,
1336
+ "single_word": false,
1337
+ "special": true
1338
+ },
1339
+ "128167": {
1340
+ "content": "<|reserved_special_token_162|>",
1341
+ "lstrip": false,
1342
+ "normalized": false,
1343
+ "rstrip": false,
1344
+ "single_word": false,
1345
+ "special": true
1346
+ },
1347
+ "128168": {
1348
+ "content": "<|reserved_special_token_163|>",
1349
+ "lstrip": false,
1350
+ "normalized": false,
1351
+ "rstrip": false,
1352
+ "single_word": false,
1353
+ "special": true
1354
+ },
1355
+ "128169": {
1356
+ "content": "<|reserved_special_token_164|>",
1357
+ "lstrip": false,
1358
+ "normalized": false,
1359
+ "rstrip": false,
1360
+ "single_word": false,
1361
+ "special": true
1362
+ },
1363
+ "128170": {
1364
+ "content": "<|reserved_special_token_165|>",
1365
+ "lstrip": false,
1366
+ "normalized": false,
1367
+ "rstrip": false,
1368
+ "single_word": false,
1369
+ "special": true
1370
+ },
1371
+ "128171": {
1372
+ "content": "<|reserved_special_token_166|>",
1373
+ "lstrip": false,
1374
+ "normalized": false,
1375
+ "rstrip": false,
1376
+ "single_word": false,
1377
+ "special": true
1378
+ },
1379
+ "128172": {
1380
+ "content": "<|reserved_special_token_167|>",
1381
+ "lstrip": false,
1382
+ "normalized": false,
1383
+ "rstrip": false,
1384
+ "single_word": false,
1385
+ "special": true
1386
+ },
1387
+ "128173": {
1388
+ "content": "<|reserved_special_token_168|>",
1389
+ "lstrip": false,
1390
+ "normalized": false,
1391
+ "rstrip": false,
1392
+ "single_word": false,
1393
+ "special": true
1394
+ },
1395
+ "128174": {
1396
+ "content": "<|reserved_special_token_169|>",
1397
+ "lstrip": false,
1398
+ "normalized": false,
1399
+ "rstrip": false,
1400
+ "single_word": false,
1401
+ "special": true
1402
+ },
1403
+ "128175": {
1404
+ "content": "<|reserved_special_token_170|>",
1405
+ "lstrip": false,
1406
+ "normalized": false,
1407
+ "rstrip": false,
1408
+ "single_word": false,
1409
+ "special": true
1410
+ },
1411
+ "128176": {
1412
+ "content": "<|reserved_special_token_171|>",
1413
+ "lstrip": false,
1414
+ "normalized": false,
1415
+ "rstrip": false,
1416
+ "single_word": false,
1417
+ "special": true
1418
+ },
1419
+ "128177": {
1420
+ "content": "<|reserved_special_token_172|>",
1421
+ "lstrip": false,
1422
+ "normalized": false,
1423
+ "rstrip": false,
1424
+ "single_word": false,
1425
+ "special": true
1426
+ },
1427
+ "128178": {
1428
+ "content": "<|reserved_special_token_173|>",
1429
+ "lstrip": false,
1430
+ "normalized": false,
1431
+ "rstrip": false,
1432
+ "single_word": false,
1433
+ "special": true
1434
+ },
1435
+ "128179": {
1436
+ "content": "<|reserved_special_token_174|>",
1437
+ "lstrip": false,
1438
+ "normalized": false,
1439
+ "rstrip": false,
1440
+ "single_word": false,
1441
+ "special": true
1442
+ },
1443
+ "128180": {
1444
+ "content": "<|reserved_special_token_175|>",
1445
+ "lstrip": false,
1446
+ "normalized": false,
1447
+ "rstrip": false,
1448
+ "single_word": false,
1449
+ "special": true
1450
+ },
1451
+ "128181": {
1452
+ "content": "<|reserved_special_token_176|>",
1453
+ "lstrip": false,
1454
+ "normalized": false,
1455
+ "rstrip": false,
1456
+ "single_word": false,
1457
+ "special": true
1458
+ },
1459
+ "128182": {
1460
+ "content": "<|reserved_special_token_177|>",
1461
+ "lstrip": false,
1462
+ "normalized": false,
1463
+ "rstrip": false,
1464
+ "single_word": false,
1465
+ "special": true
1466
+ },
1467
+ "128183": {
1468
+ "content": "<|reserved_special_token_178|>",
1469
+ "lstrip": false,
1470
+ "normalized": false,
1471
+ "rstrip": false,
1472
+ "single_word": false,
1473
+ "special": true
1474
+ },
1475
+ "128184": {
1476
+ "content": "<|reserved_special_token_179|>",
1477
+ "lstrip": false,
1478
+ "normalized": false,
1479
+ "rstrip": false,
1480
+ "single_word": false,
1481
+ "special": true
1482
+ },
1483
+ "128185": {
1484
+ "content": "<|reserved_special_token_180|>",
1485
+ "lstrip": false,
1486
+ "normalized": false,
1487
+ "rstrip": false,
1488
+ "single_word": false,
1489
+ "special": true
1490
+ },
1491
+ "128186": {
1492
+ "content": "<|reserved_special_token_181|>",
1493
+ "lstrip": false,
1494
+ "normalized": false,
1495
+ "rstrip": false,
1496
+ "single_word": false,
1497
+ "special": true
1498
+ },
1499
+ "128187": {
1500
+ "content": "<|reserved_special_token_182|>",
1501
+ "lstrip": false,
1502
+ "normalized": false,
1503
+ "rstrip": false,
1504
+ "single_word": false,
1505
+ "special": true
1506
+ },
1507
+ "128188": {
1508
+ "content": "<|reserved_special_token_183|>",
1509
+ "lstrip": false,
1510
+ "normalized": false,
1511
+ "rstrip": false,
1512
+ "single_word": false,
1513
+ "special": true
1514
+ },
1515
+ "128189": {
1516
+ "content": "<|reserved_special_token_184|>",
1517
+ "lstrip": false,
1518
+ "normalized": false,
1519
+ "rstrip": false,
1520
+ "single_word": false,
1521
+ "special": true
1522
+ },
1523
+ "128190": {
1524
+ "content": "<|reserved_special_token_185|>",
1525
+ "lstrip": false,
1526
+ "normalized": false,
1527
+ "rstrip": false,
1528
+ "single_word": false,
1529
+ "special": true
1530
+ },
1531
+ "128191": {
1532
+ "content": "<|reserved_special_token_186|>",
1533
+ "lstrip": false,
1534
+ "normalized": false,
1535
+ "rstrip": false,
1536
+ "single_word": false,
1537
+ "special": true
1538
+ },
1539
+ "128192": {
1540
+ "content": "<|reserved_special_token_187|>",
1541
+ "lstrip": false,
1542
+ "normalized": false,
1543
+ "rstrip": false,
1544
+ "single_word": false,
1545
+ "special": true
1546
+ },
1547
+ "128193": {
1548
+ "content": "<|reserved_special_token_188|>",
1549
+ "lstrip": false,
1550
+ "normalized": false,
1551
+ "rstrip": false,
1552
+ "single_word": false,
1553
+ "special": true
1554
+ },
1555
+ "128194": {
1556
+ "content": "<|reserved_special_token_189|>",
1557
+ "lstrip": false,
1558
+ "normalized": false,
1559
+ "rstrip": false,
1560
+ "single_word": false,
1561
+ "special": true
1562
+ },
1563
+ "128195": {
1564
+ "content": "<|reserved_special_token_190|>",
1565
+ "lstrip": false,
1566
+ "normalized": false,
1567
+ "rstrip": false,
1568
+ "single_word": false,
1569
+ "special": true
1570
+ },
1571
+ "128196": {
1572
+ "content": "<|reserved_special_token_191|>",
1573
+ "lstrip": false,
1574
+ "normalized": false,
1575
+ "rstrip": false,
1576
+ "single_word": false,
1577
+ "special": true
1578
+ },
1579
+ "128197": {
1580
+ "content": "<|reserved_special_token_192|>",
1581
+ "lstrip": false,
1582
+ "normalized": false,
1583
+ "rstrip": false,
1584
+ "single_word": false,
1585
+ "special": true
1586
+ },
1587
+ "128198": {
1588
+ "content": "<|reserved_special_token_193|>",
1589
+ "lstrip": false,
1590
+ "normalized": false,
1591
+ "rstrip": false,
1592
+ "single_word": false,
1593
+ "special": true
1594
+ },
1595
+ "128199": {
1596
+ "content": "<|reserved_special_token_194|>",
1597
+ "lstrip": false,
1598
+ "normalized": false,
1599
+ "rstrip": false,
1600
+ "single_word": false,
1601
+ "special": true
1602
+ },
1603
+ "128200": {
1604
+ "content": "<|reserved_special_token_195|>",
1605
+ "lstrip": false,
1606
+ "normalized": false,
1607
+ "rstrip": false,
1608
+ "single_word": false,
1609
+ "special": true
1610
+ },
1611
+ "128201": {
1612
+ "content": "<|reserved_special_token_196|>",
1613
+ "lstrip": false,
1614
+ "normalized": false,
1615
+ "rstrip": false,
1616
+ "single_word": false,
1617
+ "special": true
1618
+ },
1619
+ "128202": {
1620
+ "content": "<|reserved_special_token_197|>",
1621
+ "lstrip": false,
1622
+ "normalized": false,
1623
+ "rstrip": false,
1624
+ "single_word": false,
1625
+ "special": true
1626
+ },
1627
+ "128203": {
1628
+ "content": "<|reserved_special_token_198|>",
1629
+ "lstrip": false,
1630
+ "normalized": false,
1631
+ "rstrip": false,
1632
+ "single_word": false,
1633
+ "special": true
1634
+ },
1635
+ "128204": {
1636
+ "content": "<|reserved_special_token_199|>",
1637
+ "lstrip": false,
1638
+ "normalized": false,
1639
+ "rstrip": false,
1640
+ "single_word": false,
1641
+ "special": true
1642
+ },
1643
+ "128205": {
1644
+ "content": "<|reserved_special_token_200|>",
1645
+ "lstrip": false,
1646
+ "normalized": false,
1647
+ "rstrip": false,
1648
+ "single_word": false,
1649
+ "special": true
1650
+ },
1651
+ "128206": {
1652
+ "content": "<|reserved_special_token_201|>",
1653
+ "lstrip": false,
1654
+ "normalized": false,
1655
+ "rstrip": false,
1656
+ "single_word": false,
1657
+ "special": true
1658
+ },
1659
+ "128207": {
1660
+ "content": "<|reserved_special_token_202|>",
1661
+ "lstrip": false,
1662
+ "normalized": false,
1663
+ "rstrip": false,
1664
+ "single_word": false,
1665
+ "special": true
1666
+ },
1667
+ "128208": {
1668
+ "content": "<|reserved_special_token_203|>",
1669
+ "lstrip": false,
1670
+ "normalized": false,
1671
+ "rstrip": false,
1672
+ "single_word": false,
1673
+ "special": true
1674
+ },
1675
+ "128209": {
1676
+ "content": "<|reserved_special_token_204|>",
1677
+ "lstrip": false,
1678
+ "normalized": false,
1679
+ "rstrip": false,
1680
+ "single_word": false,
1681
+ "special": true
1682
+ },
1683
+ "128210": {
1684
+ "content": "<|reserved_special_token_205|>",
1685
+ "lstrip": false,
1686
+ "normalized": false,
1687
+ "rstrip": false,
1688
+ "single_word": false,
1689
+ "special": true
1690
+ },
1691
+ "128211": {
1692
+ "content": "<|reserved_special_token_206|>",
1693
+ "lstrip": false,
1694
+ "normalized": false,
1695
+ "rstrip": false,
1696
+ "single_word": false,
1697
+ "special": true
1698
+ },
1699
+ "128212": {
1700
+ "content": "<|reserved_special_token_207|>",
1701
+ "lstrip": false,
1702
+ "normalized": false,
1703
+ "rstrip": false,
1704
+ "single_word": false,
1705
+ "special": true
1706
+ },
1707
+ "128213": {
1708
+ "content": "<|reserved_special_token_208|>",
1709
+ "lstrip": false,
1710
+ "normalized": false,
1711
+ "rstrip": false,
1712
+ "single_word": false,
1713
+ "special": true
1714
+ },
1715
+ "128214": {
1716
+ "content": "<|reserved_special_token_209|>",
1717
+ "lstrip": false,
1718
+ "normalized": false,
1719
+ "rstrip": false,
1720
+ "single_word": false,
1721
+ "special": true
1722
+ },
1723
+ "128215": {
1724
+ "content": "<|reserved_special_token_210|>",
1725
+ "lstrip": false,
1726
+ "normalized": false,
1727
+ "rstrip": false,
1728
+ "single_word": false,
1729
+ "special": true
1730
+ },
1731
+ "128216": {
1732
+ "content": "<|reserved_special_token_211|>",
1733
+ "lstrip": false,
1734
+ "normalized": false,
1735
+ "rstrip": false,
1736
+ "single_word": false,
1737
+ "special": true
1738
+ },
1739
+ "128217": {
1740
+ "content": "<|reserved_special_token_212|>",
1741
+ "lstrip": false,
1742
+ "normalized": false,
1743
+ "rstrip": false,
1744
+ "single_word": false,
1745
+ "special": true
1746
+ },
1747
+ "128218": {
1748
+ "content": "<|reserved_special_token_213|>",
1749
+ "lstrip": false,
1750
+ "normalized": false,
1751
+ "rstrip": false,
1752
+ "single_word": false,
1753
+ "special": true
1754
+ },
1755
+ "128219": {
1756
+ "content": "<|reserved_special_token_214|>",
1757
+ "lstrip": false,
1758
+ "normalized": false,
1759
+ "rstrip": false,
1760
+ "single_word": false,
1761
+ "special": true
1762
+ },
1763
+ "128220": {
1764
+ "content": "<|reserved_special_token_215|>",
1765
+ "lstrip": false,
1766
+ "normalized": false,
1767
+ "rstrip": false,
1768
+ "single_word": false,
1769
+ "special": true
1770
+ },
1771
+ "128221": {
1772
+ "content": "<|reserved_special_token_216|>",
1773
+ "lstrip": false,
1774
+ "normalized": false,
1775
+ "rstrip": false,
1776
+ "single_word": false,
1777
+ "special": true
1778
+ },
1779
+ "128222": {
1780
+ "content": "<|reserved_special_token_217|>",
1781
+ "lstrip": false,
1782
+ "normalized": false,
1783
+ "rstrip": false,
1784
+ "single_word": false,
1785
+ "special": true
1786
+ },
1787
+ "128223": {
1788
+ "content": "<|reserved_special_token_218|>",
1789
+ "lstrip": false,
1790
+ "normalized": false,
1791
+ "rstrip": false,
1792
+ "single_word": false,
1793
+ "special": true
1794
+ },
1795
+ "128224": {
1796
+ "content": "<|reserved_special_token_219|>",
1797
+ "lstrip": false,
1798
+ "normalized": false,
1799
+ "rstrip": false,
1800
+ "single_word": false,
1801
+ "special": true
1802
+ },
1803
+ "128225": {
1804
+ "content": "<|reserved_special_token_220|>",
1805
+ "lstrip": false,
1806
+ "normalized": false,
1807
+ "rstrip": false,
1808
+ "single_word": false,
1809
+ "special": true
1810
+ },
1811
+ "128226": {
1812
+ "content": "<|reserved_special_token_221|>",
1813
+ "lstrip": false,
1814
+ "normalized": false,
1815
+ "rstrip": false,
1816
+ "single_word": false,
1817
+ "special": true
1818
+ },
1819
+ "128227": {
1820
+ "content": "<|reserved_special_token_222|>",
1821
+ "lstrip": false,
1822
+ "normalized": false,
1823
+ "rstrip": false,
1824
+ "single_word": false,
1825
+ "special": true
1826
+ },
1827
+ "128228": {
1828
+ "content": "<|reserved_special_token_223|>",
1829
+ "lstrip": false,
1830
+ "normalized": false,
1831
+ "rstrip": false,
1832
+ "single_word": false,
1833
+ "special": true
1834
+ },
1835
+ "128229": {
1836
+ "content": "<|reserved_special_token_224|>",
1837
+ "lstrip": false,
1838
+ "normalized": false,
1839
+ "rstrip": false,
1840
+ "single_word": false,
1841
+ "special": true
1842
+ },
1843
+ "128230": {
1844
+ "content": "<|reserved_special_token_225|>",
1845
+ "lstrip": false,
1846
+ "normalized": false,
1847
+ "rstrip": false,
1848
+ "single_word": false,
1849
+ "special": true
1850
+ },
1851
+ "128231": {
1852
+ "content": "<|reserved_special_token_226|>",
1853
+ "lstrip": false,
1854
+ "normalized": false,
1855
+ "rstrip": false,
1856
+ "single_word": false,
1857
+ "special": true
1858
+ },
1859
+ "128232": {
1860
+ "content": "<|reserved_special_token_227|>",
1861
+ "lstrip": false,
1862
+ "normalized": false,
1863
+ "rstrip": false,
1864
+ "single_word": false,
1865
+ "special": true
1866
+ },
1867
+ "128233": {
1868
+ "content": "<|reserved_special_token_228|>",
1869
+ "lstrip": false,
1870
+ "normalized": false,
1871
+ "rstrip": false,
1872
+ "single_word": false,
1873
+ "special": true
1874
+ },
1875
+ "128234": {
1876
+ "content": "<|reserved_special_token_229|>",
1877
+ "lstrip": false,
1878
+ "normalized": false,
1879
+ "rstrip": false,
1880
+ "single_word": false,
1881
+ "special": true
1882
+ },
1883
+ "128235": {
1884
+ "content": "<|reserved_special_token_230|>",
1885
+ "lstrip": false,
1886
+ "normalized": false,
1887
+ "rstrip": false,
1888
+ "single_word": false,
1889
+ "special": true
1890
+ },
1891
+ "128236": {
1892
+ "content": "<|reserved_special_token_231|>",
1893
+ "lstrip": false,
1894
+ "normalized": false,
1895
+ "rstrip": false,
1896
+ "single_word": false,
1897
+ "special": true
1898
+ },
1899
+ "128237": {
1900
+ "content": "<|reserved_special_token_232|>",
1901
+ "lstrip": false,
1902
+ "normalized": false,
1903
+ "rstrip": false,
1904
+ "single_word": false,
1905
+ "special": true
1906
+ },
1907
+ "128238": {
1908
+ "content": "<|reserved_special_token_233|>",
1909
+ "lstrip": false,
1910
+ "normalized": false,
1911
+ "rstrip": false,
1912
+ "single_word": false,
1913
+ "special": true
1914
+ },
1915
+ "128239": {
1916
+ "content": "<|reserved_special_token_234|>",
1917
+ "lstrip": false,
1918
+ "normalized": false,
1919
+ "rstrip": false,
1920
+ "single_word": false,
1921
+ "special": true
1922
+ },
1923
+ "128240": {
1924
+ "content": "<|reserved_special_token_235|>",
1925
+ "lstrip": false,
1926
+ "normalized": false,
1927
+ "rstrip": false,
1928
+ "single_word": false,
1929
+ "special": true
1930
+ },
1931
+ "128241": {
1932
+ "content": "<|reserved_special_token_236|>",
1933
+ "lstrip": false,
1934
+ "normalized": false,
1935
+ "rstrip": false,
1936
+ "single_word": false,
1937
+ "special": true
1938
+ },
1939
+ "128242": {
1940
+ "content": "<|reserved_special_token_237|>",
1941
+ "lstrip": false,
1942
+ "normalized": false,
1943
+ "rstrip": false,
1944
+ "single_word": false,
1945
+ "special": true
1946
+ },
1947
+ "128243": {
1948
+ "content": "<|reserved_special_token_238|>",
1949
+ "lstrip": false,
1950
+ "normalized": false,
1951
+ "rstrip": false,
1952
+ "single_word": false,
1953
+ "special": true
1954
+ },
1955
+ "128244": {
1956
+ "content": "<|reserved_special_token_239|>",
1957
+ "lstrip": false,
1958
+ "normalized": false,
1959
+ "rstrip": false,
1960
+ "single_word": false,
1961
+ "special": true
1962
+ },
1963
+ "128245": {
1964
+ "content": "<|reserved_special_token_240|>",
1965
+ "lstrip": false,
1966
+ "normalized": false,
1967
+ "rstrip": false,
1968
+ "single_word": false,
1969
+ "special": true
1970
+ },
1971
+ "128246": {
1972
+ "content": "<|reserved_special_token_241|>",
1973
+ "lstrip": false,
1974
+ "normalized": false,
1975
+ "rstrip": false,
1976
+ "single_word": false,
1977
+ "special": true
1978
+ },
1979
+ "128247": {
1980
+ "content": "<|reserved_special_token_242|>",
1981
+ "lstrip": false,
1982
+ "normalized": false,
1983
+ "rstrip": false,
1984
+ "single_word": false,
1985
+ "special": true
1986
+ },
1987
+ "128248": {
1988
+ "content": "<|reserved_special_token_243|>",
1989
+ "lstrip": false,
1990
+ "normalized": false,
1991
+ "rstrip": false,
1992
+ "single_word": false,
1993
+ "special": true
1994
+ },
1995
+ "128249": {
1996
+ "content": "<|reserved_special_token_244|>",
1997
+ "lstrip": false,
1998
+ "normalized": false,
1999
+ "rstrip": false,
2000
+ "single_word": false,
2001
+ "special": true
2002
+ },
2003
+ "128250": {
2004
+ "content": "<|reserved_special_token_245|>",
2005
+ "lstrip": false,
2006
+ "normalized": false,
2007
+ "rstrip": false,
2008
+ "single_word": false,
2009
+ "special": true
2010
+ },
2011
+ "128251": {
2012
+ "content": "<|reserved_special_token_246|>",
2013
+ "lstrip": false,
2014
+ "normalized": false,
2015
+ "rstrip": false,
2016
+ "single_word": false,
2017
+ "special": true
2018
+ },
2019
+ "128252": {
2020
+ "content": "<|reserved_special_token_247|>",
2021
+ "lstrip": false,
2022
+ "normalized": false,
2023
+ "rstrip": false,
2024
+ "single_word": false,
2025
+ "special": true
2026
+ },
2027
+ "128253": {
2028
+ "content": "<|reserved_special_token_248|>",
2029
+ "lstrip": false,
2030
+ "normalized": false,
2031
+ "rstrip": false,
2032
+ "single_word": false,
2033
+ "special": true
2034
+ },
2035
+ "128254": {
2036
+ "content": "<|reserved_special_token_249|>",
2037
+ "lstrip": false,
2038
+ "normalized": false,
2039
+ "rstrip": false,
2040
+ "single_word": false,
2041
+ "special": true
2042
+ },
2043
+ "128255": {
2044
+ "content": "<|reserved_special_token_250|>",
2045
+ "lstrip": false,
2046
+ "normalized": false,
2047
+ "rstrip": false,
2048
+ "single_word": false,
2049
+ "special": true
2050
+ }
2051
+ },
2052
+ "bos_token": "<|begin_of_text|>",
2053
+ "clean_up_tokenization_spaces": true,
2054
+ "eos_token": "<|eot_id|>",
2055
+ "extra_special_tokens": {},
2056
+ "model_input_names": [
2057
+ "input_ids",
2058
+ "attention_mask"
2059
+ ],
2060
+ "model_max_length": 1024,
2061
+ "pad_token": "<|eot_id|>",
2062
+ "padding_side": "left",
2063
+ "tokenizer_class": "PreTrainedTokenizerFast",
2064
+ "truncation_side": "left"
2065
+ }
deepspeed_zero3.yaml ADDED
@@ -0,0 +1,66 @@
1
+ compute_environment: LOCAL_MACHINE
2
+ debug: false
3
+ deepspeed_config:
4
+ deepspeed_multinode_launcher: standard
5
+ zero3_save_16bit_model: true
6
+ zero_optimization:
7
+ stage: 3
8
+ offload_optimizer:
9
+ device: cpu
10
+ pin_memory: true
11
+ buffer_count: 4
12
+ offload_param:
13
+ device: cpu
14
+ pin_memory: true
15
+ buffer_count: 4
16
+ model_parallel:
17
+ enable: true
18
+ tensor_parallel_size: 4
19
+ pipeline_parallel_size: 1
20
+ reduce_bucket_size: 2e5
21
+ stage3_prefetch_bucket_size: 0.1e6 # Reduced further
22
+ stage3_max_live_parameters: 1e6
23
+ stage3_max_reuse_distance: 1e5
24
+ stage3_max_live_gradients: 1e6
25
+ stage3_param_persistence_threshold: 1e4
26
+ sub_group_size: 1e9
27
+
28
+ activation_checkpointing:
29
+ partition_activations: true
30
+ contiguous_memory_optimization: true
31
+ cpu_checkpointing: true
32
+
33
+ bf16:
34
+ enabled: false
35
+ fp16:
36
+ enabled: true
37
+
38
+ optimizer:
39
+ type: AdamW
40
+ params:
41
+ eight_bit: true
42
+ lr: auto
43
+ betas: auto
44
+ eps: auto
45
+ weight_decay: auto
46
+
47
+ scheduler:
48
+ type: WarmupDecayLR
49
+ params:
50
+ num_warmup_steps: auto
51
+ num_training_steps: auto
52
+
53
+ communication_data_type: fp16
54
+ communication_bucket_size: 500000000
55
+ dist_backend: nccl
56
+ train_micro_batch_size_per_gpu: auto
57
+ gradient_clipping: 1.0
58
+
59
+ distributed_type: DEEPSPEED
60
+ downcast_bf16: auto
61
+ machine_rank: 0
62
+ main_training_function: main
63
+ mixed_precision: bf16
64
+ rdzv_backend: static
65
+ same_network: true
66
+ use_cpu: auto
main.py ADDED
@@ -0,0 +1,33 @@
1
+ # main.py
2
+ from lightning.pytorch.cli import LightningCLI
3
+ from lightning.pytorch.callbacks import ModelCheckpoint
4
+ from lightning.fabric import seed_everything
5
+ from Algorithms import ActorCritic
6
+ from Tasks import TwentyQuestions
7
+ import torch
8
+ torch.backends.cuda.matmul.allow_tf32 = True # enable TF32 acceleration
9
+ torch.backends.cudnn.allow_tf32 = True
10
+ from torch.cuda.amp import GradScaler # only needed if using AMP
11
+
12
+ seed_everything(42)
13
+
14
+ def cli_main():
15
+ checkpoint_callback = ModelCheckpoint(
16
+ dirpath="models",
17
+ every_n_train_steps=200,
18
+ every_n_epochs=0,
19
+ save_top_k=1,
20
+ save_last=True # also keep the most recent checkpoint
21
+ )
22
+ cli = LightningCLI(
23
+ save_config_kwargs={"overwrite": True},
24
+ trainer_defaults={ # default trainer configuration
25
+ "callbacks": [checkpoint_callback],
26
+ "limit_val_batches": 0,
27
+ "val_check_interval": None
28
+ }
29
+ )
30
+ exit(0)
31
+
32
+ if __name__ == "__main__":
33
+ cli_main()
requirements.txt ADDED
@@ -0,0 +1,13 @@
1
+ lightning
2
+ jsonargparse
3
+ lightning-utilities
4
+ numpy
5
+ pytorch-lightning
6
+ safetensors
7
+ sentencepiece
8
+ torch
9
+ torchmetrics
10
+ tqdm
11
+ transformers
12
+ wandb
13
+ jsonargparse[signatures]>=4.26.1
rsa_game.py ADDED
@@ -0,0 +1,383 @@
1
+ import re
2
+ import string
3
+ import random
4
+ from typing import Optional, Tuple, List, Dict
5
+ from nltk.stem import PorterStemmer
6
+ PSTEMMER = PorterStemmer()
7
+
8
+ GAME_RULE_PROMPTS = [
9
+ """Play the game of Collaborative Rational Speech Act Game. In this game, there are two players, a speaker and a listener.
10
+
11
+ At the beginning, the speaker is assigned a target object, which is not revealed to the listener. The task of the speaker is to guide the listener to guess the target object; if the listener succeeds, they win the game. However, the speaker is only allowed to give one feature per turn.
12
+
13
+ At the same time, the listener tries to figure out the target object and give the possible target referent objects at each turn. The listener can give more than one possible target referent object set at each turn. If the listener identifies the target object, they can say "I know the target object! It is `target object`!".
14
+
15
+ At each turn, the speaker should try to give a feature of the target object which provides the listener with the most information, and the listener updates the possible target referent objects from the previous turn.
16
+
17
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
18
+
19
+ The fewer turns they take to guess the target object, the higher the score they get.
20
+ """,
21
+ """Engage in the collaborative challenge of Rational Speech Act Game, featuring two participants: one takes on the role of the speaker, while the other serves as the listener.
22
+
23
+ Initially, the speaker is secretly assigned a target object, which remains unknown to the listener. The speaker's objective is to strategically guide the listener toward identifying the target object, thereby securing victory. However, there's a constraint: the speaker may only provide one feature per turn.
24
+
25
+ Simultaneously, the listener's mission is to deduce the target object and present possible target referent objects at each turn. The listener has the flexibility to offer multiple possible target referent object sets during their turn. If the listener identifies the target object, they can declare "I know the target object! It is `target object`!".
26
+
27
+ During each turn, the speaker should aim to provide a feature of the target object that maximizes the information available to the listener, while the listener updates their possible target referent objects based on the previous turn's information.
28
+
29
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
30
+
31
+ The scoring system rewards efficiency: fewer turns required to guess the target object results in higher scores.
32
+ """,
33
+ """Dive into the strategic collaboration known as Rational Speech Act Game, where two players assume distinct roles: the speaker and the listener.
34
+
35
+ To commence the game, the speaker receives a target object in secret, while the listener remains unaware of this object. The speaker's challenge is to effectively guide the listener toward correctly identifying the target object, which would result in a win. However, there's a limitation: the speaker can only share one feature per turn.
36
+
37
+ Concurrently, the listener embarks on a quest to determine the target object and must present possible target referent objects at each turn. The listener enjoys the advantage of being able to propose multiple possible target referent object sets during their turn. If the listener identifies the target object, they can proclaim "I know the target object! It is `target object`!".
38
+
39
+ At each turn, the speaker should strategically select a feature of the target object that provides the listener with maximum informational value, while the listener refines their possible target referent objects based on the previous turn's revelations.
40
+
41
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
42
+
43
+ The scoring mechanism emphasizes efficiency: achieving the target object identification in fewer turns yields higher scores.
44
+ """,
45
+ """Step into the cooperative challenge of Rational Speech Act Game, a game designed for two players: one acting as the speaker, the other as the listener.
46
+
47
+ In the opening phase, the speaker is discreetly given a target object, which is kept hidden from the listener. The speaker's goal is to successfully lead the listener to identify the target object, thereby claiming victory. However, there's a rule: the speaker is restricted to providing only one feature per turn.
48
+
49
+ At the same time, the listener's task is to figure out the target object and present possible target referent objects at each turn. The listener has the liberty to suggest multiple possible target referent object sets during their turn. If the listener identifies the target object, they can announce "I know the target object! It is `target object`!".
50
+
51
+ During each turn, the speaker should carefully choose a feature of the target object that delivers the most valuable information to the listener, while the listener adjusts their possible target referent objects based on the previous turn's insights.
52
+
53
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
54
+
55
+ The scoring system prioritizes efficiency: the fewer turns taken to identify the target object, the higher the score achieved.
56
+ """,
57
+ """Immerse yourself in the collaborative strategy game called Rational Speech Act Game, featuring two distinct roles: the speaker and the listener.
58
+
59
+ As the game begins, the speaker is covertly assigned a target object, which remains a mystery to the listener. The speaker's mission is to skillfully guide the listener toward correctly identifying the target object, which would secure a win. However, there's a restriction: the speaker may only offer one feature per turn.
60
+
61
+ Meanwhile, the listener is engaged in a process of deduction, attempting to determine the target object and presenting possible target referent objects at each turn. The listener benefits from the ability to suggest multiple possible target referent object sets during their turn. If the listener identifies the target object, they can state "I know the target object! It is `target object`!".
62
+
63
+ At each turn, the speaker should strategically provide a feature of the target object that maximizes the informational benefit for the listener, while the listener updates their possible target referent objects based on the previous turn's information.
64
+
65
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
66
+
67
+ The scoring framework rewards efficiency: fewer turns required to identify the target object results in higher scores.
68
+ """,
69
+ """Dive into the collaborative challenge known as Rational Speech Act Game, where two players take on specific roles: the speaker and the listener.
70
+
71
+ The speaker starts the game with a secret target object assignment, while the listener remains in the dark about this object. The speaker's objective is to effectively guide the listener toward identifying the target object, thereby achieving victory. However, there's a constraint: the speaker can only provide one feature per turn.
72
+
73
+ Concurrently, the listener's challenge is to deduce the target object and present possible target referent objects at each turn. The listener enjoys the flexibility of being able to suggest multiple possible target referent object sets during their turn. If the listener identifies the target object, they can express "I know the target object! It is `target object`!".
74
+
75
+ During each turn, the speaker should aim to provide a feature of the target object that offers the listener the most valuable information, while the listener refines their possible target referent objects based on the previous turn's revelations.
76
+
77
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
78
+
79
+ The scoring mechanism emphasizes efficiency: achieving target object identification in fewer turns yields higher scores.
80
+ """,
81
+ """Step into the strategic collaboration called Rational Speech Act Game, where the roles of speaker and listener are central to the gameplay.
82
+
83
+ The speaker embarks on this journey with a secretly assigned target object, while the listener begins unaware of this object. The speaker's challenge is to successfully guide the listener toward identifying the target object, which would result in a win. However, there's a limitation: the speaker is only permitted to share one feature per turn.
84
+
85
+ On the other side, the listener's mission is to figure out the target object and present possible target referent objects at each turn. The listener has the advantage of being able to propose multiple possible target referent object sets during their turn. If the listener identifies the target object, they can reveal "I know the target object! It is `target object`!".
86
+
87
+ At each turn, the speaker should strategically select a feature of the target object that provides the listener with maximum informational value, while the listener updates their possible target referent objects based on the previous turn's insights.
88
+
89
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
90
+
91
+ The scoring system prioritizes efficiency: fewer turns taken to identify the target object results in higher scores.
92
+ """,
93
+ """Embark on the collaborative challenge of Rational Speech Act Game, where players assume the roles of either speaker or listener.
94
+
95
+ The speaker enters the game with a covertly assigned target object, while the listener starts without knowledge of this object. The speaker's goal is to effectively guide the listener toward identifying the target object, thereby securing victory. However, there's a rule: the speaker may only provide one feature per turn.
96
+
97
+ Simultaneously, the listener's task is to deduce the target object and present possible target referent objects at each turn. The listener benefits from the ability to suggest multiple possible target referent object sets during their turn. If the listener identifies the target object, they can declare "I know the target object! It is `target object`!".
98
+
99
+ During each turn, the speaker should carefully choose a feature of the target object that delivers the most valuable information to the listener, while the listener adjusts their possible target referent objects based on the previous turn's information.
100
+
101
+ Remember, the listener can only update their referent set from the previous turn's guess; they cannot add new referents.
102
+
103
+ The scoring framework rewards efficiency: the fewer turns required to identify the target object, the higher the score achieved.
104
+ """,
105
+ ]
106
+
107
+
108
+ INSTRUCT_PROMPTS = {
109
+ "speaker": """\n\n### Instruction: You are the pragmatic rational speaker. The target object is `{target}` and the object list is '{object_list}'. Provide your response including the object feature.\n\n### Response:""",
110
+ "listener": """\n\n### Instruction: You are the pragmatic rational listener. The object list is '{object_list}'. Provide your inferred target object or the possible target object sets.\n\n### Response:""",
111
+ }
112
+
113
+ PLAYER_INSTRUCT_PROMPTS = {
114
+ "speaker": "You are the pragmatic rational speaker. The target object is `{target}` and the object list is '{object_list}'. Provide your response including the object feature.",
115
+ "listener": "You are the pragmatic rational listener. The object list is '{object_list}'. Provide your inferred target object or the possible target object sets.",
116
+ }
117
+
118
+ ROLES = ["speaker", "listener"]
119
+ PREDICT_TEMP = r"i know the target object"
120
+
121
+ def calculate_reward(turn, golden_turn):
122
+ if turn <= golden_turn:
123
+ return 1
124
+ else:
125
+ return 0
126
+
127
+
128
+ def get_object_list(formatted_str):
129
+ """Reverse the item_string function to extract the original list of items"""
130
+ # If the string is empty, return empty list
131
+ if not formatted_str:
132
+ return []
133
+
134
+ # Remove surrounding brackets/parentheses/braces if present
135
+ if formatted_str.startswith("[") and formatted_str.endswith("]"):
136
+ formatted_str = formatted_str[1:-1].strip()
137
+ elif formatted_str.startswith("(") and formatted_str.endswith(")"):
138
+ formatted_str = formatted_str[1:-1].strip()
139
+ elif formatted_str.startswith("{") and formatted_str.endswith("}"):
140
+ formatted_str = formatted_str[1:-1].strip()
141
+
142
+ # Handle different formats
143
+ if ", and " in formatted_str: # Oxford comma format
144
+ parts = formatted_str.split(", and ")
145
+ if len(parts) > 1:
146
+ return [item.strip() for item in parts[0].split(", ")] + [parts[1].strip()]
147
+ elif " and " in formatted_str and ", " in formatted_str: # No Oxford comma
148
+ parts = formatted_str.split(", ")
149
+ last_part = parts[-1]
150
+ if " and " in last_part:
151
+ last_items = last_part.split(" and ")
152
+ return [item.strip() for item in parts[:-1]] + [
153
+ item.strip() for item in last_items
154
+ ]
155
+ elif " and " in formatted_str: # "and" separated format
156
+ return [item.strip() for item in formatted_str.split(" and ")]
157
+ elif ", " in formatted_str: # Comma separated format
158
+ return [item.strip() for item in formatted_str.split(", ")]
159
+
160
+ # If none of the above patterns match, return the string as a single-item list
161
+ return [formatted_str.strip()]
162
+
163
+
164
+ def get_derivative_targets(object_list, random_choice=True):
165
+ """Format a list of items in various natural ways"""
166
+ # Randomly choose from different formats
167
+ format_choices = {
168
+ "comma": lambda x: ", ".join(x),
169
+ "bracket": lambda x: "[" + ", ".join(x) + "]",
170
+ "parenthesis": lambda x: "(" + ", ".join(x) + ")",
171
+ "curly": lambda x: "{" + ", ".join(x) + "}",
172
+ "oxford_comma": lambda x: ", ".join(x[:-1]) + ", and " + x[-1],
173
+ "and_comma": lambda x: " and ".join(x[:-1]) + ", and " + x[-1],
174
+ "no_oxford_comma": lambda x: ", ".join(x[:-1]) + " and " + x[-1],
175
+ "and_separated": lambda x: " and ".join(x),
176
+ }
177
+
178
+ if random_choice:
179
+ format = random.choice(list(format_choices.keys()))
180
+ return format_choices[format](object_list)
181
+ else:
182
+ return [func(object_list) for format, func in format_choices.items()]
183
+
184
+
185
+ def has_target_object(content: str, target_object: list):
186
+ derivative_objects = get_derivative_targets(target_object, random_choice=False)
187
+ return any([object in content.lower() for object in derivative_objects])
188
+
189
+
190
+ def is_prediction(content: str, target_object: list):
191
+ if re.search(PREDICT_TEMP, content.lower()):
192
+ return True
193
+ else:
194
+ return False
195
+
196
+
197
+ def is_correct_prediction(content: str, target_object: list):
198
+ derivative_objects = get_derivative_targets(target_object, random_choice=False)
199
+ if any(object in content.lower() for object in derivative_objects):
200
+ return True
201
+
202
+ # Remove all punctuation from content except '.', '!', and ','
203
+ content = ''.join([c for c in content if not (c in set(string.punctuation) and c not in {'.', '!', ','})])
204
+ for feature in target_object:
205
+ if feature in content.lower():
206
+ continue
207
+ else:
208
+ # Use a custom stemmer to handle cases like "crispy" -> "crisp"
209
+ def custom_stem(word):
210
+ if word.endswith("y") and len(word) > 3:
211
+ return PSTEMMER.stem(word[:-1])
212
+ return PSTEMMER.stem(word)
213
+ feature_stem = custom_stem(feature.lower())
214
+ content_words = [word for word in content.lower().split()]
215
+ content_stems = [custom_stem(word) for word in content_words]
216
+ if not any(feature_stem == stem or feature_stem in stem for stem in content_stems):
217
+ return False
218
+ return True
219
+
220
+ # derivative_objects = get_derivative_targets(target_object, random_choice=False)
221
+ # predict_regex = [PREDICT_TEMP + object for object in derivative_objects]
222
+ # if any([re.search(temp, content.lower()) for temp in predict_regex]):
223
+ # return True
224
+ # else:
225
+ # return False
226
+
227
+
228
+ def get_game_outcome(history: List[Dict[str, str]], target_object: list, min_turns: int):
229
+ history_length = 0
230
+ for i, item in enumerate(history):
231
+ history_length += 1
232
+ if item["role"] == "listener":
233
+ if is_prediction(item["content"], target_object):
234
+ if is_correct_prediction(item["content"], target_object):
235
+ return "game wins", history_length
236
+ else:
237
+ return "game over", history_length
238
+ elif not has_target_object(item["content"], target_object) and i == len(history) - 1:
239
+ return "listener does not infer the target object", history_length
240
+ else:
241
+ if is_prediction(item["content"], target_object):
242
+ return "speaker breaks the rules", history_length
243
+
244
+ return "game over", history_length
245
+
246
+
247
+ def convert_game_history_to_query(history, target, object_list):
248
+ GAME_RULE_PROMPT = GAME_RULE_PROMPTS[0]
249
+
250
+ history_str = ""
251
+ for i, message in enumerate(history):
252
+ history_str += "\n - {}: {}".format(
253
+ message["role"], message["content"])
254
+
255
+ if len(history) == 0:
256
+ query = (
257
+ GAME_RULE_PROMPT
258
+ + "The game is just initialized."
259
+ )
260
+ next_player = ROLES[0]
261
+ else:
262
+ query = (
263
+ GAME_RULE_PROMPT
264
+ + "\n### Game History:"
265
+ + history_str
266
+ )
267
+ if history[-1]["role"] == ROLES[0]:
268
+ next_player = ROLES[1]
269
+ else:
270
+ next_player = ROLES[0]
271
+
272
+ query += INSTRUCT_PROMPTS[next_player].format(
273
+ target=get_derivative_targets(target),
274
+ object_list=get_derivative_targets(object_list),
275
+ )
276
+
277
+ return query
278
+
279
+
280
+ def randomly_convert_game_history_to_query(history, target, object_list, **kwargs):
281
+ if "random_seed" in kwargs:
282
+ random.seed(kwargs["random_seed"])
283
+
284
+ if len(history) == 0:
285
+ next_player = ROLES[0]
286
+ else:
287
+ if history[-1]["role"] == ROLES[0]:
288
+ next_player = ROLES[1]
289
+ else:
290
+ next_player = ROLES[0]
291
+
292
+ dialog_prefix = "\n" + random.choice(
293
+ ["\n - ", "\n### ", "\n## ", "\n# ", "\n *** ", "\n **", "\n\n"]
294
+ )
295
+ answer_str, question_str = random.choice(
296
+ [
297
+ (next_player, ROLES[1] if next_player == ROLES[0] else ROLES[0]),
298
+ ("Assistant", "Human"),
299
+ ("Answer", "Question"),
300
+ ("Response", "Query"),
301
+ ("A", "Q"),
302
+ ]
303
+ )
304
+
305
+ player_prefix = {
306
+ ROLES[0]: answer_str if next_player == ROLES[0] else question_str,
307
+ ROLES[1]: answer_str if next_player == ROLES[1] else question_str,
308
+ }
309
+
310
+ history_str = ""
311
+ for i, message in enumerate(history):
312
+ history_str += "{}{}: {}".format(
313
+ dialog_prefix, player_prefix[message["role"]], message["content"]
314
+ )
315
+
316
+ prompt_type = random.choice(["chat", "chat_inverse", "alpaca"])
317
+ system_prefix = random.choice(["Rules", "Game Rule", "System"])
318
+
319
+ system_prompt = random.choice(GAME_RULE_PROMPTS)
320
+
321
+ if isinstance(object_list, str):
322
+ objects_str = object_list
323
+ else:
324
+ objects_str = get_derivative_targets(object_list)
325
+ if "chat" in prompt_type:
326
+ system_prompt += "\n\n" + PLAYER_INSTRUCT_PROMPTS[next_player].format(
327
+ target=get_derivative_targets(target),
328
+ object_list=objects_str,
329
+ )
330
+
331
+ if len(history) == 0:
332
+ history_str = ""
333
+ system_prompt += "The game is just initialized. "
334
+
335
+ system_str = f"{dialog_prefix}{system_prefix}: {system_prompt}"
336
+ if "inverse" in prompt_type:
337
+ query = (
338
+ history_str
339
+ + system_str
340
+ + dialog_prefix
341
+ + player_prefix[next_player]
342
+ + ": "
343
+ )
344
+ else:
345
+ query = (
346
+ system_str
347
+ + history_str
348
+ + dialog_prefix
349
+ + player_prefix[next_player]
350
+ + ": "
351
+ )
352
+
353
+ elif prompt_type == "alpaca":
354
+ if random.uniform(0, 1) < 0.2:
355
+ system_prompt = system_prefix + ": " + system_prompt
356
+
357
+ if len(history) == 0:
358
+ query = system_prompt + "The game is just initialized. "
359
+ else:
360
+ query = (
361
+ system_prompt + dialog_prefix + "Game History:" + history_str + "\n\n"
362
+ )
363
+
364
+ if random.uniform(0, 1) < 0.2:
365
+ query += (
366
+ PLAYER_INSTRUCT_PROMPTS[next_player].format(
367
+ target=target,
368
+ object_list=objects_str,
369
+ )[:-1]
370
+ + ": "
371
+ )
372
+ else:
373
+ query += (
374
+ PLAYER_INSTRUCT_PROMPTS[next_player].format(
375
+ target=target,
376
+ object_list=objects_str,
377
+ )
378
+ + dialog_prefix
379
+ + player_prefix[next_player]
380
+ + ": "
381
+ )
382
+
383
+ return query
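
A minimal usage sketch for the helpers defined in rsa_game.py above (not part of the upload): it assumes the repository root is on PYTHONPATH and that nltk is installed, since rsa_game.py imports PorterStemmer at module load time; the toy target, object list, and dialogue history below are made up for illustration.

from rsa_game import (
    convert_game_history_to_query,
    get_game_outcome,
    calculate_reward,
)

target = ["apple"]                          # target object, given as a list
object_list = ["apple", "banana", "pear"]   # candidate referents shown to both players

# Prompt for the first move: with an empty history the speaker plays first.
opening_query = convert_game_history_to_query([], target, object_list)

# Score a finished dialogue: the listener announces a correct guess on turn 2.
history = [
    {"role": "speaker", "content": "It is a red fruit."},
    {"role": "listener", "content": "I know the target object! It is apple!"},
]
outcome, turns = get_game_outcome(history, target, min_turns=2)   # ("game wins", 2)
reward = calculate_reward(turns, golden_turn=2)                   # 1 when within the golden-turn budget
print(outcome, turns, reward)
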
setup.py ADDED
@@ -0,0 +1,24 @@
1
+ import sys
2
+ import setuptools
3
+
4
+ setuptools.setup(
5
+ name="archer",
6
+ version='0.1.0',
7
+ url="https://github.com/andreazanette/OfflineArcher.git",
8
+ author=("Andrea Zanette"),
9
+ description="Research code for Offline ArCHer (Actor Critic Framework with Hierarchical Structures)",
10
+ long_description=open("README.md", "r", encoding='utf-8').read(),
11
+ long_description_content_type="text/markdown",
12
+ keywords='ArCHer',
13
+ license='MIT',
14
+ packages=setuptools.find_packages(),
15
+ install_requires=open("requirements.txt", "r").read().split(),
16
+ include_package_data=True,
17
+ python_requires='>=3.7',
18
+ classifiers=[
19
+ 'Intended Audience :: Science/Research',
20
+ 'License :: OSI Approved :: MIT License',
21
+ 'Programming Language :: Python :: 3',
22
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
23
+ ],
24
+ )
submit_BC_TwentyQuestions.sh ADDED
@@ -0,0 +1,25 @@
1
+ #!/bin/bash -l
2
+ # SLURM SUBMIT SCRIPT
3
+
4
+ #SBATCH --nodelist=node-gpu01
5
+ #SBATCH --gres=gpu:1 # Request N GPUs per machine
6
+
7
+ . init.sh
8
+
9
+ lr=1e-4
10
+ batch_size=32
11
+ accumulate_grad_batches=4 #8
12
+
13
+ python main.py fit \
14
+ --data=TwentyQuestions \
15
+ --data.batch_size=$batch_size \
16
+ --data.n_traj_eval=64 \
17
+ --model=BehaviouralCloning \
18
+ --model.lr=$lr \
19
+ --trainer.fast_dev_run=False \
20
+ --trainer.max_epochs=10 \
21
+ --trainer.accumulate_grad_batches=$accumulate_grad_batches \
22
+ --trainer.logger=WandbLogger \
23
+ --trainer.logger.init_args.project="TwentyQuestions-Official" \
24
+ --trainer.logger.init_args.name="BC-lr$lr" \
25
+ --trainer.val_check_interval=500
submit_FBC_TwentyQuestions.sh ADDED
@@ -0,0 +1,26 @@
1
+ #!/bin/bash -l
2
+ # SLURM SUBMIT SCRIPT
3
+
4
+ #SBATCH --nodelist=node-gpu01
5
+ #SBATCH --gres=gpu:1 # Request N GPUs per machine
6
+
7
+ . init.sh
8
+
9
+ lr=1e-4
10
+ batch_size=32
11
+ accumulate_grad_batches=4 #8
12
+
13
+ python main.py fit \
14
+ --data=TwentyQuestions \
15
+ --data.batch_size=$batch_size \
16
+ --data.n_traj_eval=64 \
17
+ --model=FilteredBehaviouralCloning \
18
+ --model.lr=$lr \
19
+ --model.filter=0.1 \
20
+ --trainer.fast_dev_run=False \
21
+ --trainer.max_epochs=100 \
22
+ --trainer.accumulate_grad_batches=$accumulate_grad_batches \
23
+ --trainer.logger=WandbLogger \
24
+ --trainer.logger.init_args.project="TwentyQuestions-Official" \
25
+ --trainer.logger.init_args.name="FBC-lr$lr" \
26
+ --trainer.val_check_interval=500
submit_OfflineArcher_RSA.sh ADDED
@@ -0,0 +1,166 @@
1
+ #!/bin/bash -l
2
+ # SLURM SUBMIT SCRIPT
3
+
4
+ #SBATCH --nodelist=node-gpu01
5
+ #SBATCH --gres=gpu:1 # Request N GPUs per machine
6
+
7
+ export TMPDIR=$HOME/tmp
8
+ export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:64,expandable_segments:True
9
+ export NCCL_P2P_DISABLE=1
10
+ export CUDA_LAUNCH_BLOCKING=1
11
+ export CUDA_DEVICE_MAX_CONNECTIONS=1
12
+ export NCCL_NSOCKS_PERTHREAD=4
13
+ export NCCL_SOCKET_NTHREADS=2
14
+ export PYTORCH_NO_CUDA_MEMORY_CACHING=1 # disable the CUDA caching allocator
15
+ export MAX_JOBS=4 # limit parallel loading jobs
16
+ export ACCELERATE_USE_DEEPSPEED="true"
17
+ export TORCH_USE_CUDA_DSA=1
18
+
19
+
20
+ actor_lr=1e-5
21
+ critic_lr=1e-5
22
+
23
+ critic_expectile=0.9
24
+ inv_temp=1.0
25
+
26
+ batch_size=2
27
+ accumulate_grad_batches=16 #8
28
+
29
+ echo "Current conda environment: $(which python)"
30
+ echo "Python version: $(python --version)"
31
+ echo "accelerate version: $(accelerate --version)"
32
+ echo "CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES"
33
+ echo "nvidia-smi:"
34
+ nvidia-smi
35
+
36
+ echo "==== Checking that Python can run ===="
37
+ python -c "print('Python is working')"
38
+
39
+ echo "==== Checking that main.py exists ===="
40
+ ls -l "$HOME/codes/OfflineArcher/main.py"
41
+
42
+ echo "==== Checking that accelerate can import torch ===="
43
+ accelerate launch --help || echo "the accelerate launch command could not run"
44
+
45
+ echo "==== Starting the actual training ===="
46
+
47
+ set -x # print every command for easier debugging
48
+
49
+ accelerate launch \
50
+ --config_file "$HOME/codes/OfflineArcher/deepspeed_zero3.yaml" \
51
+ --num_processes 4 \
52
+ --gpu_ids 4,5,6,7 \
53
+ --main_process_port 29500 \
54
+ "$HOME/codes/OfflineArcher/main.py" \
55
+ fit \
56
+ --data=RSAGame \
57
+ --data.batch_size=2 \
58
+ --data.base_model="Qwen3-14B" \
59
+ --data.n_traj_eval=4 \
60
+ --model=OfflineArcher \
61
+ --model.optimize_critic=True \
62
+ --model.actor_lr=$actor_lr \
63
+ --model.critic_lr=$critic_lr \
64
+ --model.discount_factor=0.99 \
65
+ --model.tau=0.05 \
66
+ --model.critic_expectile=$critic_expectile \
67
+ --model.inv_temp=$inv_temp \
68
+ --model.accumulate_grad_batches=4 \
69
+ --model.model_name_or_path="/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_word/merged_model" \
70
+ --trainer.fast_dev_run=False \
71
+ --trainer.max_epochs=1 \
72
+ --trainer.logger=WandbLogger \
73
+ --trainer.logger.init_args.project="WordTaboo-Official" \
74
+ --trainer.default_root_dir="checkpoints/archer_Qwen3-14B_word" \
75
+ --trainer.logger.init_args.name="Test-AC-critic_expectile_$critic_expectile-inv_temp_$inv_temp" \
76
+ --trainer.strategy=deepspeed_stage_3 \
77
+ --trainer.devices=4 \
78
+ --trainer.accelerator=gpu \
79
+ --trainer.precision=bf16 \
80
+ --trainer.enable_model_summary=false \
81
+ --trainer.val_check_interval=null \
82
+ --trainer.limit_val_batches=0 > Qwen3-14B_RSA_log.txt 2>&1
83
+
84
+ echo "第三个任务返回码: $?"
85
+ tail -20 Qwen3-14B_RSA_log.txt
86
+
87
+
88
+ accelerate launch \
89
+ --config_file "$HOME/codes/OfflineArcher/deepspeed_zero3.yaml" \
90
+ --num_processes 4 \
91
+ --gpu_ids 4,5,6,7 \
92
+ --main_process_port 29500 \
93
+ "$HOME/codes/OfflineArcher/main.py" \
94
+ fit \
95
+ --data=WordTaboo \
96
+ --data.batch_size=2 \
97
+ --data.base_model="Qwen3-14B" \
98
+ --data.n_traj_eval=4 \
99
+ --model=OfflineArcher \
100
+ --model.optimize_critic=True \
101
+ --model.actor_lr=$actor_lr \
102
+ --model.critic_lr=$critic_lr \
103
+ --model.discount_factor=0.99 \
104
+ --model.tau=0.05 \
105
+ --model.critic_expectile=$critic_expectile \
106
+ --model.inv_temp=$inv_temp \
107
+ --model.accumulate_grad_batches=4 \
108
+ --model.model_name_or_path="/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_word/merged_model" \
109
+ --trainer.fast_dev_run=False \
110
+ --trainer.max_epochs=1 \
111
+ --trainer.logger=WandbLogger \
112
+ --trainer.logger.init_args.project="WordTaboo-Official" \
113
+ --trainer.default_root_dir="checkpoints/archer_Qwen3-14B_word" \
114
+ --trainer.logger.init_args.name="Test-AC-critic_expectile_$critic_expectile-inv_temp_$inv_temp" \
115
+ --trainer.strategy=deepspeed_stage_3 \
116
+ --trainer.devices=4 \
117
+ --trainer.accelerator=gpu \
118
+ --trainer.precision=bf16 \
119
+ --trainer.enable_model_summary=false \
120
+ --trainer.val_check_interval=null \
121
+ --trainer.limit_val_batches=0 > Qwen3-14B_Word_log.txt 2>&1
122
+
123
+ echo "第三个任务返回码: $?"
124
+ tail -20 Qwen3-14B_Word_log.txt
125
+
126
+ accelerate launch \
127
+ --config_file "$HOME/codes/OfflineArcher/deepspeed_zero3.yaml" \
128
+ --num_processes 4 \
129
+ --gpu_ids 4,5,6,7 \
130
+ --main_process_port 29500 \
131
+ "$HOME/codes/OfflineArcher/main.py" \
132
+ fit \
133
+ --data=StrategicDialogue \
134
+ --data.batch_size=2 \
135
+ --data.base_model="Qwen3-14B" \
136
+ --data.n_traj_eval=4 \
137
+ --model=OfflineArcher \
138
+ --model.optimize_critic=True \
139
+ --model.actor_lr=$actor_lr \
140
+ --model.critic_lr=$critic_lr \
141
+ --model.discount_factor=0.99 \
142
+ --model.tau=0.05 \
143
+ --model.critic_expectile=$critic_expectile \
144
+ --model.inv_temp=$inv_temp \
145
+ --model.accumulate_grad_batches=4 \
146
+ --model.model_name_or_path="/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_strategic/merged_model" \
147
+ --trainer.fast_dev_run=False \
148
+ --trainer.max_epochs=1 \
149
+ --trainer.logger=WandbLogger \
150
+ --trainer.logger.init_args.project="Strategic-Official" \
151
+ --trainer.default_root_dir="checkpoints/archer_Qwen3-14B_strategic" \
152
+ --trainer.logger.init_args.name="Test-AC-critic_expectile_$critic_expectile-inv_temp_$inv_temp" \
153
+ --trainer.strategy=deepspeed_stage_3 \
154
+ --trainer.devices=4 \
155
+ --trainer.accelerator=gpu \
156
+ --trainer.precision=bf16 \
157
+ --trainer.enable_model_summary=false \
158
+ --trainer.val_check_interval=null \
159
+ --trainer.limit_val_batches=0 > Qwen3-14B_Strategic_log.txt 2>&1
160
+
161
+ echo "第五个任务返回码: $?"
162
+ tail -20 Qwen3-14B_Strategic_log.txt
163
+
164
+ set +x
165
+
166
+ echo "All tasks finished. Check the last 20 lines and the return code of each log file above."
submit_OfflineArcher_TwentyQuestions.sh ADDED
@@ -0,0 +1,37 @@
1
+ #!/bin/bash -l
2
+ # SLURM SUBMIT SCRIPT
3
+
4
+ #SBATCH --nodelist=node-gpu01
5
+ #SBATCH --gres=gpu:1 # Request N GPUs per machine
6
+
7
+ . init.sh
8
+
9
+ actor_lr=1e-4
10
+ critic_lr=1e-5
11
+
12
+ critic_expectile=0.9
13
+ inv_temp=1.0
14
+
15
+ batch_size=32
16
+ accumulate_grad_batches=4 #8
17
+
18
+ python main.py fit \
19
+ --data=TwentyQuestions \
20
+ --data.batch_size=$batch_size \
21
+ --data.n_traj_eval=64 \
22
+ --model=OfflineArcher \
23
+ --model.optimize_critic=True \
24
+ --model.actor_lr=$actor_lr \
25
+ --model.critic_lr=$critic_lr \
26
+ --model.discount_factor=0.99 \
27
+ --model.tau=0.05 \
28
+ --model.critic_expectile=$critic_expectile \
29
+ --model.inv_temp=$inv_temp \
30
+ --model.accumulate_grad_batches=$accumulate_grad_batches \
31
+ --trainer.fast_dev_run=False \
32
+ --trainer.max_epochs=10 \
33
+ --trainer.logger=WandbLogger \
34
+ --trainer.logger.init_args.project="TwentyQuestions-Official" \
35
+ --trainer.logger.init_args.name="Test-AC-critic_expectile_$critic_expectile-inv_temp_$inv_temp" \
36
+ --trainer.strategy='ddp_find_unused_parameters_true' \
37
+ --trainer.val_check_interval=250
test.py ADDED
@@ -0,0 +1,34 @@
1
+ from peft import PeftModel, PeftConfig
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
+ import os
5
+
6
+ # Configuration - replace these with your actual paths
7
+ LORA_MODEL_PATH = "./checkpoints/archer_Qwen3-14B_rsa" # e.g., "./lora_output"
8
+ BASE_MODEL_NAME = "/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_rsa/merged_model" # e.g., "./merged_model"
9
+
10
+ def merge_lora(base_model_name, lora_path, output_path):
11
+ # Load base model
12
+ base_model = AutoModelForCausalLM.from_pretrained(
13
+ base_model_name,
14
+ return_dict=True,
15
+ torch_dtype=torch.float16,
16
+ device_map="auto"
17
+ )
18
+
19
+ # Load tokenizer
20
+ tokenizer = AutoTokenizer.from_pretrained(base_model_name)
21
+
22
+ # Load LoRA adapter
23
+ lora_model = PeftModel.from_pretrained(base_model, lora_path)
24
+
25
+ # Merge weights
26
+ merged_model = lora_model.merge_and_unload()
27
+
28
+ # Save merged model
29
+ merged_model.save_pretrained(output_path)
30
+ tokenizer.save_pretrained(output_path)
31
+ print(f"Merged model saved to {output_path}")
32
+
33
+ if __name__ == "__main__":
34
+ merge_lora(BASE_MODEL_NAME, LORA_MODEL_PATH, os.path.join(LORA_MODEL_PATH, "merged_model"))
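
For completeness, a small follow-up sketch (not part of the upload) of how the merged checkpoint written by merge_lora could be loaded back for inference with transformers; the merged_path value and the prompt string are hypothetical examples.

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

merged_path = "./checkpoints/archer_Qwen3-14B_rsa/merged_model"  # where merge_lora saved the model

tokenizer = AutoTokenizer.from_pretrained(merged_path)
model = AutoModelForCausalLM.from_pretrained(
    merged_path,
    torch_dtype=torch.float16,
    device_map="auto",
)

prompt = "You are the pragmatic rational speaker. The target object is `apple`."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
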