This dataset contains the test and validation splits of the original SHP dataset (`stanfordnlp/SHP`), adapted to match the version used in RLHFlow.
Below is the code you can use to recreate this.
```python
from datasets import load_dataset

split = 'test'  # or 'validation'
ds = load_dataset('stanfordnlp/SHP', split=split)

def filter_example(example):
    # Keep only examples where the preferred response has more than
    # twice the score of the other response.
    if example['labels'] == 0:
        ratio = example['score_B'] * 1.0 / example['score_A']
    else:  # example['labels'] == 1
        ratio = example['score_A'] * 1.0 / example['score_B']
    return ratio > 2.0

ds_filtered = ds.filter(filter_example)
```
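As a quick sanity check (not part of the recreation script itself), you can compare the dataset size before and after filtering:

```python
# Report how many examples survive the score-ratio filter.
print(f"kept {len(ds_filtered)} of {len(ds)} examples")
```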
Then convert the filtered examples into chosen/rejected message pairs, keep at most 5 pairs per prompt, and write them to a JSONL file:
```python
from collections import defaultdict
import json

data_ret = defaultdict(list)
data = []

for example in ds_filtered:
    prompt = example['history']
    if example['score_A'] > example['score_B']:
        assert example['labels'] == 1
        chosen_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_A'], "role": "assistant"},
        ]
        rejected_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_B'], "role": "assistant"},
        ]
        chosen_rating = example['score_A']
        rejected_rating = example['score_B']
    elif example['score_A'] < example['score_B']:
        assert example['labels'] == 0
        chosen_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_B'], "role": "assistant"},
        ]
        rejected_message = [
            {"content": prompt, "role": "user"},
            {"content": example['human_ref_A'], "role": "assistant"},
        ]
        chosen_rating = example['score_B']
        rejected_rating = example['score_A']
    else:
        # Ties should never pass the ratio filter above.
        print("error")
        continue
    data_ret[prompt].append({
        "rejected": rejected_message,
        "chosen": chosen_message,
        "rejected_score": rejected_rating,
        "chosen_score": chosen_rating,
    })

print(len(data_ret))  # number of unique prompts

# Keep at most 5 preference pairs per prompt.
for key in data_ret:
    data.extend(data_ret[key][:5])

print(len(data))  # total number of preference pairs

with open(f'shp_{split}.jsonl', 'w+', encoding='utf-8') as fout:
    for d in data:
        fout.write(json.dumps(d, ensure_ascii=False) + "\n")
```
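For reference (this part is not required to recreate the dataset), the exported file can be loaded back with `datasets` to check that each row holds chosen/rejected message lists and their scores; the file name below assumes `split = 'test'`:

```python
from datasets import load_dataset

# Load the exported preference pairs back in to verify the format.
pairs = load_dataset('json', data_files='shp_test.jsonl', split='train')
print(pairs[0]['chosen'])     # [{'content': <prompt>, 'role': 'user'}, {'content': <answer>, 'role': 'assistant'}]
print(pairs[0]['chosen_score'], pairs[0]['rejected_score'])
```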