profner_classification_master / generation_script.py
# Built from the output of the generation script at https://huggingface.co/datasets/luisgasco/profner_ner_master
# plus the valid.tsv and train.tsv files from task 1 of the ProfNER dataset.
import pandas as pd
from collections import defaultdict
from datasets import ClassLabel, Dataset, DatasetDict, Features, Value
# 1. Load the label TSVs
df_train_labels = pd.read_csv("/content/train.tsv", sep="\t")
df_valid_labels = pd.read_csv("/content/valid.tsv", sep="\t")
# Merge the labels into a single dict
labels_dict = dict(zip(df_train_labels["tweet_id"], df_train_labels["label"]))
labels_dict.update(dict(zip(df_valid_labels["tweet_id"], df_valid_labels["label"])))
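# Optional sanity check (not in the original script): ProfNER task 1 is binary
# classification, so every merged label should be 0 or 1.
assert set(int(v) for v in labels_dict.values()) <= {0, 1}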
# 2. Load the tweet IDs for each split
def load_ids(path):
    with open(path, encoding="utf-8") as f:
        return set(line.strip() for line in f if line.strip())
train_ids = load_ids("/content/train_ids.txt")
dev_ids = load_ids("/content/dev_ids.txt")
test_ids = load_ids("/content/test_ids.txt")
# Normalize all IDs to strings: pandas typically parses the numeric tweet_id
# column as integers, while the ID files are read as text, so key types must match.
labels_dict = {str(k): v for k, v in labels_dict.items()}
train_ids = set(str(id_) for id_ in train_ids)
dev_ids = set(str(id_) for id_ in dev_ids)
test_ids = set(str(id_) for id_ in test_ids)
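# Optional sanity check (not in the original script): after normalization,
# every split ID should have a matching label; print any that do not.
for name, ids in [("train", train_ids), ("dev", dev_ids), ("test", test_ids)]:
    missing = ids - labels_dict.keys()
    if missing:
        print(f"{name}: {len(missing)} IDs without a label, e.g. {sorted(missing)[:3]}")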
# 3. Read the CoNLL-style .spacy files (train + valid together)
def cargar_textos_conll(path):
    textos = defaultdict(list)
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.strip():
                parts = line.strip().split()
                # Assumed 5-column layout: token first, document ID second;
                # the remaining columns (offsets/tags) are not needed here.
                if len(parts) == 5:
                    token, doc_id, *_ = parts
                    textos[doc_id].append(token)
    return textos
textos_train = cargar_textos_conll("/content/train_spacy.txt")
textos_valid = cargar_textos_conll("/content/valid_spacy.txt")
textos = {**textos_train, **textos_valid}
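# Optional sanity check (not in the original script): the train and valid files
# are assumed to cover disjoint tweet IDs; any overlap means valid tokens
# silently replace train tokens in the merge above.
overlap = textos_train.keys() & textos_valid.keys()
if overlap:
    print(f"Warning: {len(overlap)} doc IDs appear in both CoNLL files")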
# 4. Build the dataset for each split; IDs lacking either text or label are skipped
def construir_split(ids):
    data = []
    for doc_id in ids:
        if doc_id in textos and doc_id in labels_dict:
            text = " ".join(textos[doc_id])
            label = int(labels_dict[doc_id])
            data.append({"tweet_id": doc_id, "text": text, "label": label})
    return Dataset.from_list(data)
# 5. Create the DatasetDict
dataset = DatasetDict({
    "train": construir_split(train_ids),
    "validation": construir_split(dev_ids),
    "test": construir_split(test_ids),
})
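# Quick inspection (not in the original script): confirm each split was built
# with the expected number of examples.
for split_name, ds in dataset.items():
    print(split_name, ds.num_rows)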
# Define the text labels; ClassLabel maps index 0 -> SIN_PROFESION, 1 -> CON_PROFESION
label_names = ["SIN_PROFESION", "CON_PROFESION"]
# Build the schema with ClassLabel so the label column carries named classes
features = Features({
    "tweet_id": Value("string"),
    "text": Value("string"),
    "label": ClassLabel(names=label_names),
})
# Apply the schema to each split
for split in dataset:
    dataset[split] = dataset[split].cast(features)
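# Final check: after the cast, `label` is a ClassLabel, so index 0 maps to
# SIN_PROFESION and 1 to CON_PROFESION.
print(dataset["train"].features["label"].names)
# Optionally publish the result; the repo ID below is an assumption based on
# this repository's name (uncomment and adjust before pushing).
# dataset.push_to_hub("luisgasco/profner_classification_master")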