examples/token_classification/peft_lora_ner.ipynb
# Install the required libraries
!pip install -q datasets evaluate transformers peft accelerate seqeval
# Import required libraries
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, DataCollatorForTokenClassification, TrainingArguments, Trainer, pipeline
from peft import get_peft_model, LoraConfig, TaskType
import evaluate
import numpy as np
from huggingface_hub import notebook_login
# Load the CoNLL-2003 dataset
raw_datasets = load_dataset("conll2003")
print(raw_datasets)
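# Optional sanity check: print how many sentences each split (train/validation/test) contains
print({split: len(ds) for split, ds in raw_datasets.items()})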
# Look at the tokens of the first training example
raw_datasets["train"][0]["tokens"]
# Look at the NER tags of the first training example
raw_datasets["train"][0]["ner_tags"]
# Get the label names for the NER tags
ner_feature = raw_datasets["train"].features["ner_tags"]
label_names = ner_feature.feature.names
label_names
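# For reference: CoNLL-2003 uses the BIO scheme, so each B-XXX label sits at an odd id
# with its I-XXX counterpart right after it; this is what the `label % 2 == 1` trick in
# align_labels_with_tokens (defined below) relies on
for i, name in enumerate(label_names):
    print(i, name)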
# Print the first example's words aligned with their label names
words = raw_datasets["train"][0]["tokens"]
labels = raw_datasets["train"][0]["ner_tags"]
line1 = ""
line2 = ""
for word, label in zip(words, labels):
    full_label = label_names[label]
    max_length = max(len(word), len(full_label))
    line1 += word + " " * (max_length - len(word) + 1)
    line2 += full_label + " " * (max_length - len(full_label) + 1)
print(line1)
print(line2)
# Load the tokenizer
model_checkpoint = "bert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
# Tokenize the first training example
inputs = tokenizer(raw_datasets["train"][0]["tokens"], is_split_into_words=True)
inputs.tokens()
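# The tokenizer splits some words into several subword tokens; word_ids() maps each
# token back to the index of the word it came from (None for special tokens like [CLS])
for token, word_id in zip(inputs.tokens(), inputs.word_ids()):
    print(f"{token:>12} -> {word_id}")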
def align_labels_with_tokens(labels, word_ids):
    new_labels = []
    current_word = None
    for word_id in word_ids:
        if word_id != current_word:
            # Start of a new word
            current_word = word_id
            label = -100 if word_id is None else labels[word_id]
            new_labels.append(label)
        elif word_id is None:
            # Special token: -100 is ignored by the loss function
            new_labels.append(-100)
        else:
            # Same word as the previous token
            label = labels[word_id]
            # If the label is B-XXX, change it to I-XXX
            if label % 2 == 1:
                label += 1
            new_labels.append(label)
    return new_labels
labels = raw_datasets["train"][0]["ner_tags"]
word_ids = inputs.word_ids()
print(labels)
print(align_labels_with_tokens(labels, word_ids))
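# Optional check: show each token next to its aligned label name (-100 positions are
# marked IGN; that index is ignored by PyTorch's cross-entropy loss)
aligned = align_labels_with_tokens(labels, word_ids)
for token, label_id in zip(inputs.tokens(), aligned):
    print(f"{token:>12} {'IGN' if label_id == -100 else label_names[label_id]}")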
# Tokenize each example and align the NER labels with the resulting subword tokens
def tokenize_and_align_labels(examples):
    tokenized_inputs = tokenizer(
        examples["tokens"], truncation=True, is_split_into_words=True
    )
    all_labels = examples["ner_tags"]
    new_labels = []
    for i, labels in enumerate(all_labels):
        word_ids = tokenized_inputs.word_ids(i)
        new_labels.append(align_labels_with_tokens(labels, word_ids))
    tokenized_inputs["labels"] = new_labels
    return tokenized_inputs
tokenized_datasets = raw_datasets.map(
    tokenize_and_align_labels,
    batched=True,
    remove_columns=raw_datasets["train"].column_names,
)
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
# Inspect the aligned labels of the first two training examples
for i in range(2):
    print(tokenized_datasets["train"][i]["labels"])
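# The collator pads input_ids with the tokenizer's pad token and labels with -100,
# so padded positions never contribute to the loss; a quick check on two examples:
batch = data_collator([tokenized_datasets["train"][i] for i in range(2)])
print(batch["labels"])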
# Load the seqeval metric for entity-level evaluation
metric = evaluate.load("seqeval")
# Create label mappings
id2label = {i: label for i, label in enumerate(label_names)}
label2id = {v: k for k, v in id2label.items()}
# Load the pre-trained model
model = AutoModelForTokenClassification.from_pretrained(
    model_checkpoint,
    id2label=id2label,
    label2id=label2id,
)
model.config.num_labels
model
# Configure LoRA (Low-Rank Adaptation): inject trainable low-rank updates into the
# query and key projections of each attention layer while keeping the rest of BERT frozen
peft_config = LoraConfig(target_modules=["query", "key"], task_type=TaskType.TOKEN_CLS)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
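# Optional: list which parameters are actually trainable; typically the injected
# LoRA A/B matrices plus the token-classification head
for name, param in model.named_parameters():
    if param.requires_grad:
        print(name, tuple(param.shape))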
def compute_metrics(eval_preds):
    logits, labels = eval_preds
    predictions = np.argmax(logits, axis=-1)
    # Remove the ignored index (special tokens) and convert ids to label names
    true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
    true_predictions = [
        [label_names[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    all_metrics = metric.compute(predictions=true_predictions, references=true_labels)
    return {
        "precision": all_metrics["overall_precision"],
        "recall": all_metrics["overall_recall"],
        "f1": all_metrics["overall_f1"],
        "accuracy": all_metrics["overall_accuracy"],
    }
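# Minimal sketch of what seqeval expects: lists of label-name sequences, one per sentence
# (toy inputs here, not model outputs)
print(metric.compute(
    predictions=[["O", "B-PER", "I-PER", "O"]],
    references=[["O", "B-PER", "I-PER", "O"]],
))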
# Log in to the Hugging Face Hub (required for push_to_hub=True)
notebook_login()
args = TrainingArguments(
    "bert-finetuned-ner-lora",
    eval_strategy="epoch",
    per_device_train_batch_size=32,  # decrease this if you run out of GPU memory
    per_device_eval_batch_size=64,
    save_strategy="epoch",
    learning_rate=2e-3,
    num_train_epochs=5,
    weight_decay=0.01,
    load_best_model_at_end=True,
    do_eval=True,
    do_predict=True,
    metric_for_best_model="accuracy",
    label_names=["labels"],  # set explicitly: the PEFT wrapper can hide the label column from Trainer's signature inspection
    push_to_hub=True,
)
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    processing_class=tokenizer,
    compute_metrics=compute_metrics,
)
trainer.train()
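# Save the final LoRA adapter (and tokenizer) into the output directory so it can be
# reloaded below; during training, Trainer only writes per-epoch checkpoints into
# checkpoint-* subfolders on its own
trainer.save_model("bert-finetuned-ner-lora")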
from peft import PeftModel
# Replace this with your own checkpoint
lora_checkpoint = "./bert-finetuned-ner-lora"
# Rebuild the tokenizer and base model, then attach the trained LoRA adapter
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
base_model = AutoModelForTokenClassification.from_pretrained(
    model_checkpoint,
    id2label=id2label,
    label2id=label2id,
)
lora_model = PeftModel.from_pretrained(base_model, lora_checkpoint)
token_classifier = pipeline(
    "token-classification", model=lora_model, tokenizer=tokenizer, aggregation_strategy="simple"
)
token_classifier("My name is Jino.")
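# Optional: merge the LoRA weights back into the base model for standalone deployment;
# the output directory name here is just an example
merged_model = lora_model.merge_and_unload()
merged_model.save_pretrained("bert-finetuned-ner-merged")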