Boundless DAS#
__author__ = "Zhengxuan Wu"
__version__ = "10/05/2023"
Overview#
This tutorial reproduces one of the key results of the Boundless DAS paper. It uses the same price-tagging dataset as the paper and focuses on finding an alignment for the left-boundary check only.
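The underlying task is a price-tagging game: an instruction names a lower and an upper price bound, and the instruct-tuned model should answer "Yes" exactly when the input amount falls between them. As a rough illustration only (the exact wording is generated by tutorial_price_tagging_utils and may differ), a single example looks something like:

Please say yes only if it costs between 2.50 dollars and 7.50 dollars, otherwise no.
Input: 3.98 dollars
Output: Yes

Boundless DAS then searches for a rotated subspace of an intermediate representation that causally mediates the left-boundary comparison, i.e. whether the amount is at least the lower bound.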
Set-up#
try:
    # This library is our indicator that the required installs
    # need to be done.
    import pyvene
except ModuleNotFoundError:
    !pip install git+https://github.com/stanfordnlp/pyvene.git
[2024-01-11 01:35:34,365] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
import torch
from tqdm import tqdm, trange
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import get_linear_schedule_with_warmup
from torch.nn import CrossEntropyLoss
from tutorial_price_tagging_utils import (
factual_sampler,
bound_alignment_sampler,
lower_bound_alignment_example_sampler,
)
from pyvene import (
IntervenableModel,
BoundlessRotatedSpaceIntervention,
RepresentationConfig,
IntervenableConfig,
)
from pyvene import create_llama
from pyvene import set_seed, count_parameters
config, tokenizer, llama = create_llama()
_ = llama.to("cuda") # single gpu
_ = llama.eval()  # eval mode; the base model itself is never trained
You are using the default legacy behaviour of the <class 'transformers.models.llama.tokenization_llama.LlamaTokenizer'>. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thouroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565
normalizer.cc(51) LOG(INFO) precompiled_charsmap is empty. use identity normalization.
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
loaded model
Factual performance of instruct-tuned LLaMA-7B#
raw_prealign = factual_sampler(tokenizer, 5000, game="pricing_tag")
prealign_dataset = Dataset.from_dict(
{"input_ids": raw_prealign[0], "labels": raw_prealign[1]}
)
prealign_dataset.set_format("torch", columns=["input_ids", "labels"])
prealign_dataloader = DataLoader(prealign_dataset, batch_size=8)
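Before measuring accuracy, it can help to eyeball one sampled example. The snippet below is not part of the original notebook; it only decodes the first prompt and, as the evaluation loop below also assumes, reads the expected answer token from the last label position.

example = prealign_dataset[0]
print(tokenizer.decode(example["input_ids"]))
# the final label position holds the id of the expected answer token
print("expected answer:", tokenizer.decode([int(example["labels"][-1])]))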
total_count = 0
correct_count = 0
with torch.no_grad():
    for step, inputs in enumerate(tqdm(prealign_dataloader)):
        for k, v in inputs.items():
            if v is not None and isinstance(v, torch.Tensor):
                inputs[k] = v.to(llama.device)
        # aligning forward!
        outputs = llama(
            input_ids=inputs["input_ids"],
            labels=inputs["labels"],
        )
        actual_test_labels = inputs["labels"][:, -1]
        pred_test_labels = torch.argmax(outputs.logits[:, -1], dim=-1)
        correct_labels = actual_test_labels == pred_test_labels
        total_count += len(correct_labels)
        correct_count += correct_labels.sum().tolist()
current_acc = round(correct_count / total_count, 2)
print(f"[WARNING: THIS NEEDS TO BE GOOD!] prealign task accuracy: {current_acc}")
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 625/625 [00:48<00:00, 12.82it/s]
[WARNING: THIS NEEDS TO BE GOOD!] prealign task accuracy: 0.92
Create training dataset for our trainable intervention (Boundless DAS)#
set_seed(42)
###################
# data loaders
###################
raw_data = bound_alignment_sampler(
tokenizer, 10000, [lower_bound_alignment_example_sampler]
)
raw_train = (
raw_data[0][:8000],
raw_data[1][:8000],
raw_data[2][:8000],
raw_data[3][:8000],
)
raw_eval = (
raw_data[0][8000:9000],
raw_data[1][8000:9000],
raw_data[2][8000:9000],
raw_data[3][8000:9000],
)
raw_test = (
raw_data[0][9000:],
raw_data[1][9000:],
raw_data[2][9000:],
raw_data[3][9000:],
)
train_dataset = Dataset.from_dict(
{
"input_ids": raw_train[0],
"source_input_ids": raw_train[1],
"labels": raw_train[2],
"intervention_ids": raw_train[3], # we will not use this field
}
).with_format("torch")
train_dataloader = DataLoader(
train_dataset,
batch_size=16,
)
eval_dataset = Dataset.from_dict(
{
"input_ids": raw_eval[0],
"source_input_ids": raw_eval[1],
"labels": raw_eval[2],
"intervention_ids": raw_eval[3], # we will not use this field
}
).with_format("torch")
eval_dataloader = DataLoader(
eval_dataset,
batch_size=16,
)
test_dataset = Dataset.from_dict(
{
"input_ids": raw_test[0],
"source_input_ids": raw_test[1],
"labels": raw_test[2],
"intervention_ids": raw_test[3], # we will not use this field
}
).with_format("torch")
test_dataloader = DataLoader(
test_dataset,
batch_size=16,
)
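Each example pairs a base prompt with a source (counterfactual) prompt, and the label stores the answer the model should give once the left-boundary information from the source is swapped into the base. An optional inspection sketch (not part of the original notebook, reusing only fields defined above):

pair = train_dataset[0]
print("base:   ", tokenizer.decode(pair["input_ids"]))
print("source: ", tokenizer.decode(pair["source_input_ids"]))
# the final label position holds the counterfactual answer token id
print("counterfactual answer:", tokenizer.decode([int(pair["labels"][-1])]))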
Boundless DAS on Position-aligned Tokens#
def simple_boundless_das_position_config(model_type, intervention_type, layer):
    config = IntervenableConfig(
        model_type=model_type,
        representations=[
            RepresentationConfig(
                layer,  # layer
                intervention_type,  # intervention type
            ),
        ],
        intervention_types=BoundlessRotatedSpaceIntervention,
    )
    return config
config = simple_boundless_das_position_config(
type(llama), "block_output", 15
)
intervenable = IntervenableModel(config, llama)
intervenable.set_device("cuda")
intervenable.disable_model_gradients()
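Layer 15 of the residual stream ("block_output") is just one point in the search space; the paper sweeps layers and token positions, and the helper above makes it easy to do the same. A hypothetical sketch of building configs for such a sweep (each config would still need its own training run):

candidate_layers = [5, 10, 15, 20]  # hypothetical search grid
candidate_configs = {
    layer: simple_boundless_das_position_config(type(llama), "block_output", layer)
    for layer in candidate_layers
}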
t_total = int(len(train_dataloader) * 3)
warm_up_steps = 0.1 * t_total
optimizer_params = []
for k, v in intervenable.interventions.items():
    optimizer_params += [{"params": v[0].rotate_layer.parameters()}]
    optimizer_params += [{"params": v[0].intervention_boundaries, "lr": 1e-2}]
optimizer = torch.optim.Adam(optimizer_params, lr=1e-3)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warm_up_steps, num_training_steps=t_total
)
# You can define your custom compute_metrics function.
def compute_metrics(eval_preds, eval_labels):
    total_count = 0
    correct_count = 0
    for eval_pred, eval_label in zip(eval_preds, eval_labels):
        actual_test_labels = eval_label[:, -1]
        pred_test_labels = torch.argmax(eval_pred[:, -1], dim=-1)
        correct_labels = actual_test_labels == pred_test_labels
        total_count += len(correct_labels)
        correct_count += correct_labels.sum().tolist()
    accuracy = round(correct_count / total_count, 2)
    return {"accuracy": accuracy}
epochs = 3
gradient_accumulation_steps = 4
total_step = 0
target_total_step = len(train_dataloader) * epochs
temperature_start = 50.0
temperature_end = 0.1
temperature_schedule = (
torch.linspace(temperature_start, temperature_end, target_total_step)
.to(torch.bfloat16)
.to("cuda")
)
intervenable.set_temperature(temperature_schedule[total_step])
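The temperature controls how sharp the boundary mask over the rotated dimensions is: training starts with a soft mask and anneals toward a nearly hard 0/1 selection of dimensions to swap. The following is a conceptual sketch of that idea only, not pyvene's actual BoundlessRotatedSpaceIntervention implementation:

def soft_boundary_mask(boundary, embed_dim, temperature):
    # normalized positions of the rotated dimensions in [0, 1)
    positions = torch.arange(embed_dim) / embed_dim
    # dimensions below the learned boundary get weights near 1, the rest near 0
    return torch.sigmoid((boundary - positions) / temperature)

print(soft_boundary_mask(torch.tensor(0.25), 8, 0.5))   # soft mask at high temperature
print(soft_boundary_mask(torch.tensor(0.25), 8, 0.01))  # nearly hard mask at low temperature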
def calculate_loss(logits, labels):
    shift_logits = logits[..., :, :].contiguous()
    shift_labels = labels[..., :].contiguous()
    # Flatten the tokens
    loss_fct = CrossEntropyLoss()
    shift_logits = shift_logits.view(-1, intervenable.model_config.vocab_size)
    shift_labels = shift_labels.view(-1)
    # Enable model parallelism
    shift_labels = shift_labels.to(shift_logits.device)
    loss = loss_fct(shift_logits, shift_labels)
    for k, v in intervenable.interventions.items():
        boundary_loss = 1.0 * v[0].intervention_boundaries.sum()
    loss += boundary_loss
    return loss
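In words, calculate_loss is the cross-entropy on the counterfactual answer plus the summed boundary widths, so the optimizer is rewarded for fitting the counterfactual behavior with as small a subspace as possible:

loss = CE(logits, labels) + 1.0 * sum(intervention_boundaries)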
intervenable.model.train()  # train mode enables dropout, but no gradients flow to the base model
print("llama trainable parameters: ", count_parameters(intervenable.model))
print("intervention trainable parameters: ", intervenable.count_parameters())
train_iterator = trange(0, int(epochs), desc="Epoch")
for epoch in train_iterator:
    epoch_iterator = tqdm(
        train_dataloader, desc=f"Epoch: {epoch}", position=0, leave=True
    )
    for step, inputs in enumerate(epoch_iterator):
        for k, v in inputs.items():
            if v is not None and isinstance(v, torch.Tensor):
                inputs[k] = v.to("cuda")
        b_s = inputs["input_ids"].shape[0]
        _, counterfactual_outputs = intervenable(
            {"input_ids": inputs["input_ids"]},
            [{"input_ids": inputs["source_input_ids"]}],
            {"sources->base": 80},  # swap 80th token
        )
        eval_metrics = compute_metrics(
            [counterfactual_outputs.logits], [inputs["labels"]]
        )
        # loss and backprop
        loss = calculate_loss(counterfactual_outputs.logits, inputs["labels"])
        loss_str = round(loss.item(), 2)
        epoch_iterator.set_postfix({"loss": loss_str, "acc": eval_metrics["accuracy"]})
        if gradient_accumulation_steps > 1:
            loss = loss / gradient_accumulation_steps
        loss.backward()
        if total_step % gradient_accumulation_steps == 0:
            if not (gradient_accumulation_steps > 1 and total_step == 0):
                optimizer.step()
                scheduler.step()
                intervenable.set_zero_grad()
                intervenable.set_temperature(temperature_schedule[total_step])
        total_step += 1
llama trainable parameters: 0
intervention trainable parameters: 16777218
Epoch: 0: 100%|██████████████████████████████████████████████████████████████████████████████████████| 500/500 [07:05<00:00, 1.18it/s, loss=0.5, acc=0.88]
Epoch: 1: 100%|█████████████████████████████████████████████████████████████████████████████████████| 500/500 [07:58<00:00, 1.04it/s, loss=0.39, acc=0.94]
Epoch: 2: 100%|█████████████████████████████████████████████████████████████████████████████████████| 500/500 [08:19<00:00, 1.00it/s, loss=0.35, acc=0.94]
Epoch: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [23:23<00:00, 467.83s/it]
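After training, you can read back the learned boundary directly from the intervention, via the same intervention_boundaries attribute the optimizer and the boundary penalty touch above. Its value indicates, roughly, how large a slice of the rotated space the alignment ended up using. This inspection snippet is not in the original notebook:

for k, v in intervenable.interventions.items():
    print(k, "learned boundary:", v[0].intervention_boundaries.detach().to(torch.float32).cpu())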
# evaluation on the test set
eval_labels = []
eval_preds = []
with torch.no_grad():
    epoch_iterator = tqdm(test_dataloader, desc="Test")
    for step, inputs in enumerate(epoch_iterator):
        for k, v in inputs.items():
            if v is not None and isinstance(v, torch.Tensor):
                inputs[k] = v.to("cuda")
        b_s = inputs["input_ids"].shape[0]
        _, counterfactual_outputs = intervenable(
            {"input_ids": inputs["input_ids"]},
            [{"input_ids": inputs["source_input_ids"]}],
            {"sources->base": 80},  # swap 80th token
        )
        eval_labels += [inputs["labels"]]
        eval_preds += [counterfactual_outputs.logits]
eval_metrics = compute_metrics(eval_preds, eval_labels)
print(eval_metrics)
Test: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 63/63 [00:45<00:00, 1.38it/s]
{'accuracy': 0.96}