NLP&&LLM

Last updated: February 8, 2024 (midday)

BASE

Reference link: NLP_course

unigram tokenization


def encode_word(word, model):
    best_segmentations = [{"start": 0, "score": 1}] + [
        {"start": None, "score": None} for _ in range(len(word))
    ]
    # One entry per end position in the word; each entry's "start" records where the
    # best segmentation ending at that position begins, and "score" its total score.
    for start_idx in range(len(word)):
        # This should be properly filled by the previous steps of the loop
        best_score_at_start = best_segmentations[start_idx]["score"]
        for end_idx in range(start_idx + 1, len(word) + 1):
            token = word[start_idx:end_idx]  # candidate token, looked up in the model
            if token in model and best_score_at_start is not None:
                score = model[token] + best_score_at_start
                # If we have found a better segmentation ending at end_idx, we update
                if (
                    best_segmentations[end_idx]["score"] is None
                    or best_segmentations[end_idx]["score"] > score
                ):
                    # The scores are negative log probabilities, so lower is better: frequent
                    # tokens get low scores and are kept. The entry at end_idx is only
                    # overwritten when the segmentation up to start_idx plus the new token
                    # beats the current best segmentation ending at end_idx.
                    best_segmentations[end_idx] = {"start": start_idx, "score": score}

    segmentation = best_segmentations[-1]
    if segmentation["score"] is None:
        # We did not find a tokenization of the word -> unknown
        return ["<unk>"], None

    score = segmentation["score"]
    start = segmentation["start"]
    end = len(word)
    tokens = []
    while start != 0:
        tokens.insert(0, word[start:end])
        next_start = best_segmentations[start]["start"]
        end = start
        start = next_start
    tokens.insert(0, word[start:end])
    return tokens, score
# Why the last entry? best_segmentations[-1] is the best segmentation covering the whole
# word, so we backtrack from it through the stored "start" indices.
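A quick sanity check of encode_word (a sketch; the toy token scores below are hypothetical negative log probabilities, not taken from the course):

import math

# hypothetical unigram model: token -> negative log probability (lower = more frequent)
toy_model = {
    t: -math.log(p)
    for t, p in {"h": 0.1, "u": 0.1, "g": 0.05, "s": 0.1, "hug": 0.15, "gs": 0.05}.items()
}
print(encode_word("hugs", toy_model))  # expected something like (['hug', 's'], <total score>)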

MAIN NLP TASKS

Token classification

preparing the data

from datasets import load_dataset
raw_datasets = load_dataset("conll2003")

# Dataset structure

DatasetDict({
    train: Dataset({
        features: ['chunk_tags', 'id', 'ner_tags', 'pos_tags', 'tokens'],
        num_rows: 14041
    })
    validation: Dataset({
        features: ['chunk_tags', 'id', 'ner_tags', 'pos_tags', 'tokens'],
        num_rows: 3250
    })
    test: Dataset({
        features: ['chunk_tags', 'id', 'ner_tags', 'pos_tags', 'tokens'],
        num_rows: 3453
    })
})
# The last column is called tokens, but it contains words in the sense that these are
# pre-tokenized inputs that still need to go through the tokenizer for subword tokenization.

# Label feature
ner_feature = raw_datasets["train"].features["ner_tags"]

Sequence(feature=ClassLabel(num_classes=9, names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC'], names_file=None, id=None), length=-1, id=None)

# This is the type of the ner_tags column in the training set: a sequence of ClassLabel ids.

Inspecting an example


label_names = ner_feature.feature.names  # map label ids back to their string names

words = raw_datasets["train"][0]["tokens"]
labels = raw_datasets["train"][0]["ner_tags"]
line1 = ""
line2 = ""
for word, label in zip(words, labels):  # zip pairs up the elements of the two lists as tuples
    full_label = label_names[label]
    max_length = max(len(word), len(full_label))
    line1 += word + " " * (max_length - len(word) + 1)
    line2 += full_label + " " * (max_length - len(full_label) + 1)

print(line1)
print(line2)

Processing the data

from transformers import AutoTokenizer

model_checkpoint = "bert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

inputs = tokenizer(raw_datasets["train"][0]["tokens"], is_split_into_words=True)

inputs.word_ids()  # maps each token to the index of the word it came from, keeping tokens and words aligned
# This function expands the word-level labels to token level while fixing up the B-/I- tags
def align_labels_with_tokens(labels, word_ids):
    # labels come from ner_tags
    new_labels = []
    current_word = None
    for word_id in word_ids:
        if word_id != current_word:  # start of a new word
            current_word = word_id
            label = -100 if word_id is None else labels[word_id]
            new_labels.append(label)  # special tokens get -100, the first sub-token gets the word's label
        elif word_id is None:
            # special token ([CLS], [SEP], ...)
            new_labels.append(-100)
        else:
            # same word as the previous token
            label = labels[word_id]
            if label % 2 == 1:
                label += 1  # turn B-XXX into I-XXX for the later sub-tokens of the same word
            new_labels.append(label)
    return new_labels

def tokenize_and_align_labels(examples):
    tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)
    all_labels = examples["ner_tags"]
    new_labels = []
    for i, labels in enumerate(all_labels):
        word_ids = tokenized_inputs.word_ids(i)
        new_labels.append(align_labels_with_tokens(labels, word_ids))

    tokenized_inputs["labels"] = new_labels
    return tokenized_inputs

# The function above is applied through map below; padding everything to a common
# length is still handled later by the data collator.

# Preprocess the whole dataset
tokenized_datasets = raw_datasets.map(
    tokenize_and_align_labels,  # map passes batches of the raw dataset through this function
    batched=True,
    remove_columns=raw_datasets["train"].column_names,  # drop the original columns after processing
)
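A quick check of the alignment on the first training example (labels and inputs are the objects defined earlier in this section):

labels = raw_datasets["train"][0]["ner_tags"]
word_ids = inputs.word_ids()
print(labels)
print(align_labels_with_tokens(labels, word_ids))
# the token-level list adds -100 for the special tokens and adjusts labels for sub-tokens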

Fine-tuning the model with the Trainer API

from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
# The collator pads the labels with -100 so the padded positions are ignored by the loss

Metrics

Use seqeval to compute the metrics.

import evaluate
metric = evaluate.load("seqeval")

labels = raw_datasets["train"][0]["ner_tags"]
labels = [label_names[i] for i in labels]
# convert the integer label ids into their human-readable string names
predictions = labels.copy()
predictions[2] = "O"
metric.compute(predictions=[predictions], references=[labels])

The compute_metrics function

import numpy as np

def compute_metrics(eval_preds):
    logits, labels = eval_preds
    predictions = np.argmax(logits, axis=-1)  # predicted label ids; labels holds the gold ids
    # Drop the -100 positions (special tokens / non-first sub-tokens) and map ids to label names
    true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
    true_predictions = [
        [label_names[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    all_metrics = metric.compute(predictions=true_predictions, references=true_labels)
    return {key: all_metrics[f"overall_{key}"] for key in ("precision", "recall", "f1", "accuracy")}

defining the Model

Set up the two mappings between label ids and label names

id2label = {i: label for i, label in enumerate(label_names)}
label2id = {v: k for k, v in id2label.items()}
Then pass them to the model
from transformers import AutoModelForTokenClassification
model = AutoModelForTokenClassification.from_pretrained(
    model_checkpoint,
    id2label=id2label,
    label2id=label2id,
)

Fine-tuning the model

from huggingface_hub import notebook_login
notebook_login()

# Training arguments
from transformers import TrainingArguments
args = TrainingArguments(
    "bert-finetune-ner",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=2e-5,
    num_train_epochs=3,
    weight_decay=0.01,
    push_to_hub=True,
)

# Build a Trainer
from transformers import Trainer

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    tokenizer=tokenizer,
)
trainer.train()
trainer.push_to_hub(commit_message="Training complete")

With push_to_hub=True, the model is uploaded to the Hub every time it is saved during training.

Example of a full custom training loop

from torch.utils.data import DataLoader

train_dataloader = DataLoader(
    tokenized_datasets["train"],
    shuffle=True,
    collate_fn=data_collator,
    batch_size=8,
)
eval_dataloader = DataLoader(
    tokenized_datasets["validation"],
    collate_fn=data_collator,
    batch_size=8,
)

# Build the model
from transformers import AutoModelForTokenClassification

model = AutoModelForTokenClassification.from_pretrained(
    model_checkpoint,
    id2label=id2label,
    label2id=label2id,
)

from torch.optim import AdamW
optimizer = AdamW(model.parameters(), lr=2e-5)

# Accelerator
from accelerate import Accelerator
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader
)

# Learning-rate scheduler
from transformers import get_scheduler
num_train_epochs = 3
num_update_steps_per_epoch = len(train_dataloader)
num_training_steps = num_train_epochs * num_update_steps_per_epoch  # total optimizer steps over all epochs
lr_scheduler = get_scheduler(
    "linear",
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=num_training_steps,
)

Push to the Hub repository

from huggingface_hub import Repository, get_full_repo_name
model_name = "bert-finetune-ner-accelerate"
repo_name = get_full_repo_name(model_name)  # prepends your user/organization namespace to the model name

Train loop


def postprocess(predictions, labels):
    predictions = predictions.detach().cpu().clone().numpy()
    labels = labels.detach().cpu().clone().numpy()
    # copy the tensors off the GPU and convert them to numpy arrays

    true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
    true_predictions = [
        [label_names[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    return true_labels, true_predictions

# Training loop
from tqdm.auto import tqdm
import torch

progress_bar = tqdm(range(num_training_steps))

for epoch in range(num_train_epochs):
    model.train()
    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        progress_bar.update(1)

    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)

        predictions = outputs.logits.argmax(dim=-1)
        labels = batch["labels"]

The rest of this part is skipped 🤣👉🏻🤡

Fine-tuning a masked language model

from transformers import AutoModelForMaskedLM
model_checkpoint = "distilbert-base-uncased"
model = AutoModelForMaskedLM.from_pretrained(model_checkpoint)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

import torch
text = "This is a great [MASK]."
inputs = tokenizer(text, return_tensors="pt")  # return PyTorch tensors
token_logits = model(**inputs).logits
mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
mask_token_logits = token_logits[0, mask_token_index, :]
top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist()

from datasets import load_dataset

imdb_dataset = load_dataset("imdb")
sample = imdb_dataset["train"].shuffle(seed=42).select(range(3))
# .shuffle() on a specific split lets you shuffle just that part of the dataset before selecting

def tokenize_function(examples):
    result = tokenizer(examples["text"])
    if tokenizer.is_fast:
        # word_ids maps each token to the word it came from, computed separately for each example in the batch
        result["word_ids"] = [result.word_ids(i) for i in range(len(result["input_ids"]))]
    return result

tokenized_datasets = imdb_dataset.map(
    tokenize_function, batched=True, remove_columns=["text", "label"]
)

tokenizer.model_max_length  # the maximum context size the tokenizer/model can handle

# Concatenate examples and re-split them into chunks
chunk_size = 128

tokenized_samples = tokenized_datasets["train"][:3]
for idx, sample in enumerate(tokenized_samples["input_ids"]):
    print(f"'>>> Review {idx} length: {len(sample)}'")


concatenated_examples = {
    # merge the lists of all examples into one long list, key by key
    k: sum(tokenized_samples[k], []) for k in tokenized_samples.keys()
}
total_length = len(concatenated_examples["input_ids"])  # 951

chunks = {
    # re-slice the concatenated lists into pieces of length chunk_size
    k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)]
    for k, t in concatenated_examples.items()
}



If the last chunk comes out shorter than chunk_size, either pad it or simply drop it.

def group_texts(examples):
    concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
    total_length = len(concatenated_examples[list(examples.keys())[0]])
    total_length = (total_length // chunk_size) * chunk_size  # drop the ragged final chunk

    result = {
        k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)]
        for k, t in concatenated_examples.items()
    }

    result["labels"] = result["input_ids"].copy()
    return result

lm_datasets = tokenized_datasets.map(group_texts, batched=True)
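To see what a chunk looks like after group_texts (a quick inspection; the decoded text may span the boundary between two reviews):

tokenizer.decode(lm_datasets["train"][1]["input_ids"])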

Fine-tuning DistilBERT with the Trainer API

from transformers import DataCollatorForLanguageModeling
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

samples = [lm_datasets["train"][i] for i in range(2)]
for sample in samples:
    _ = sample.pop("word_ids")  # pop returns the value for the key and removes the key/value pair

import collections
import numpy as np
from transformers import default_data_collator

wwm_probability = 0.2

def whole_word_masking_data_collator(features):
    # features is the list of examples; feature is one example (a dict of key/value pairs)
    for feature in features:
        word_ids = feature.pop("word_ids")
        mapping = collections.defaultdict(list)  # each value in mapping is a list
        current_word_index = -1
        current_word = None
        for idx, word_id in enumerate(word_ids):
            if word_id is not None:
                if word_id != current_word:
                    current_word = word_id
                    # one more word seen
                    current_word_index += 1
                mapping[current_word_index].append(idx)  # record which token positions belong to this word
        # Randomly mask words: one Bernoulli draw (probability wwm_probability) per word,
        # giving a 0/1 array. Masking per word rather than per token is what makes this
        # whole-word masking.
        mask = np.random.binomial(1, wwm_probability, (len(mapping),))

        # input_ids and labels start out identical to the original example
        input_ids = feature["input_ids"]
        labels = feature["labels"]
        new_labels = [-100] * len(labels)  # a list of len(labels) entries, all -100
        for word_id in np.where(mask)[0]:  # indices of the words selected by the mask
            word_id = word_id.item()
            for idx in mapping[word_id]:
                new_labels[idx] = labels[idx]  # only the masked positions keep their true label; the rest stay -100
                input_ids[idx] = tokenizer.mask_token_id  # replace the masked tokens with [MASK]

        feature["labels"] = new_labels

    return default_data_collator(features)

train_size = 10_000
test_size = int(0.1 * train_size)  # int() converts to an integer

downsampled_dataset = lm_datasets["train"].train_test_split(
    train_size=train_size, test_size=test_size, seed=42
)
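The whole-word masking collator above can be checked on a couple of samples (a sketch following the course; the examples passed in must still contain word_ids):

samples = [lm_datasets["train"][i] for i in range(2)]
batch = whole_word_masking_data_collator(samples)
for chunk in batch["input_ids"]:
    print(f"\n'>>> {tokenizer.decode(chunk)}'")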

Training the model

Define the arguments for the Trainer

from transformers import TrainingArguments

batch_size = 64
logging_steps = len(downsampled_dataset["train"]) // batch_size
model_name = model_checkpoint.split("/")[-1]  # split on "/" and keep the last part
training_args = TrainingArguments(
    output_dir=f"{model_name}-finetuned-imdb",
    overwrite_output_dir=True,
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    weight_decay=0.01,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    push_to_hub=True,
    fp16=True,
    logging_steps=logging_steps,
)

from transformers import Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=downsampled_dataset["train"],
    eval_dataset=downsampled_dataset["test"],
    data_collator=data_collator,
    tokenizer=tokenizer,
)

def insert_random_mask(batch):
    features = [dict(zip(batch, t)) for t in zip(*batch.values())]
    # zip(*batch.values()) pairs up the elements of all columns by index, so each t is one
    # example as a tuple; zipping it back with the keys rebuilds a per-example dict
    masked_inputs = data_collator(features)
    # the returned columns carry a "masked_" prefix in front of the original keys
    return {"masked_" + k: v.numpy() for k, v in masked_inputs.items()}

downsampled_dataset = downsampled_dataset.remove_columns(["word_ids"])
eval_dataset = downsampled_dataset["test"].map(
    insert_random_mask,
    batched=True,
    remove_columns=downsampled_dataset["test"].column_names,
)

eval_dataset = eval_dataset.rename_columns(
    {
        "masked_input_ids": "input_ids",
        "masked_attention_mask": "attention_mask",
        "masked_labels": "labels",
    }
)

from torch.utils.data import DataLoader
from transformers import default_data_collator

batch_size = 64
train_dataloader = DataLoader(
    downsampled_dataset["train"],
    shuffle=True,
    batch_size=batch_size,
    collate_fn=data_collator,
)

eval_dataloader = DataLoader(
    eval_dataset, batch_size=batch_size, collate_fn=default_data_collator
)

model = AutoModelForMaskedLM.from_pretrained(model_checkpoint)
from torch.optim import AdamW
optimizer = AdamW(model.parameters(), lr=5e-5)

from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
    model, optimizer, train_dataloader, eval_dataloader
)
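The notes stop at accelerator.prepare. Below is a minimal sketch of the rest of the loop (assuming the same get_scheduler setup, num_train_epochs, and num_training_steps as in the token-classification loop above), with perplexity as the evaluation metric:

import math
import torch
from tqdm.auto import tqdm

progress_bar = tqdm(range(num_training_steps))

for epoch in range(num_train_epochs):
    model.train()
    for batch in train_dataloader:
        outputs = model(**batch)
        loss = outputs.loss
        accelerator.backward(loss)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        progress_bar.update(1)

    # Evaluation: gather the per-batch losses and turn the mean loss into perplexity
    model.eval()
    losses = []
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        losses.append(accelerator.gather(outputs.loss.repeat(batch_size)))
    losses = torch.cat(losses)[: len(eval_dataset)]
    try:
        perplexity = math.exp(torch.mean(losses))
    except OverflowError:
        perplexity = float("inf")
    print(f">>> Epoch {epoch}: Perplexity: {perplexity}")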

Translation

Load the data

from datasets import load_dataset
raw_datasets = load_dataset("kde4", lang1="en", lang2="fr")
split_datasets = raw_datasets["train"].train_test_split(train_size=0.9, seed=20)  # carve out the train and test splits

split_datasets["validation"] = split_datasets.pop("test")  # rename the test split to validation

split_datasets["train"][1]["translation"]  # each example has an id and a translation dict with "en" and "fr" keys

from transformers import pipeline

model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
translator = pipeline("translation", model=model_checkpoint)
translator("Default to expanded threads")

from transformers import AutoTokenizer

model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, return_tensors="pt")

# Tokenizer ready

en_sentence = split_datasets["train"][1]["translation"]["en"]
fr_sentence = split_datasets["train"][1]["translation"]["fr"]

inputs = tokenizer(en_sentence, text_target=fr_sentence)

# The resulting inputs
{'input_ids': [47591, 12, 9842, 19634, 9, 0], 'attention_mask': [1, 1, 1, 1, 1, 1], 'labels': [577, 5891, 2, 3184, 16, 2542, 5, 1710, 0]}
# If I understand this correctly: input_ids are the vocabulary indices of the English tokens,
# and labels are the indices of the French tokens; note that text_target must be passed explicitly.
print(tokenizer.convert_ids_to_tokens(inputs["labels"]))  # convert the ids back to tokens so the labels become readable

max_length = 128

def preprocess_function(examples):
    inputs = [ex["en"] for ex in examples["translation"]]
    targets = [ex["fr"] for ex in examples["translation"]]
    # split the translation dicts into source and target lists
    model_inputs = tokenizer(
        inputs, text_target=targets, max_length=max_length, truncation=True
    )
    return model_inputs

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
from transformers import DataCollatorForSeq2Seq

data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
#the padding value used to pad the labels should be -100 and not the padding token of the tokenizer, to make sure those padded values are ignored in the loss computation. it takes the tokenizer used to preprocess the inputs, but it also takes the model. This is because this data collator will also be responsible for preparing the decoder input IDs, which are shifted versions of the labels with a special token at the beginning. Since this shift is done slightly differently for different architectures, the DataCollatorForSeq2Seq needs to know the model object:

# For example:
batch = data_collator([tokenized_datasets["train"][i] for i in range(1, 3)])
batch.keys()
dict_keys(['attention_mask', 'input_ids', 'labels', 'decoder_input_ids'])
batch["labels"]
batch["decoder_input_ids"]  # these are shifted versions of the labels

Metrics

import evaluate
metric = evaluate.load("sacrebleu")
predictions = [
"This plugin lets you translate web pages between several languages automatically."
]
references = [
[
"This plugin allows you to automatically translate web pages between several languages."
]
]
# the predictions should be a list of sentences, but the references should be a list of lists of sentences.

metric.compute(predictions=predictions, references=references)  # compute the metric to judge translation quality

import numpy as np
def compute_metrics(eval_preds):
    preds, labels = eval_preds
    if isinstance(preds, tuple):  # isinstance checks whether the object is a tuple
        preds = preds[0]

    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)  # the predictions are already token ids, so just decode
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)  # keep values matching the condition, replace -100 with the pad token id
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    decoded_preds = [pred.strip() for pred in decoded_preds]  # strip whitespace (spaces, tabs, newlines) from both ends
    decoded_labels = [[label.strip()] for label in decoded_labels]

    result = metric.compute(predictions=decoded_preds, references=decoded_labels)  # compute the score
    return {"bleu": result["score"]}

This is the evaluation function passed to the trainer.

Fine-tuning the model

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    f"marian-finetuned-kde4-en-to-fr",
    evaluation_strategy="no",
    save_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=64,
    weight_decay=0.01,
    save_total_limit=3,
    num_train_epochs=3,
    predict_with_generate=True,
    fp16=True,  # speeds up training on GPU
    push_to_hub=True,
)
from transformers import Seq2SeqTrainer

trainer = Seq2SeqTrainer(
    model,
    args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
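A short sketch of how training and evaluation are then run (following the course; max_length reuses the value from the preprocessing step):

trainer.evaluate(max_length=max_length)  # BLEU before fine-tuning
trainer.train()
trainer.evaluate(max_length=max_length)  # BLEU after fine-tuning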

Summarization

english_dataset.set_format("pandas")#转换数据类型
english_df = english_dataset["train"][:]
# Show counts for top 20 products
english_df["product_category"].value_counts()[:20]#统计出现次数最多的两个

#数据过滤
def filter_books(example):
return (
example["product_category"] == "book"
or example["product_category"] == "digital_ebook_purchase"
)

english_dataset.reset_format() #重新设置数据的格式
spanish_books = spanish_dataset.filter(filter_books) #应该是利用数据的格式,按照返回值来过滤数据
english_books = english_dataset.filter(filter_books)

from datasets import concatenate_datasets, DatasetDict

books_dataset = DatasetDict()

for split in english_books.keys():
    books_dataset[split] = concatenate_datasets(
        [english_books[split], spanish_books[split]]
    )  # for each split, merge the English and Spanish examples into one dataset
    books_dataset[split] = books_dataset[split].shuffle(seed=42)

# Peek at a few examples
show_samples(books_dataset)  # books_dataset now mixes the two languages

Inspect the data

For summarization it is important to filter out very short summaries, otherwise the model will be biased toward producing such short summaries.

books_dataset = books_dataset.filter(lambda x: len(x["review_title"].split()) > 2)  # keep only reviews whose titles are longer than two words

from transformers import AutoTokenizer

model_checkpoint = "google/mt5-small"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

inputs = tokenizer("I loved reading the Hunger Games!")
# Output
{'input_ids': [336, 259, 28387, 11807, 287, 62893, 295, 12507, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1]}

tokenizer.convert_ids_to_tokens(inputs.input_ids)  # show the tokens behind the ids
['▁I', '▁', 'loved', '▁reading', '▁the', '▁Hung', 'er', '▁Games', '</s>']

max_input_length = 512
max_target_length = 30

# (I did not fully understand this part...)

def preprocess_function(examples):
    model_inputs = tokenizer(
        examples["review_body"],
        max_length=max_input_length,
        truncation=True,
    )
    labels = tokenizer(
        examples["review_title"], max_length=max_target_length, truncation=True
    )
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
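The preprocessing function is then applied over the whole DatasetDict (a short sketch mirroring the course's batched map call):

tokenized_datasets = books_dataset.map(preprocess_function, batched=True)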

create a strong, yet simple baseline!

A common baseline for text summarization is to simply take the first three sentences of an article, often called the lead-3 baseline.

import nltk

nltk.download("punkt")

from nltk.tokenize import sent_tokenize

def three_sentence_summary(text):
    return "\n".join(sent_tokenize(text)[:3])


print(three_sentence_summary(books_dataset["train"][1]["review_body"]))

def evaluate_baseline(dataset, metric):
    summaries = [three_sentence_summary(text) for text in dataset["review_body"]]
    return metric.compute(predictions=summaries, references=dataset["review_title"])

# i.e. score the first three sentences of each review against the reference title

import evaluate
import pandas as pd

rouge_score = evaluate.load("rouge")  # the ROUGE metric used to score the baseline
score = evaluate_baseline(books_dataset["validation"], rouge_score)
rouge_names = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
rouge_dict = dict((rn, round(score[rn].mid.fmeasure * 100, 2)) for rn in rouge_names)
rouge_dict

LangChain

chain

A chain serves to string together different components, such as LLMs, prompts, and agents.

class Chain(BaseModel, ABC):
    """Base interface that all chains should implement."""

    memory: BaseMemory
    callbacks: Callbacks

    def __call__(
        self,
        inputs: Any,
        return_only_outputs: bool = False,
        callbacks: Callbacks = None,
    ) -> Dict[str, Any]:
        ...

# Use it
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(  # create a prompt template
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
from langchain.chains import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)  # pass in the two components

# Run the chain only specifying the input variable.
print(chain.run("colorful socks"))


from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
human_message_prompt = HumanMessagePromptTemplate(
    prompt=PromptTemplate(
        template="What is a good name for a company that makes {product}?",
        input_variables=["product"],
    )
)
chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt])
chat = ChatOpenAI(temperature=0.9)
chain = LLMChain(llm=chat, prompt=chat_prompt_template)
print(chain.run("colorful socks"))

# Calling a chain
chat = ChatOpenAI(temperature=0)
prompt_template = "Tell me a {adjective} joke"
llm_chain = LLMChain(llm=chat, prompt=PromptTemplate.from_template(prompt_template))

llm_chain(inputs={"adjective": "corny"})  # fill in the input variables when calling the chain
llm_chain("colorful socks")

# Alternatively, apply() takes a list of input dicts

input_list = [
    {"product": "socks"},
    {"product": "computer"},
    {"product": "shoes"}
]

llm_chain.apply(input_list)
llm_chain.generate(input_list)  # similar functionality, but the return type is different (richer generation objects)
llm_chain("corny", return_only_outputs=True)  # return only the output keys, not the inputs
llm_chain.run({"adjective": "corny"})  # equivalent; run also accepts the inputs as a dict
llm_chain.predict(product="colorful socks")  # predict takes the inputs as keyword arguments, as below

template = """Tell me a {adjective} joke about {subject}."""
prompt = PromptTemplate(template=template, input_variables=["adjective", "subject"])
llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0))

llm_chain.predict(adjective="sad", subject="ducks")

# Parsing the outputs

# The prompt must carry an output parser
from langchain.output_parsers import CommaSeparatedListOutputParser

output_parser = CommaSeparatedListOutputParser()
template = """List all the colors in a rainbow"""
prompt = PromptTemplate(template=template, input_variables=[], output_parser=output_parser)
llm_chain = LLMChain(prompt=prompt, llm=llm)

llm_chain.predict()
llm_chain.predict_and_parse()


modules

prompts

from langchain import PromptTemplate
prompt_template = PromptTemplate.from_template(
    "Tell me a {adjective} joke about {content}."
)  # placeholders live inside the template string; format() fills in concrete values
prompt_template.format(adjective="funny", content="chickens")

invalid_prompt = PromptTemplate(
    input_variables=["adjective"],  # input_variables is checked against the template, so this mismatch raises an error
    template="Tell me a {adjective} joke about {content}."
)

# Chat prompt template
from langchain.prompts import ChatPromptTemplate

template = ChatPromptTemplate.from_messages([
    ("system", "You are a helpful AI bot. Your name is {name}."),
    ("human", "Hello, how are you doing?"),
    ("ai", "I'm doing well, thanks!"),
    ("human", "{user_input}"),  # multi-turn conversation
])

messages = template.format_messages(
    name="Bob",
    user_input="What is your name?"  # format_messages fills in the remaining variables
)

# Few-shot prompt templates
examples = [
    {
        "question": "Who lived longer, Muhammad Ali or Alan Turing?",
        "answer":
        """
Are follow up questions needed here: Yes.
Follow up: How old was Muhammad Ali when he died?
Intermediate answer: Muhammad Ali was 74 years old when he died.
Follow up: How old was Alan Turing when he died?
Intermediate answer: Alan Turing was 41 years old when he died.
So the final answer is: Muhammad Ali
"""
    }
]  # examples is a list of dicts
example_prompt = PromptTemplate(input_variables=["question", "answer"], template="Question: {question}\n{answer}")

print(example_prompt.format(**examples[0]))  # ** unpacks the dict into keyword arguments

from langchain.prompts import FewShotPromptTemplate

prompt = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    suffix="Question: {input}",  # appended after the formatted examples
    input_variables=["input"],  # "input" is the variable filled in at the end
)

print(prompt.format(input="Who was the father of Mary Ball Washington?"))  # format returns the fully rendered string

# The examples can also be chosen by an ExampleSelector
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
example_selector = SemanticSimilarityExampleSelector.from_examples(
    # This is the list of examples available to select from.
    examples,
    # This is the embedding class used to produce embeddings which are used to measure semantic similarity.
    OpenAIEmbeddings(),
    # This is the VectorStore class that is used to store the embeddings and do a similarity search over.
    Chroma,
    # This is the number of examples to produce.
    k=1
)

# Few-shot examples for chat models - API
# (the langchain.prompts.few_shot.FewShotChatMessagePromptTemplate class)
from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate

examples = [
    {"input": "2+2", "output": "4"},
    {"input": "2+3", "output": "5"},  # two worked examples
]
example_prompt = ChatPromptTemplate.from_messages(
    [('human', '{input}'), ('ai', '{output}')]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
    examples=examples,
    # This is a prompt template used to format each individual example.
    example_prompt=example_prompt,
)
final_prompt = ChatPromptTemplate.from_messages(
    [
        ('system', 'You are a helpful AI Assistant'),
        few_shot_prompt,
        ('human', '{input}'),
    ]
)

# root chain


memory

store

Hmm, it feels like this module mostly lives in third-party storage backends?

# These operations go through the ChatMessageHistory class
from langchain.memory import ChatMessageHistory
history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("what's up?")
history.messages
[HumanMessage(content='hi!', additional_kwargs={}),
 AIMessage(content='whats up?', additional_kwargs={})]

# Getting started
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory()
memory = ConversationBufferMemory(memory_key="chat_history")  # change the key the history is stored under
memory.chat_memory.add_user_message("hi!")
memory.chat_memory.add_ai_message("what's up?")
memory.load_memory_variables({})  # returns a dict; the history string contains the Human:/AI: turns

memory = ConversationBufferMemory(return_messages=True)
memory.chat_memory.add_user_message("hi!")
memory.chat_memory.add_ai_message("what's up?")
# now the memory is returned as a list of messages instead of a single string
# input_key and output_key control which chain inputs/outputs are written to memory

# end to end
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory


llm = OpenAI(temperature=0)
# Notice that "chat_history" is present in the prompt template
template = """You are a nice chatbot having a conversation with a human.

Previous conversation:
{chat_history}

New human question: {question}
Response:"""
prompt = PromptTemplate.from_template(template)
# Notice that we need to align the `memory_key`
memory = ConversationBufferMemory(memory_key="chat_history")
conversation = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
    memory=memory
)
# Notice that we just pass in the `question` variables - `chat_history` gets populated by memory
conversation({"question": "hi"})

# Memory in LLMChain
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

template = """You are a chatbot having a conversation with a human.

{chat_history}
Human: {human_input}
Chatbot:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "human_input"], template=template
)
# input_variables declares the external variables, so both the memory and the new input can be injected later
memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI()
llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
    memory=memory,
)
# LangChain's memory interface automatically fills the prompt variable whose name matches memory_key ("chat_history")
llm_chain.predict(human_input="Hi there my friend")  # predict with the new input passed as a string


# Adding memory to a chat model
from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages([  # compose the chat prompt from several message pieces
    SystemMessage(content="You are a chatbot having a conversation with a human."),  # the persistent system prompt
    MessagesPlaceholder(variable_name="chat_history"),  # where the memory will be injected (the memory key)
    HumanMessagePromptTemplate.from_template("{human_input}"),  # where the human input will be injected
])

memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)


# Memory in the Multi-Input Chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings.cohere import CohereEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
from langchain.vectorstores import Chroma
from langchain.docstore.document import Document

with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)

embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_texts(
texts, embeddings, metadatas=[{"source": i} for i in range(len(texts))]
)# 按照source texts每一个单元都作为嵌入数据处理
query = "What did the president say about Justice Breyer"
docs = docsearch.similarity_search(query) # 数据库可以直接使用其处理 .similarity_search()检索

from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory

template = """You are a chatbot having a conversation with a human.

Given the following extracted parts of a long document and a question, create a final answer.

{context}

{chat_history}
Human: {human_input}
Chatbot:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "human_input", "context"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history", input_key="human_input")
chain = load_qa_chain(
    OpenAI(temperature=0), chain_type="stuff", memory=memory, prompt=prompt
)
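# A usage sketch (following the LangChain docs): the retrieved docs and the question are
# passed together; only "human_input" is written to memory thanks to input_key above.
query = "What did the president say about Justice Breyer"
chain({"input_documents": docs, "human_input": query}, return_only_outputs=True)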
# Memory in an agent
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.utilities import GoogleSearchAPIWrapper

search = GoogleSearchAPIWrapper()
tools = [
    Tool(  # wrap a callable as a tool the agent can use
        name="Search",  # the tool's name
        func=search.run,  # the function that implements it
        description="useful for when you need to answer questions about current events",
    )
]  # the list of capabilities available to the agent


prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"

{chat_history}
Question: {input}
{agent_scratchpad}"""

# The prefix and suffix are embedded directly into the agent prompt
prompt = ZeroShotAgent.create_prompt(
    tools,
    prefix=prefix,
    suffix=suffix,
    input_variables=["input", "chat_history", "agent_scratchpad"],
)

memory = ConversationBufferMemory(memory_key="chat_history")  # same memory setup as before

llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)  # the agent and its tools
agent_chain = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True, memory=memory
)  # the memory is attached to the executor that wraps the agent
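
A usage sketch (following the LangChain docs pattern): running the agent twice shows that the second question can rely on the remembered first answer.

agent_chain.run(input="How many people live in canada?")
agent_chain.run(input="What is their national anthem called?")  # "their" is resolved through chat_history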




Retrievers


# Quick start!


# Document loaders - e.g. for CSV (comma-separated) files
from langchain.document_loaders.csv_loader import CSVLoader
loader = CSVLoader(file_path='./example_data/mllb_teams_2012.csv')
data = loader.load()

with open('../../state_of_the_union.txt') as f:
    state_of_the_union = f.read()
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
    # Set a really small chunk size, just to show.
    chunk_size = 100,  # maximum chunk length
    chunk_overlap = 20,  # overlap between consecutive chunks
    length_function = len,
    add_start_index = True,  # record where each chunk starts in the original text
)

# --- other splitting strategies are skipped here

# Lost in the middle, the problem with long contexts: when models must access relevant
# information in the middle of long contexts, they tend to ignore the provided documents.
# The fix is to reorder the documents after retrieval.
import os
import chromadb
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_transformers import (
    LongContextReorder,
)
from langchain.chains import StuffDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI

# Get embeddings.
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

texts = [
    "Basquetball is a great sport.",
    "Fly me to the moon is one of my favourite songs.",
    "The Celtics are my favourite team.",
    "This is a document about the Boston Celtics",
    "I simply love going to the movies",
    "The Boston Celtics won the game by 20 points",
    "This is just a random text.",
    "Elden Ring is one of the best games in the last 15 years.",
    "L. Kornet is one of the best Celtics players.",
    "Larry Bird was an iconic NBA player.",
]

# Create a retriever
retriever = Chroma.from_texts(texts, embedding=embeddings).as_retriever(
    search_kwargs={"k": 10}
)
query = "What can you tell me about the Celtics?"

# Get relevant documents ordered by relevance score
docs = retriever.get_relevant_documents(query)
docs
# (hmm, these results look a bit mixed up to me)

# Embeddings
from langchain.embeddings import OpenAIEmbeddings

embeddings_model = OpenAIEmbeddings()  # instantiate directly
embeddings = embeddings_model.embed_documents(
    [
        "Hi there!",
        "Oh, hello!",
        "What's your name?",
        "My friends call me World",
        "Hello World!"
    ]
)  # embed_documents accepts a list of strings
len(embeddings), len(embeddings[0])  # number of vectors and the length of each vector

embedded_query = embeddings_model.embed_query("What was the name mentioned in the conversation?")
embedded_query[:5]  # queries are embedded with a separate method, since they may be represented differently

# Caching of embeddings is also supported
# TODO
# Vector stores
# store the embedding vectors so they can be queried later
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS

# Load the document, split it into chunks, embed each chunk and load it into the vector store.
raw_documents = TextLoader('../../../state_of_the_union.txt').load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)  # splitting into chunks happens here
db = FAISS.from_documents(documents, OpenAIEmbeddings())
# Similarity search with a query string
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
# Similarity search with an embedding vector
embedding_vector = OpenAIEmbeddings().embed_query(query)
docs = db.similarity_search_by_vector(embedding_vector)
print(docs[0].page_content)
# Both give the same result

# Retrievers QUICK START
from abc import ABC, abstractmethod
from typing import Any, List
from langchain.schema import Document
from langchain.callbacks.manager import Callbacks

class BaseRetriever(ABC):
    ...
    def get_relevant_documents(
        self, query: str, *, callbacks: Callbacks = None, **kwargs: Any
    ) -> List[Document]:
        """Retrieve documents relevant to a query.
        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
        Returns:
            List of relevant documents
        """
        ...

    async def aget_relevant_documents(
        self, query: str, *, callbacks: Callbacks = None, **kwargs: Any
    ) -> List[Document]:
        """Asynchronously get documents relevant to a query.
        Args:
            query: string to find relevant documents for
            callbacks: Callback manager or list of callbacks
        Returns:
            List of relevant documents
        """
        ...

from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.document_loaders import TextLoader
loader = TextLoader('../state_of_the_union.txt', encoding='utf8')
from langchain.indexes import VectorstoreIndexCreator
index = VectorstoreIndexCreator().from_loaders([loader])
query = "What did the president say about Ketanji Brown Jackson"
index.query_with_sources(query)  # returns a dict with the answer string and the source documents
index.query("Summarize the general content of this document.", retriever_kwargs={"search_kwargs": {"filter": {"source": "../state_of_the_union.txt"}}})  # the retriever can also filter by source


#muliquery use


# Build a sample vectorDB
from langchain.vectorstores import Chroma
from langchain.document_loaders import WebBaseLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Load blog post
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()

# Split
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
splits = text_splitter.split_documents(data)

# VectorDB
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=splits, embedding=embedding)
#chroma表示split用embedding处理后存入数据库
from langchain.chat_models import ChatOpenAI
from langchain.retrievers.multi_query import MultiQueryRetriever

question = "What are the approaches to Task Decomposition?"
llm = ChatOpenAI(temperature=0)
retriever_from_llm = MultiQueryRetriever.from_llm(
retriever=vectordb.as_retriever(), llm=llm
)
unique_docs = retriever_from_llm.get_relevant_documents(query=question)#表示依据询问,连续产生相关的疑问来进行检索
len(unique_docs)
from typing import List
from langchain import LLMChain
from pydantic import BaseModel, Field#数据验证库 可以定义不同名字的类
from langchain.prompts import PromptTemplate
from langchain.output_parsers import PydanticOutputParser


# Output parser will split the LLM result into a list of queries
class LineList(BaseModel):
    # "lines" is the key (attribute name) of the parsed output
    lines: List[str] = Field(description="Lines of text")


class LineListOutputParser(PydanticOutputParser):
    def __init__(self) -> None:
        super().__init__(pydantic_object=LineList)

    def parse(self, text: str) -> LineList:
        lines = text.strip().split("\n")
        return LineList(lines=lines)


output_parser = LineListOutputParser()

QUERY_PROMPT = PromptTemplate(
    input_variables=["question"],
    template="""You are an AI language model assistant. Your task is to generate five
    different versions of the given user question to retrieve relevant documents from a vector
    database. By generating multiple perspectives on the user question, your goal is to help
    the user overcome some of the limitations of the distance-based similarity search.
    Provide these alternative questions separated by newlines.
    Original question: {question}""",
)
llm = ChatOpenAI(temperature=0)

# Chain
llm_chain = LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)

# Other inputs
question = "What are the approaches to Task Decomposition?"
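# A sketch of running the custom multi-query retriever with the chain above (following the
# LangChain docs; "lines" names the attribute produced by the parser).
retriever = MultiQueryRetriever(
    retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key="lines"
)
unique_docs = retriever.get_relevant_documents(query=question)
len(unique_docs)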

# Time-weighted vector store retrieval
# scoring = semantic_similarity + (1.0 - decay_rate) ^ hours_passed
# every access refreshes a document's last-accessed time, so recently used memories stay "fresh"
import faiss

from datetime import datetime, timedelta
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import Document
from langchain.vectorstores import FAISS

# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})  # the vector store describes how entries are stored
retriever = TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, decay_rate=.0000000000000000000000001, k=1)
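
A usage sketch (based on the LangChain docs example): with such a tiny decay_rate, an older but more salient document still wins the retrieval.

yesterday = datetime.now() - timedelta(days=1)
retriever.add_documents(
    [Document(page_content="hello world", metadata={"last_accessed_at": yesterday})]
)
retriever.add_documents([Document(page_content="hello foo")])
retriever.get_relevant_documents("hello world")  # "hello world" is returned first despite being older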
