run_embedding_to_vector_all_doc.py
import os
import time

from rag.load_data import DocsLoader
from rag.embedding_db import EmbeddingVectorDB
from rag.doc_split import TextSpliter

# Chunking parameters: maximum characters per chunk, and how many
# characters adjacent chunks may overlap
chunk_size = 1000
chunk_overlap = 20
# Local debugging configuration
embedding_model_path = r'D:\Python_project\NLP\model\bge-small-zh-v1.5'
device = 'cpu'
data_path = ['data/pdf', 'data/word', 'data/markdown', 'data/txt']
vector_db_path = f'data/all_doc_vector/all_doc_vector_{chunk_size}_metadata'
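
# Note: load_local_embedding_model below is project-internal. As a rough
# sketch (an assumption, not the project's actual implementation), it likely
# wraps a LangChain HuggingFace embedding class along these lines:
#
#   from langchain_community.embeddings import HuggingFaceBgeEmbeddings
#   embedding_model = HuggingFaceBgeEmbeddings(
#       model_name=embedding_model_path,   # local bge-small-zh-v1.5 path
#       model_kwargs={'device': device},
#   )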
if __name__ == '__main__':
    # Load the local embedding model
    embedding_model = EmbeddingVectorDB.load_local_embedding_model(embedding_model_path, device=device)

    # Load documents from each data directory
    docs = []
    for path in data_path:
        if 'word' in path:
            # Word documents are loaded file by file
            for file in os.listdir(path):
                docs.extend(DocsLoader().word_loader(os.path.join(path, file)))
        elif 'pdf' in path:
            docs.extend(DocsLoader().pdf_loader(path, is_directory=True))
        else:
            docs.extend(DocsLoader().file_directory_loader(path))

    # Split documents into chunks
    all_split_doc = []
    for doc in docs:
        split_docs = TextSpliter.text_split_by_manychar_or_charnum(
            doc, chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        # Attach metadata: the source file name without its extension
        # serves as the chunk's title
        title = os.path.splitext(os.path.basename(doc.metadata['source']))[0]
        for chunk in split_docs:
            chunk.metadata = {'title': title}
        all_split_doc.extend(split_docs)

    # Embed the chunks and persist them to the Chroma vector store
    start = time.time()
    db = EmbeddingVectorDB.create_chroma_vector(all_split_doc, vector_db_path, embedding_model)
    end = time.time()
    print(f'Vector store created in {end - start:.1f}s')
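
    # Illustrative sanity check (an assumption, not part of the original
    # script): query the freshly built store once. This presumes
    # create_chroma_vector returns a LangChain Chroma store exposing
    # similarity_search; adapt if the wrapper returns something else.
    hits = db.similarity_search('什么是向量数据库', k=3)  # sample Chinese query, since bge-small-zh is a Chinese model
    for hit in hits:
        print(hit.metadata.get('title'), hit.page_content[:80])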