### What problem does this PR solve?

infiniflow#917 
infiniflow#915

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
KevinHuSh authored May 28, 2024
1 parent 92dc521 commit b27485f
Showing 5 changed files with 15 additions and 5 deletions.
2 changes: 1 addition & 1 deletion deepdoc/parser/pdf_parser.py
@@ -392,7 +392,7 @@ def _naive_vertical_merge(self):
                 b["text"].strip()[-1] in ",;:'\",、‘“;:-",
                 len(b["text"].strip()) > 1 and b["text"].strip(
                 )[-2] in ",;:'\",‘“、;:",
-                b["text"].strip()[0] in "。;?!?”)),,、:",
+                b_["text"].strip()[0] in "。;?!?”)),,、:",
             ]
             # features for not concating
             feats = [
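The fix is a single character: the third concatenation feature inspected the first character of the current box `b` when it should inspect the box below it, `b_`. A minimal sketch of the repaired heuristic, with `should_merge` as a hypothetical helper distilled from `_naive_vertical_merge` (the real method scores many more features than these two):

```python
# Hypothetical distillation of the vertical-merge heuristic; `b` is the
# current text box, `b_` the box directly below it.
def should_merge(b: dict, b_: dict) -> bool:
    up = b["text"].strip()
    down = b_["text"].strip()
    if not up or not down:
        return False
    feats = [
        up[-1] in ",;:'\",、‘“;:-",      # box above ends mid-clause
        down[0] in "。;?!?”)),,、:",    # box BELOW opens with closing punctuation
    ]
    return any(feats)

# Before the fix, the second feature read b["text"].strip()[0], re-examining
# the box above, so a lower box that starts with closing punctuation (and thus
# belongs to the sentence above) was never detected.
print(should_merge({"text": "上一行以逗号结尾,"}, {"text": "。下一行属于同一句"}))  # True
```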
14 changes: 11 additions & 3 deletions rag/app/naive.py
@@ -19,6 +19,8 @@
 from rag.nlp import rag_tokenizer, naive_merge, tokenize_table, tokenize_chunks, find_codec
 from deepdoc.parser import PdfParser, ExcelParser, DocxParser
 from rag.settings import cron_logger
+from rag.utils import num_tokens_from_string
+

 class Docx(DocxParser):
     def __init__(self):
@@ -149,8 +151,14 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,
             if not l:
                 break
             txt += l
-        sections = txt.split("\n")
-        sections = [(l, "") for l in sections if l]
+        sections = []
+        for sec in txt.split("\n"):
+            if num_tokens_from_string(sec) > 10 * parser_config.get("chunk_token_num", 128):
+                sections.append((sec[:int(len(sec)/2)], ""))
+                sections.append((sec[int(len(sec)/2):], ""))
+            else:
+                sections.append((sec, ""))
+
         callback(0.8, "Finish parsing.")

     elif re.search(r"\.doc$", filename, re.IGNORECASE):
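The replaced two-liner turned each non-empty line into a section with no bound on its size. The new loop keeps every line but bisects any line whose token count exceeds 10x the configured `chunk_token_num`, so `naive_merge` downstream never receives one enormous section. A self-contained sketch of that logic, with `count_tokens` as a whitespace-based stand-in for `rag.utils.num_tokens_from_string` (the real helper counts model tokens):

```python
def count_tokens(s: str) -> int:
    # Stand-in for rag.utils.num_tokens_from_string; word count only.
    return len(s.split())

def split_sections(txt: str, chunk_token_num: int = 128) -> list[tuple[str, str]]:
    sections = []
    for sec in txt.split("\n"):
        if count_tokens(sec) > 10 * chunk_token_num:
            # Oversized line: bisect by character position.
            mid = len(sec) // 2
            sections.append((sec[:mid], ""))
            sections.append((sec[mid:], ""))
        else:
            sections.append((sec, ""))
    return sections

# A 2000-word line against a 100-token budget (10x = 1000 tokens) is halved.
long_line = " ".join(["token"] * 2000)
print(len(split_sections(long_line, chunk_token_num=100)))  # 2
```

Note the guard is a single bisection, not a recursive one: a line at 30x the budget still yields two halves at 15x each, so the 10x threshold acts as a safety valve rather than a strict bound.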
@@ -163,7 +171,7 @@ def chunk(filename, binary=None, from_page=0, to_page=100000,

     else:
         raise NotImplementedError(
-            "file type not supported yet(doc, docx, pdf, txt supported)")
+            "file type not supported yet(pdf, xlsx, doc, docx, txt supported)")

     st = timer()
     chunks = naive_merge(
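The corrected message lists the branches that actually exist in `chunk()`: xlsx was parseable all along but missing from the old text. A minimal sketch of that suffix dispatch, with hypothetical handler names standing in for the real parser branches:

```python
import re

def dispatch(filename: str) -> str:
    # Hypothetical handler table; chunk() uses an if/elif chain of
    # re.search calls over the same suffixes.
    handlers = {
        r"\.docx?$": "doc/docx parser",
        r"\.pdf$": "pdf parser",
        r"\.xlsx?$": "excel parser",
        r"\.txt$": "plain-text splitter",
    }
    for pattern, name in handlers.items():
        if re.search(pattern, filename, re.IGNORECASE):
            return name
    # The message now matches the suffixes handled above.
    raise NotImplementedError(
        "file type not supported yet(pdf, xlsx, doc, docx, txt supported)")

print(dispatch("report.PDF"))  # case-insensitive match -> 'pdf parser'
```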
2 changes: 1 addition & 1 deletion rag/nlp/rag_tokenizer.py
@@ -24,7 +24,7 @@ def rkey_(self, line):
     def loadDict_(self, fnm):
         print("[HUQIE]:Build trie", fnm, file=sys.stderr)
         try:
-            of = open(fnm, "r")
+            of = open(fnm, "r", encoding='utf-8')
             while True:
                 line = of.readline()
                 if not line:
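Without an explicit encoding, `open()` decodes with `locale.getpreferredencoding(False)`, which on many Windows installs is a legacy code page (e.g. cp936 or cp1252) rather than UTF-8, so loading the UTF-8 trie dictionary could raise `UnicodeDecodeError` or silently produce mojibake. A small demonstration of the failure mode, with `huqie.txt` as a hypothetical stand-in for the dictionary file `loadDict_` reads:

```python
import locale

# The default text encoding is platform-dependent.
print(locale.getpreferredencoding(False))  # 'UTF-8' on Linux/macOS; often 'cp936'/'cp1252' on Windows

with open("huqie.txt", "w", encoding="utf-8") as f:
    f.write("深度学习 10 n\n")  # hypothetical dictionary row

# open("huqie.txt", "r") without an encoding may crash or mis-decode the
# Chinese entries wherever the locale encoding is not UTF-8.

# Pinning UTF-8, as the fix does, behaves the same on every platform:
with open("huqie.txt", "r", encoding="utf-8") as f:
    print(f.read().strip())
```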
1 change: 1 addition & 0 deletions requirements.txt
@@ -136,3 +136,4 @@ BCEmbedding
 loguru==0.7.2
 umap-learn
 fasttext==0.9.2
+volcengine
1 change: 1 addition & 0 deletions requirements_dev.txt
@@ -124,3 +124,4 @@ ollama==0.1.8
 redis==5.0.4
 fasttext==0.9.2
 umap-learn
+volcengine
