Commit 281f1133 authored by jyong

improve prompt

parent 5f754407
@@ -235,12 +235,12 @@ class DatasetIndexingEstimateApi(Resource):
                 raise NotFound("File not found.")

             indexing_runner = IndexingRunner()
-            response = indexing_runner.file_indexing_estimate(file_details, args['process_rule'], args['process_rule'])
+            response = indexing_runner.file_indexing_estimate(file_details, args['process_rule'], args['doc_form'])
         elif args['info_list']['data_source_type'] == 'notion_import':
             indexing_runner = IndexingRunner()
             response = indexing_runner.notion_indexing_estimate(args['info_list']['notion_info_list'],
-                                                                args['process_rule'], args['process_rule'])
+                                                                args['process_rule'], args['doc_form'])
         else:
             raise ValueError('Data source type not support')
         return response, 200
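
For context, the fix forwards args['doc_form'] (instead of repeating args['process_rule']) to both estimators. A minimal sketch of the resulting dispatch follows; the wrapper function and the 'upload_file' branch condition are assumptions for illustration, not code from this commit:

def estimate_for_data_source(indexing_runner, file_details, args):
    # Route the estimate by data source type, forwarding doc_form to the runner.
    # 'upload_file' is an assumed value; that condition sits above the hunk shown here.
    if args['info_list']['data_source_type'] == 'upload_file':
        return indexing_runner.file_indexing_estimate(
            file_details, args['process_rule'], args['doc_form'])
    elif args['info_list']['data_source_type'] == 'notion_import':
        return indexing_runner.notion_indexing_estimate(
            args['info_list']['notion_info_list'],
            args['process_rule'], args['doc_form'])
    else:
        raise ValueError('Data source type not support')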
@@ -229,7 +229,7 @@ class IndexingRunner:
            dataset_document.stopped_at = datetime.datetime.utcnow()
            db.session.commit()

-    def file_indexing_estimate(self, file_details: List[UploadFile], tmp_processing_rule: dict, doc_from: str = None) -> dict:
+    def file_indexing_estimate(self, file_details: List[UploadFile], tmp_processing_rule: dict, doc_form: str = None) -> dict:
        """
        Estimate the indexing for the document.
        """
@@ -261,7 +261,7 @@ class IndexingRunner:
            tokens += TokenCalculator.get_num_tokens(self.embedding_model_name,
                                                     self.filter_string(document.page_content))

-        if doc_from and doc_from == 'qa_model':
+        if doc_form and doc_form == 'qa_model':
            if len(preview_texts) > 0:
                # qa model document
                response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0])
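
Inside the runner, the renamed doc_form flag is what gates QA-pair generation for the preview. A rough, self-contained sketch of that check, with generate_qa_document passed in as a stand-in for the LLMGenerator.generate_qa_document call shown above:

def qa_preview(doc_form, preview_texts, tenant_id, generate_qa_document):
    # Mirrors the corrected `if doc_form and doc_form == 'qa_model'` check:
    # only the QA document form triggers LLM question/answer generation,
    # and only on the first preview chunk.
    if doc_form and doc_form == 'qa_model' and len(preview_texts) > 0:
        return generate_qa_document(tenant_id, preview_texts[0])
    return None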