Commit f029b188 authored by StyleZhang's avatar StyleZhang

Merge branch 'feat/refact-common-layout' into deploy/dev

parents ecf114f9 98c2a899
@@ -45,7 +45,7 @@ class LLMGenerator:
         prompt = CONVERSATION_SUMMARY_PROMPT
         prompt_with_empty_context = prompt.format(context='')
         prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context)
-        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens
+        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens - 1
         context = ''
         for message in messages:
@@ -56,6 +56,9 @@ class LLMGenerator:
             if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
                 context += message_qa_text
+        if not context:
+            return '[message too long, no summary]'
         prompt = prompt.format(context=context)
         llm: StreamableOpenAI = LLMBuilder.to_llm(
...
@@ -28,7 +28,7 @@ def generate_conversation_summary_task(conversation_id: str):
     try:
         # get conversation messages count
         history_message_count = conversation.message_count
-        if history_message_count >= 5:
+        if history_message_count >= 5 and not conversation.summary:
             app_model = conversation.app
             if not app_model:
                 return
...
@@ -41,7 +41,8 @@ def remove_document_from_index_task(document_id: str):
         kw_index = IndexBuilder.get_index(dataset, 'economy')

         # delete from vector index
-        vector_index.delete_by_document_id(document.id)
+        if vector_index:
+            vector_index.delete_by_document_id(document.id)

         # delete from keyword index
         segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document.id).all()
...
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment