Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
D
dify
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
ai-tech
dify
Commits
f029b188
Commit
f029b188
authored
Jun 29, 2023
by
StyleZhang
Browse files
Options
Browse Files
Download
Plain Diff
Merge branch 'feat/refact-common-layout' into deploy/dev
parents
ecf114f9
98c2a899
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
7 additions
and
3 deletions
+7
-3
llm_generator.py
api/core/generator/llm_generator.py
+4
-1
generate_conversation_summary_task.py
api/tasks/generate_conversation_summary_task.py
+1
-1
remove_document_from_index_task.py
api/tasks/remove_document_from_index_task.py
+2
-1
No files found.
api/core/generator/llm_generator.py
View file @
f029b188
...
@@ -45,7 +45,7 @@ class LLMGenerator:
         prompt = CONVERSATION_SUMMARY_PROMPT
         prompt_with_empty_context = prompt.format(context='')
         prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context)
-        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens
+        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens - 1
         context = ''
         for message in messages:
...
@@ -56,6 +56,9 @@ class LLMGenerator:
             if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
                 context += message_qa_text
+        if not context:
+            return '[message too long, no summary]'
+
         prompt = prompt.format(context=context)
         llm: StreamableOpenAI = LLMBuilder.to_llm(
...
...
api/tasks/generate_conversation_summary_task.py
View file @
f029b188
...
@@ -28,7 +28,7 @@ def generate_conversation_summary_task(conversation_id: str):
    try:
        # get conversation messages count
        history_message_count = conversation.message_count
-       if history_message_count >= 5:
+       if history_message_count >= 5 and not conversation.summary:
            app_model = conversation.app
            if not app_model:
                return
...
...
api/tasks/remove_document_from_index_task.py
View file @
f029b188
...
@@ -41,7 +41,8 @@ def remove_document_from_index_task(document_id: str):
        kw_index = IndexBuilder.get_index(dataset, 'economy')

        # delete from vector index
-       vector_index.delete_by_document_id(document.id)
+       if vector_index:
+           vector_index.delete_by_document_id(document.id)

        # delete from keyword index
        segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document.id).all()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment