ai-tech / dify

Commit 5e692d72
Authored Jun 28, 2023 by StyleZhang

Merge branch 'main' into feat/refact-common-layout
Parents: d05f7910, 9d98669e
Showing 7 changed files with 36 additions and 25 deletions (+36, -25):
api/config.py                                  +1  -1
api/core/chain/multi_dataset_router_chain.py   +12 -4
api/core/generator/llm_generator.py            +11 -8
api/core/indexing_runner.py                    +4  -4
api/core/prompt/prompts.py                     +4  -4
docker/docker-compose.yaml                     +3  -3
web/package.json                               +1  -1
api/config.py
@@ -79,7 +79,7 @@ class Config:
         self.CONSOLE_URL = get_env('CONSOLE_URL')
         self.API_URL = get_env('API_URL')
         self.APP_URL = get_env('APP_URL')
-        self.CURRENT_VERSION = "0.3.5"
+        self.CURRENT_VERSION = "0.3.6"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')
api/core/chain/multi_dataset_router_chain.py
 import math
+import re
 from typing import Mapping, List, Dict, Any, Optional
 from langchain import PromptTemplate

@@ -178,13 +179,20 @@ class MultiDatasetRouterChain(Chain):
         route = self.router_chain.route(inputs)

-        if not route.destination:
+        destination = ''
+        if route.destination:
+            pattern = r'\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b'
+            match = re.search(pattern, route.destination, re.IGNORECASE)
+            if match:
+                destination = match.group()
+
+        if not destination:
             return {"text": ''}
-        elif route.destination in self.dataset_tools:
-            return {"text": self.dataset_tools[route.destination].run(
+        elif destination in self.dataset_tools:
+            return {"text": self.dataset_tools[destination].run(
                 route.next_inputs['input']
             )}
         else:
             raise ValueError(
-                f"Received invalid destination chain name '{route.destination}'"
+                f"Received invalid destination chain name '{destination}'"
             )
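Note: the rewritten handler no longer trusts the router LLM's destination string verbatim. It extracts a dataset UUID from whatever text comes back and returns an empty answer when none is found. A minimal standalone sketch of that extraction, with made-up sample strings (the pattern itself is copied from the hunk above):

```python
import re

# Same 8-4-4-4-12 hex UUID pattern as in the diff above.
UUID_PATTERN = r'\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b'

def extract_destination(raw: str) -> str:
    """Return the first UUID embedded in the router's answer, or ''."""
    # Hypothetical helper for illustration; the diff inlines this logic.
    match = re.search(UUID_PATTERN, raw, re.IGNORECASE)
    return match.group() if match else ''

# The router may wrap the dataset id in extra prose; the search tolerates it.
print(extract_destination("Use dataset 123e4567-e89b-42d3-a456-426614174000, please."))
# -> 123e4567-e89b-42d3-a456-426614174000
print(repr(extract_destination("no dataset applies")))  # -> ''
```

This makes the lookup into self.dataset_tools robust against models that answer with a full sentence instead of a bare id.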
api/core/generator/llm_generator.py
@@ -2,7 +2,7 @@ import logging
 from langchain import PromptTemplate
 from langchain.chat_models.base import BaseChatModel
-from langchain.schema import HumanMessage, OutputParserException
+from langchain.schema import HumanMessage, OutputParserException, BaseMessage
 from core.constant import llm_constant
 from core.llm.llm_builder import LLMBuilder

@@ -23,10 +23,10 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_name(cls, tenant_id: str, query, answer):
         prompt = CONVERSATION_TITLE_PROMPT
-        prompt = prompt.format(query=query, answer=answer)
+        prompt = prompt.format(query=query)
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             max_tokens=50
         )

@@ -40,11 +40,12 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_summary(cls, tenant_id: str, messages):
         max_tokens = 200
+        model = 'gpt-3.5-turbo'
         prompt = CONVERSATION_SUMMARY_PROMPT
         prompt_with_empty_context = prompt.format(context='')
-        prompt_tokens = TokenCalculator.get_num_tokens(generate_base_model, prompt_with_empty_context)
-        rest_tokens = llm_constant.max_context_token_length[generate_base_model] - prompt_tokens - max_tokens
+        prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context)
+        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens
         context = ''
         for message in messages:

@@ -52,14 +53,14 @@ class LLMGenerator:
                 continue
             message_qa_text = "Human:" + message.query + "\nAI:" + message.answer + "\n"
-            if rest_tokens - TokenCalculator.get_num_tokens(generate_base_model, context + message_qa_text) > 0:
+            if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
                 context += message_qa_text
         prompt = prompt.format(context=context)
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name=model,
             max_tokens=max_tokens
         )

@@ -102,7 +103,7 @@ class LLMGenerator:
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             temperature=0,
             max_tokens=256
         )

@@ -114,6 +115,8 @@ class LLMGenerator:
         try:
             output = llm(query)
+            if isinstance(output, BaseMessage):
+                output = output.content
             questions = output_parser.parse(output)
         except Exception:
             logging.exception("Error generating suggested questions after answer")
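Note: the last hunk is the behavioral fix here. When llm is a chat model, calling it returns a message object rather than a string, while output_parser.parse expects text. A minimal sketch of the unwrap step, using the langchain.schema types imported in the diff (the helper name is mine, not the author's):

```python
from langchain.schema import AIMessage, BaseMessage

def normalize_llm_output(output) -> str:
    """Chat models return a BaseMessage subclass; completion models return str."""
    # Illustrative helper; the diff inlines this isinstance check.
    if isinstance(output, BaseMessage):
        return output.content
    return output

assert normalize_llm_output("plain completion text") == "plain completion text"
assert normalize_llm_output(AIMessage(content="chat model reply")) == "chat model reply"
```

The remaining hunks pin the model name to 'gpt-3.5-turbo' (directly or via a local model variable) in place of the earlier generate_base_model reference, so the token budgeting through llm_constant.max_context_token_length[model] keys on an explicit model name.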
api/core/indexing_runner.py
@@ -346,10 +346,10 @@ class IndexingRunner:
         return text_docs

     def filter_string(self, text):
-        text = text.replace('<|', '<')
-        text = text.replace('|>', '>')
-        pattern = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]')
-        return pattern.sub('', text)
+        text = re.sub(r'<\|', '<', text)
+        text = re.sub(r'\|>', '>', text)
+        text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]', '', text)
+        return text

     def _get_splitter(self, processing_rule: DatasetProcessRule) -> TextSplitter:
         """
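Note: filter_string now performs all three substitutions with re.sub and raw-string patterns instead of mixing str.replace with a compiled pattern; the observable behavior appears unchanged. A quick check of the rewritten function, with an invented sample input:

```python
import re

def filter_string(text):
    # Rewrite special-token delimiters so e.g. '<|endoftext|>' cannot survive intact.
    text = re.sub(r'<\|', '<', text)
    text = re.sub(r'\|>', '>', text)
    # Strip control characters (except tab/newlines) plus \x7F-\xFF.
    text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]', '', text)
    return text

# Sample input invented for illustration.
print(filter_string('<|endoftext|>\x00 ok'))  # -> '<endoftext> ok'
```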
api/core/prompt/prompts.py
 CONVERSATION_TITLE_PROMPT = (
-    "Human:{{query}}\n-----\n"
+    "Human:{query}\n-----\n"
     "Help me summarize the intent of what the human said and provide a title, the title should not exceed 20 words.\n"
     "If the human said is conducted in Chinese, you should return a Chinese title.\n"
     "If the human said is conducted in English, you should return an English title.\n"

@@ -19,7 +19,7 @@ CONVERSATION_SUMMARY_PROMPT = (
 INTRODUCTION_GENERATE_PROMPT = (
     "I am designing a product for users to interact with an AI through dialogue. "
     "The Prompt given to the AI before the conversation is:\n\n"
-    "```\n{{prompt}}\n```\n\n"
+    "```\n{prompt}\n```\n\n"
     "Please generate a brief introduction of no more than 50 words that greets the user, based on this Prompt. "
     "Do not reveal the developer's motivation or deep logic behind the Prompt, "
     "but focus on building a relationship with the user:\n"

@@ -27,13 +27,13 @@ INTRODUCTION_GENERATE_PROMPT = (
 MORE_LIKE_THIS_GENERATE_PROMPT = (
     "-----\n"
-    "{{original_completion}}\n"
+    "{original_completion}\n"
     "-----\n\n"
     "Please use the above content as a sample for generating the result, "
     "and include key information points related to the original sample in the result. "
     "Try to rephrase this information in different ways and predict according to the rules below.\n\n"
     "-----\n"
-    "{{prompt}}\n"
+    "{prompt}\n"
 )

 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
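Note: the brace changes are the substance here. In Python's str.format, a doubled brace is an escape that renders as a literal brace, so "{{query}}" produced the literal text {query} and was never substituted; dropping to single braces is what lets generate_conversation_name in the llm_generator.py diff call prompt.format(query=query). A two-line illustration:

```python
old = "Human:{{query}}\n-----\n"
new = "Human:{query}\n-----\n"

print(old.format(query="hello"))  # 'Human:{query}' -- escaped, not substituted
print(new.format(query="hello"))  # 'Human:hello' -- substituted as intended
```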
docker/docker-compose.yaml
@@ -2,7 +2,7 @@ version: '3.1'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.3.5
+    image: langgenius/dify-api:0.3.6
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.

@@ -110,7 +110,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.3.5
+    image: langgenius/dify-api:0.3.6
     restart: always
     environment:
       # Startup mode, 'worker' starts the Celery worker for processing the queue.

@@ -156,7 +156,7 @@ services:
   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.3.5
+    image: langgenius/dify-web:0.3.6
     restart: always
     environment:
       EDITION: SELF_HOSTED
web/package.json
 {
   "name": "dify-web",
-  "version": "0.3.5",
+  "version": "0.3.6",
   "private": true,
   "scripts": {
     "dev": "next dev",