dify (ai-tech/dify) · Commit cce70695

Authored Jun 28, 2023 by crazywoola

resolve: conflict

Parents: 638f2178, 0d92bb07

Showing 7 changed files with 35 additions and 32 deletions (+35 −32):
  api/config.py                                        (+1 −1)
  api/core/generator/llm_generator.py                  (+11 −8)
  api/core/indexing_runner.py                          (+4 −4)
  api/core/prompt/prompts.py                           (+4 −4)
  docker/docker-compose.yaml                           (+3 −3)
  web/app/components/app/overview/settings/index.tsx   (+11 −11)
  web/package.json                                     (+1 −1)
api/config.py

@@ -79,7 +79,7 @@ class Config:
         self.CONSOLE_URL = get_env('CONSOLE_URL')
         self.API_URL = get_env('API_URL')
         self.APP_URL = get_env('APP_URL')
-        self.CURRENT_VERSION = "0.3.5"
+        self.CURRENT_VERSION = "0.3.6"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')
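This hunk is the version bump from 0.3.5 to 0.3.6, mirrored below in docker/docker-compose.yaml and web/package.json. The surrounding settings all come from `get_env`, a helper defined elsewhere in dify's api/config.py and not shown in this hunk. As a rough sketch of its usual semantics — an environment lookup with a fallback default; the real helper and its defaults table may differ:

```python
import os

# Hypothetical defaults table; the actual defaults live in api/config.py.
DEFAULTS = {
    'DEPLOY_ENV': 'PRODUCTION',
    'COMMIT_SHA': '',
}

def get_env(key: str):
    # Prefer the process environment, fall back to the defaults table.
    return os.environ.get(key, DEFAULTS.get(key))
```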
api/core/generator/llm_generator.py

@@ -2,7 +2,7 @@ import logging

 from langchain import PromptTemplate
 from langchain.chat_models.base import BaseChatModel
-from langchain.schema import HumanMessage, OutputParserException
+from langchain.schema import HumanMessage, OutputParserException, BaseMessage

 from core.constant import llm_constant
 from core.llm.llm_builder import LLMBuilder

@@ -23,10 +23,10 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_name(cls, tenant_id: str, query, answer):
         prompt = CONVERSATION_TITLE_PROMPT
-        prompt = prompt.format(query=query, answer=answer)
+        prompt = prompt.format(query=query)

         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             max_tokens=50
         )

@@ -40,11 +40,12 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_summary(cls, tenant_id: str, messages):
         max_tokens = 200
+        model = 'gpt-3.5-turbo'

         prompt = CONVERSATION_SUMMARY_PROMPT
         prompt_with_empty_context = prompt.format(context='')
-        prompt_tokens = TokenCalculator.get_num_tokens(generate_base_model, prompt_with_empty_context)
-        rest_tokens = llm_constant.max_context_token_length[generate_base_model] - prompt_tokens - max_tokens
+        prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context)
+        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens

         context = ''
         for message in messages:

@@ -52,14 +53,14 @@ class LLMGenerator:
                 continue

             message_qa_text = "Human:" + message.query + "\nAI:" + message.answer + "\n"
-            if rest_tokens - TokenCalculator.get_num_tokens(generate_base_model, context + message_qa_text) > 0:
+            if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
                 context += message_qa_text

         prompt = prompt.format(context=context)

         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name=model,
             max_tokens=max_tokens
         )
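The summary path above budgets its context window explicitly: it subtracts the empty-prompt token count and the completion allowance (`max_tokens = 200`) from the model's maximum context length, then greedily appends Q&A pairs while they still fit. A self-contained sketch of the same budgeting idea, using `tiktoken` directly in place of dify's `TokenCalculator` (the helper names and constants here are illustrative, not from the source):

```python
import tiktoken

MAX_CONTEXT_TOKENS = 4096    # gpt-3.5-turbo context window at the time
MAX_COMPLETION_TOKENS = 200  # reserved for the generated summary

def count_tokens(text: str, model: str = 'gpt-3.5-turbo') -> int:
    return len(tiktoken.encoding_for_model(model).encode(text))

def pack_context(messages: list[dict], prompt_with_empty_context: str) -> str:
    # rest_tokens = context window - prompt overhead - completion allowance
    rest_tokens = (MAX_CONTEXT_TOKENS
                   - count_tokens(prompt_with_empty_context)
                   - MAX_COMPLETION_TOKENS)
    context = ''
    for message in messages:
        qa_text = f"Human:{message['query']}\nAI:{message['answer']}\n"
        # Greedily keep Q&A pairs only while the running total stays under budget.
        if rest_tokens - count_tokens(context + qa_text) > 0:
            context += qa_text
    return context
```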
@@ -102,7 +103,7 @@ class LLMGenerator:

         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             temperature=0,
             max_tokens=256
         )

@@ -114,6 +115,8 @@ class LLMGenerator:
         try:
             output = llm(query)
+            if isinstance(output, BaseMessage):
+                output = output.content
             questions = output_parser.parse(output)
         except Exception:
             logging.exception("Error generating suggested questions after answer")
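The other change in this file is the `BaseMessage` guard: LangChain's completion-style LLMs return a plain string from a direct call, while chat models return a message object whose text lives in `.content`. With `'gpt-3.5-turbo'` now hard-coded, a chat model can sit behind `LLMBuilder.to_llm`, so the output has to be normalized before parsing. A minimal standalone sketch of that normalization:

```python
from langchain.schema import AIMessage, BaseMessage

def normalize_llm_output(output) -> str:
    # Chat models return a BaseMessage subclass (e.g. AIMessage);
    # completion models return a plain str.
    if isinstance(output, BaseMessage):
        return output.content
    return output

assert normalize_llm_output('plain text') == 'plain text'
assert normalize_llm_output(AIMessage(content='chat text')) == 'chat text'
```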
api/core/indexing_runner.py

@@ -346,10 +346,10 @@ class IndexingRunner:
         return text_docs

     def filter_string(self, text):
-        text = text.replace('<|', '<')
-        text = text.replace('|>', '>')
-        pattern = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]')
-        return pattern.sub('', text)
+        text = re.sub(r'<\|', '<', text)
+        text = re.sub(r'\|>', '>', text)
+        text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]', '', text)
+        return text

     def _get_splitter(self, processing_rule: DatasetProcessRule) -> TextSplitter:
         """
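The rewritten `filter_string` keeps the old behavior but uses `re.sub` throughout, with `|` properly escaped in the regex patterns. A quick standalone check of what it does (assuming the regex semantics shown in the hunk); note that the `\x80-\xFF` class also strips Latin-1 letters such as `é`, so this filter is only safe for ASCII-oriented input:

```python
import re

def filter_string(text: str) -> str:
    # Unwrap '<|' / '|>' sentinel markers into plain angle brackets.
    text = re.sub(r'<\|', '<', text)
    text = re.sub(r'\|>', '>', text)
    # Drop ASCII control characters (keeping \t, \n, \r) and \x7F-\xFF.
    text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]', '', text)
    return text

print(filter_string('<|endoftext|>\x00 fin'))  # -> '<endoftext> fin'
```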
api/core/prompt/prompts.py

 CONVERSATION_TITLE_PROMPT = (
-    "Human:{{query}}\n-----\n"
+    "Human:{query}\n-----\n"
     "Help me summarize the intent of what the human said and provide a title, the title should not exceed 20 words.\n"
     "If the human said is conducted in Chinese, you should return a Chinese title.\n"
     "If the human said is conducted in English, you should return an English title.\n"

@@ -19,7 +19,7 @@ CONVERSATION_SUMMARY_PROMPT = (
 INTRODUCTION_GENERATE_PROMPT = (
     "I am designing a product for users to interact with an AI through dialogue. "
     "The Prompt given to the AI before the conversation is:\n\n"
-    "```\n{{prompt}}\n```\n\n"
+    "```\n{prompt}\n```\n\n"
     "Please generate a brief introduction of no more than 50 words that greets the user, based on this Prompt. "
     "Do not reveal the developer's motivation or deep logic behind the Prompt, "
     "but focus on building a relationship with the user:\n"

@@ -27,13 +27,13 @@ INTRODUCTION_GENERATE_PROMPT = (
 MORE_LIKE_THIS_GENERATE_PROMPT = (
     "-----\n"
-    "{{original_completion}}\n"
+    "{original_completion}\n"
     "-----\n\n"
     "Please use the above content as a sample for generating the result, "
     "and include key information points related to the original sample in the result. "
     "Try to rephrase this information in different ways and predict according to the rules below.\n\n"
     "-----\n"
-    "{{prompt}}\n"
+    "{prompt}\n"
 )

 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
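The prompt edits above all make the same fix: in Python's `str.format`, doubled braces are escapes that produce literal `{` and `}`, so `{{query}}` is never substituted, while single-brace `{query}` is an actual placeholder. Since llm_generator.py now calls `.format(query=query)` and `.format(context=...)` directly on these templates, the placeholders must use single braces:

```python
template = "Human:{query}\n-----\n"
print(template.format(query="What's the weather today?"))
# -> Human:What's the weather today?

# Doubled braces survive formatting as literal text:
print("Human:{{query}}".format(query="ignored"))
# -> Human:{query}
```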
docker/docker-compose.yaml

@@ -2,7 +2,7 @@ version: '3.1'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.3.5
+    image: langgenius/dify-api:0.3.6
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.

@@ -110,7 +110,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.3.5
+    image: langgenius/dify-api:0.3.6
     restart: always
     environment:
       # Startup mode, 'worker' starts the Celery worker for processing the queue.

@@ -156,7 +156,7 @@ services:
   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.3.5
+    image: langgenius/dify-web:0.3.6
     restart: always
     environment:
       EDITION: SELF_HOSTED
web/app/components/app/overview/settings/index.tsx

@@ -83,17 +83,6 @@ const SettingsModal: FC<ISettingsModalProps> = ({
   return (
     <>
-      {showEmojiPicker && <EmojiPicker
-        onSelect={(icon, icon_background) => {
-          console.log(icon, icon_background)
-          setEmoji({ icon, icon_background })
-          setShowEmojiPicker(false)
-        }}
-        onClose={() => {
-          setEmoji({ icon: '🤖', icon_background: '#FFEAD5' })
-          setShowEmojiPicker(false)
-        }}
-      />}
       <Modal
         title={t(`${prefixSettings}.title`)}
         isShow={isShow}

@@ -161,6 +150,17 @@ const SettingsModal: FC<ISettingsModalProps> = ({
           <Button className='mr-2 flex-shrink-0' onClick={onHide}>{t('common.operation.cancel')}</Button>
           <Button type='primary' className='flex-shrink-0' onClick={onClickSave} loading={saveLoading}>{t('common.operation.save')}</Button>
         </div>
+        {showEmojiPicker && <EmojiPicker
+          onSelect={(icon, icon_background) => {
+            console.log(icon, icon_background)
+            setEmoji({ icon, icon_background })
+            setShowEmojiPicker(false)
+          }}
+          onClose={() => {
+            setEmoji({ icon: '🤖', icon_background: '#FFEAD5' })
+            setShowEmojiPicker(false)
+          }}
+        />}
       </Modal>
     </>
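The two index.tsx hunks are the same eleven-line `EmojiPicker` block: the commit relocates it from a sibling of `<Modal>` to a child rendered after the action buttons, just before the modal closes. A plausible reading (not stated in the commit message) is that mounting the picker inside the modal keeps it within the modal's DOM subtree and stacking context, so it renders above the modal overlay.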
web/package.json

 {
   "name": "dify-web",
-  "version": "0.3.5",
+  "version": "0.3.6",
   "private": true,
   "scripts": {
     "dev": "next dev",