Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
D
dify
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
ai-tech
dify
Commits
1bd0a76a
Unverified
Commit
1bd0a76a
authored
Aug 12, 2023
by
takatost
Committed by
GitHub
Aug 12, 2023
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
feat: optimize error raise (#820)
parent
2f179d61
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
20 additions
and
8 deletions
+20
-8
llm_callback_handler.py
api/core/callback_handler/llm_callback_handler.py
+1
-1
llm_generator.py
api/core/generator/llm_generator.py
+9
-4
azure_openai_provider.py
api/core/model_providers/providers/azure_openai_provider.py
+5
-1
generate_conversation_summary_task.py
api/tasks/generate_conversation_summary_task.py
+5
-2
No files found.
api/core/callback_handler/llm_callback_handler.py
View file @
1bd0a76a
...
...
@@ -96,4 +96,4 @@ class LLMCallbackHandler(BaseCallbackHandler):
                 )
                 self.conversation_message_task.save_message(llm_message=self.llm_message, by_stopped=True)
         else:
-            logging.exception(error)
+            logging.debug("on_llm_error: %s", error)
api/core/generator/llm_generator.py
View file @
1bd0a76a
...
...
@@ -2,6 +2,7 @@ import logging
 
 from langchain.schema import OutputParserException
 
+from core.model_providers.error import LLMError
 from core.model_providers.model_factory import ModelFactory
 from core.model_providers.models.entity.message import PromptMessage, MessageType
 from core.model_providers.models.entity.model_params import ModelKwargs
...
...
@@ -120,8 +121,10 @@ class LLMGenerator:
         try:
             output = model_instance.run(prompts)
             questions = output_parser.parse(output.content)
-        except Exception:
-            logging.exception("Error generating suggested questions after answer")
+        except LLMError:
+            questions = []
+        except Exception as e:
+            logging.exception(e)
             questions = []
 
         return questions
...
...
@@ -157,10 +160,12 @@ class LLMGenerator:
         try:
             output = model_instance.run(prompts)
             rule_config = output_parser.parse(output.content)
+        except LLMError as e:
+            raise e
         except OutputParserException:
             raise ValueError('Please give a valid input for intended audience or hoping to solve problems.')
-        except Exception:
-            logging.exception("Error generating prompt")
+        except Exception as e:
+            logging.exception(e)
             rule_config = {
                 "prompt": "",
                 "variables": [],
...
api/core/model_providers/providers/azure_openai_provider.py
View file @
1bd0a76a
...
...
@@ -283,7 +283,11 @@ class AzureOpenAIProvider(BaseModelProvider):
             if obfuscated:
                 credentials['openai_api_key'] = encrypter.obfuscated_token(credentials['openai_api_key'])
 
-            return credentials
+            return {
+                'openai_api_base': credentials['openai_api_base'],
+                'openai_api_key': credentials['openai_api_key'],
+                'base_model_name': credentials['base_model_name']
+            }
         else:
             if hosted_model_providers.azure_openai:
                 return {
...
api/tasks/generate_conversation_summary_task.py
View file @
1bd0a76a
...
...
@@ -6,6 +6,7 @@ from celery import shared_task
 from werkzeug.exceptions import NotFound
 
 from core.generator.llm_generator import LLMGenerator
+from core.model_providers.error import LLMError
 from extensions.ext_database import db
 from models.model import Conversation, Message
...
...
@@ -42,5 +43,7 @@ def generate_conversation_summary_task(conversation_id: str):
         end_at = time.perf_counter()
         logging.info(click.style(
             'Conversation summary generated: {} latency: {}'.format(conversation_id, end_at - start_at),
             fg='green'))
-    except Exception:
-        logging.exception("generate conversation summary failed")
+    except LLMError:
+        pass
+    except Exception as e:
+        logging.exception(e)
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment