Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
D
dify
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
ai-tech
dify
Commits
767b526b
Commit
767b526b
authored
Jul 25, 2023
by
John Wang
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
feat: add llm timeout
parent
943481a8
Changes
5
Show whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
24 additions
and
4 deletions
+24
-4
streamable_azure_chat_open_ai.py
api/core/llm/streamable_azure_chat_open_ai.py
+6
-1
streamable_azure_open_ai.py
api/core/llm/streamable_azure_open_ai.py
+5
-1
streamable_chat_anthropic.py
api/core/llm/streamable_chat_anthropic.py
+3
-0
streamable_chat_open_ai.py
api/core/llm/streamable_chat_open_ai.py
+5
-1
streamable_open_ai.py
api/core/llm/streamable_open_ai.py
+5
-1
No files found.
api/core/llm/streamable_azure_chat_open_ai.py
View file @
767b526b
...
...
@@ -2,7 +2,7 @@ from langchain.callbacks.manager import Callbacks, CallbackManagerForLLMRun
from
langchain.chat_models.openai
import
_convert_dict_to_message
from
langchain.schema
import
BaseMessage
,
LLMResult
,
ChatResult
,
ChatGeneration
from
langchain.chat_models
import
AzureChatOpenAI
from
typing
import
Optional
,
List
,
Dict
,
Any
from
typing
import
Optional
,
List
,
Dict
,
Any
,
Tuple
,
Union
from
pydantic
import
root_validator
...
...
@@ -10,6 +10,11 @@ from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class
StreamableAzureChatOpenAI
(
AzureChatOpenAI
):
request_timeout
:
Optional
[
Union
[
float
,
Tuple
[
float
,
float
]]]
=
(
5.0
,
120.0
)
"""Timeout for requests to OpenAI completion API. Default is (5.0, 120.0): 5s connect, 120s read."""
max_retries
:
int
=
2
"""Maximum number of retries to make when generating."""
@
root_validator
()
def
validate_environment
(
cls
,
values
:
Dict
)
->
Dict
:
"""Validate that api key and python package exists in environment."""
...
...
api/core/llm/streamable_azure_open_ai.py
View file @
767b526b
from
langchain.callbacks.manager
import
Callbacks
from
langchain.llms
import
AzureOpenAI
from
langchain.schema
import
LLMResult
from
typing
import
Optional
,
List
,
Dict
,
Mapping
,
Any
from
typing
import
Optional
,
List
,
Dict
,
Mapping
,
Any
,
Union
,
Tuple
from
pydantic
import
root_validator
...
...
@@ -11,6 +11,10 @@ from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class
StreamableAzureOpenAI
(
AzureOpenAI
):
openai_api_type
:
str
=
"azure"
openai_api_version
:
str
=
""
request_timeout
:
Optional
[
Union
[
float
,
Tuple
[
float
,
float
]]]
=
(
5.0
,
120.0
)
"""Timeout for requests to OpenAI completion API. Default is (5.0, 120.0): 5s connect, 120s read."""
max_retries
:
int
=
2
"""Maximum number of retries to make when generating."""
@
root_validator
()
def
validate_environment
(
cls
,
values
:
Dict
)
->
Dict
:
...
...
api/core/llm/streamable_chat_anthropic.py
View file @
767b526b
from
typing
import
List
,
Optional
,
Any
,
Dict
from
httpx
import
Timeout
from
langchain.callbacks.manager
import
Callbacks
from
langchain.chat_models
import
ChatAnthropic
from
langchain.schema
import
BaseMessage
,
LLMResult
,
SystemMessage
,
AIMessage
,
HumanMessage
,
ChatMessage
...
...
@@ -13,6 +14,8 @@ class StreamableChatAnthropic(ChatAnthropic):
Wrapper around Anthropic's large language model.
"""
default_request_timeout
:
Optional
[
float
]
=
Timeout
(
timeout
=
120.0
,
connect
=
5.0
)
@
root_validator
()
def
prepare_params
(
cls
,
values
:
Dict
)
->
Dict
:
values
[
'model_name'
]
=
values
.
get
(
'model'
)
...
...
api/core/llm/streamable_chat_open_ai.py
View file @
767b526b
...
...
@@ -3,7 +3,7 @@ import os
from
langchain.callbacks.manager
import
Callbacks
from
langchain.schema
import
BaseMessage
,
LLMResult
from
langchain.chat_models
import
ChatOpenAI
from
typing
import
Optional
,
List
,
Dict
,
Any
from
typing
import
Optional
,
List
,
Dict
,
Any
,
Union
,
Tuple
from
pydantic
import
root_validator
...
...
@@ -11,6 +11,10 @@ from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class
StreamableChatOpenAI
(
ChatOpenAI
):
request_timeout
:
Optional
[
Union
[
float
,
Tuple
[
float
,
float
]]]
=
(
5.0
,
120.0
)
"""Timeout for requests to OpenAI completion API. Default is (5.0, 120.0): 5s connect, 120s read."""
max_retries
:
int
=
2
"""Maximum number of retries to make when generating."""
@
root_validator
()
def
validate_environment
(
cls
,
values
:
Dict
)
->
Dict
:
...
...
api/core/llm/streamable_open_ai.py
View file @
767b526b
...
...
@@ -2,7 +2,7 @@ import os
from
langchain.callbacks.manager
import
Callbacks
from
langchain.schema
import
LLMResult
from
typing
import
Optional
,
List
,
Dict
,
Any
,
Mapping
from
typing
import
Optional
,
List
,
Dict
,
Any
,
Mapping
,
Union
,
Tuple
from
langchain
import
OpenAI
from
pydantic
import
root_validator
...
...
@@ -10,6 +10,10 @@ from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class
StreamableOpenAI
(
OpenAI
):
request_timeout
:
Optional
[
Union
[
float
,
Tuple
[
float
,
float
]]]
=
(
5.0
,
120.0
)
"""Timeout for requests to OpenAI completion API. Default is (5.0, 120.0): 5s connect, 120s read."""
max_retries
:
int
=
2
"""Maximum number of retries to make when generating."""
@
root_validator
()
def
validate_environment
(
cls
,
values
:
Dict
)
->
Dict
:
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment