ai-tech / dify · Commits

Commit 4d7caa34
authored Mar 12, 2024 by takatost

add llm node test

parent 85646632
Showing 4 changed files with 134 additions and 2 deletions (+134 -2)
api/tests/integration_tests/workflow/nodes/__init__.py (+0 -0)
api/tests/integration_tests/workflow/nodes/test_llm.py (+132 -0)
api/tests/integration_tests/workflow/nodes/test_template_transform.py (+2 -2)
api/tests/unit_tests/core/workflow/nodes/__init__.py (+0 -0)
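To try the new test locally, one option is to invoke pytest programmatically. This is a sketch, assuming you run it from the api/ directory (so that modules such as core.* resolve), with the API dependencies installed and OPENAI_API_KEY exported, which test_llm.py reads via os.environ:

# Sketch: run only the new LLM node integration test, verbosely.
# test_llm.py reads OPENAI_API_KEY from the environment, and its chat
# completion call goes through the setup_openai_mock fixture.
import pytest

pytest.main([
    "tests/integration_tests/workflow/nodes/test_llm.py",
    "-v",
])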
api/tests/integration_tests/workflow/nodes/__init__.py  0 → 100644 (new, empty)
api/tests/integration_tests/workflow/nodes/test_llm.py  0 → 100644 (new file)
import os
from unittest.mock import MagicMock

import pytest

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderModelBundle, ProviderConfiguration
from core.entities.provider_entities import SystemConfiguration, CustomConfiguration, CustomProviderConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import ModelProviderFactory
from core.workflow.entities.node_entities import SystemVariable
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.base_node import UserFrom
from core.workflow.nodes.llm.llm_node import LLMNode
from extensions.ext_database import db
from models.provider import ProviderType
from models.workflow import WorkflowNodeExecutionStatus

"""FOR MOCK FIXTURES, DO NOT REMOVE"""
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock


@pytest.mark.parametrize('setup_openai_mock', [['chat']], indirect=True)
def test_execute_llm(setup_openai_mock):
    node = LLMNode(
        tenant_id='1',
        app_id='1',
        workflow_id='1',
        user_id='1',
        user_from=UserFrom.ACCOUNT,
        config={
            'id': 'llm',
            'data': {
                'title': '123',
                'type': 'llm',
                'model': {
                    'provider': 'openai',
                    'name': 'gpt-3.5.turbo',
                    'mode': 'chat',
                    'completion_params': {}
                },
                'variables': [
                    {
                        'variable': 'weather',
                        'value_selector': ['abc', 'output'],
                    },
                    {
                        'variable': 'query',
                        'value_selector': ['sys', 'query']
                    }
                ],
                'prompt_template': [
                    {
                        'role': 'system',
                        'text': 'you are a helpful assistant.\ntoday\'s weather is {{weather}}.'
                    },
                    {
                        'role': 'user',
                        'text': '{{query}}'
                    }
                ],
                'memory': {
                    'window': {
                        'enabled': True,
                        'size': 2
                    }
                },
                'context': {
                    'enabled': False
                },
                'vision': {
                    'enabled': False
                }
            }
        }
    )

    # construct variable pool
    pool = VariablePool(system_variables={
        SystemVariable.QUERY: 'what\'s the weather today?',
        SystemVariable.FILES: [],
        SystemVariable.CONVERSATION: 'abababa'
    }, user_inputs={})
    pool.append_variable(node_id='abc', variable_key_list=['output'], value='sunny')

    credentials = {
        'openai_api_key': os.environ.get('OPENAI_API_KEY')
    }

    provider_instance = ModelProviderFactory().get_provider_instance('openai')
    model_type_instance = provider_instance.get_model_instance(ModelType.LLM)
    provider_model_bundle = ProviderModelBundle(
        configuration=ProviderConfiguration(
            tenant_id='1',
            provider=provider_instance.get_provider_schema(),
            preferred_provider_type=ProviderType.CUSTOM,
            using_provider_type=ProviderType.CUSTOM,
            system_configuration=SystemConfiguration(enabled=False),
            custom_configuration=CustomConfiguration(
                provider=CustomProviderConfiguration(credentials=credentials)
            )
        ),
        provider_instance=provider_instance,
        model_type_instance=model_type_instance
    )
    model_instance = ModelInstance(provider_model_bundle=provider_model_bundle, model='gpt-3.5-turbo')
    model_config = ModelConfigWithCredentialsEntity(
        model='gpt-3.5-turbo',
        provider='openai',
        mode='chat',
        credentials=credentials,
        parameters={},
        model_schema=model_type_instance.get_model_schema('gpt-3.5-turbo'),
        provider_model_bundle=provider_model_bundle
    )

    # Mock db.session.close()
    db.session.close = MagicMock()

    node._fetch_model_config = MagicMock(return_value=tuple([model_instance, model_config]))

    # execute node
    result = node.run(pool)

    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
    assert result.outputs['text'] is not None
    assert result.outputs['usage']['total_tokens'] > 0
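To make the data flow easier to follow: each entry in the node's 'variables' list binds a template placeholder to a value selector, i.e. a (node_id, key) path into the variable pool. The test seeds ('sys', 'query') through system_variables and ('abc', 'output') through append_variable. Below is a conceptual sketch in plain Python, not the dify VariablePool API, of how {{weather}} and {{query}} resolve before the prompt reaches the mocked model:

# Conceptual sketch only -- mirrors the test data above, not dify internals.
pool_contents = {
    ('sys', 'query'): "what's the weather today?",  # from system_variables
    ('abc', 'output'): 'sunny',                     # from pool.append_variable(...)
}

variables = [
    {'variable': 'weather', 'value_selector': ['abc', 'output']},
    {'variable': 'query', 'value_selector': ['sys', 'query']},
]

# resolve each placeholder name to its value via the selector path
resolved = {v['variable']: pool_contents[tuple(v['value_selector'])] for v in variables}

system_prompt = "you are a helpful assistant.\ntoday's weather is {{weather}}."
user_prompt = "{{query}}"
for name, value in resolved.items():
    system_prompt = system_prompt.replace('{{%s}}' % name, str(value))
    user_prompt = user_prompt.replace('{{%s}}' % name, str(value))

print(system_prompt)  # ...today's weather is sunny.
print(user_prompt)    # what's the weather today?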
api/tests/integration_tests/workflow/nodes/test_template_transform.py
import pytest

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.base_node import UserFrom
from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode
from models.workflow import WorkflowNodeExecutionStatus
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock

...

@@ -14,7 +14,7 @@ def test_execute_code(setup_code_executor_mock):
         app_id='1',
         workflow_id='1',
         user_id='1',
-        user_from=InvokeFrom.WEB_APP,
+        user_from=UserFrom.END_USER,
         config={
             'id': '1',
             'data': {

...
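The two changed lines replace InvokeFrom.WEB_APP with UserFrom.END_USER, so this test passes the same UserFrom enum that the new LLM node test uses (UserFrom.ACCOUNT there). A minimal sketch of the corrected call site follows, assuming TemplateTransformNode accepts the same constructor arguments as LLMNode above; node_config is a hypothetical placeholder for the config dict that the diff context truncates:

# Sketch only: mirrors the constructor pattern from test_llm.py above.
# node_config is a hypothetical placeholder, not the test's real config.
from core.workflow.nodes.base_node import UserFrom
from core.workflow.nodes.template_transform.template_transform_node import TemplateTransformNode

node_config = {'id': '1', 'data': {}}  # placeholder

node = TemplateTransformNode(
    tenant_id='1',
    app_id='1',
    workflow_id='1',
    user_id='1',
    user_from=UserFrom.END_USER,  # was InvokeFrom.WEB_APP before this commit
    config=node_config,
)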
api/tests/unit_tests/core/workflow/nodes/__init__.py  0 → 100644 (new, empty)