Unverified Commit a29c1f93 authored by Yeuoly's avatar Yeuoly

Merge branch 'main' into feat/enterprise

parents c81e07af 7f891939
# Dify Open Source License # Open Source License
The Dify project is licensed under the Apache License 2.0, with the following additional conditions: Dify is licensed under the Apache License 2.0, with the following additional conditions:
1. Dify is permitted to be used for commercialization, such as using Dify as a "backend-as-a-service" for your other applications, or delivering it to enterprises as an application development platform. However, when the following conditions are met, you must contact the producer to obtain a commercial license: 1. Dify may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. Should the conditions below be met, a commercial license must be obtained from the producer:
a. Multi-tenant SaaS service: Unless explicitly authorized by Dify in writing, you may not use the Dify.AI source code to operate a multi-tenant SaaS service that is similar to the Dify.AI service edition. a. Multi-tenant SaaS service: Unless explicitly authorized by Dify in writing, you may not use the Dify source code to operate a multi-tenant environment.
b. LOGO and copyright information: In the process of using Dify, you may not remove or modify the LOGO or copyright information in the Dify console. - Tenant Definition: Within the context of Dify, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations.
b. LOGO and copyright information: In the process of using Dify's frontend components, you may not remove or modify the LOGO or copyright information in the Dify console or applications. This restriction is inapplicable to uses of Dify that do not involve its frontend components.
Please contact business@dify.ai by email to inquire about licensing matters. Please contact business@dify.ai by email to inquire about licensing matters.
2. As a contributor, you should agree that your contributed code: 2. As a contributor, you should agree that:
a. The producer can adjust the open-source agreement to be more strict or relaxed. a. The producer can adjust the open-source agreement to be more strict or relaxed as deemed necessary.
b. Can be used for commercial purposes, such as Dify's cloud business. b. Your contributed code may be used for commercial purposes, including but not limited to its cloud business operations.
Apart from this, all other rights and restrictions follow the Apache License 2.0. If you need more detailed information, you can refer to the full version of Apache License 2.0. Apart from the specific conditions mentioned above, all other rights and restrictions follow the Apache License 2.0. Detailed information about the Apache License 2.0 can be found at http://www.apache.org/licenses/LICENSE-2.0.
The interactive design of this product is protected by appearance patent. The interactive design of this product is protected by appearance patent.
© 2023 LangGenius, Inc. © 2024 LangGenius, Inc.
---------- ----------
......
...@@ -15,7 +15,7 @@ from libs.rsa import generate_key_pair ...@@ -15,7 +15,7 @@ from libs.rsa import generate_key_pair
from models.account import Tenant from models.account import Tenant
from models.dataset import Dataset, DatasetCollectionBinding, DocumentSegment from models.dataset import Dataset, DatasetCollectionBinding, DocumentSegment
from models.dataset import Document as DatasetDocument from models.dataset import Document as DatasetDocument
from models.model import Account from models.model import Account, App, AppAnnotationSetting, MessageAnnotation
from models.provider import Provider, ProviderModel from models.provider import Provider, ProviderModel
...@@ -125,7 +125,114 @@ def reset_encrypt_key_pair(): ...@@ -125,7 +125,114 @@ def reset_encrypt_key_pair():
@click.command('vdb-migrate', help='migrate vector db.') @click.command('vdb-migrate', help='migrate vector db.')
def vdb_migrate(): @click.option('--scope', default='all', prompt=False, help='The scope of vector database to migrate, Default is All.')
def vdb_migrate(scope: str):
    """
    CLI entry point: run vector-database migrations for the requested scope.

    :param scope: which data to migrate — 'knowledge', 'annotation', or 'all'
                  (any other value is a no-op).
    """
    # 'all' matches both membership checks, so both migrations run.
    if scope in ('knowledge', 'all'):
        migrate_knowledge_vector_database()
    if scope in ('annotation', 'all'):
        migrate_annotation_vector_database()
def migrate_annotation_vector_database():
    """
    Migrate annotation data to the target vector database.

    Walks every 'normal' App in pages of 50, and for each app that has an
    annotation setting with a valid dataset collection binding, rebuilds its
    annotation vector index: the old index is deleted first, then re-created
    from the app's MessageAnnotation questions. Per-app failures are logged
    and skipped so one bad app does not abort the whole migration.
    """
    click.echo(click.style('Start migrate annotation data.', fg='green'))
    # Counters for the final summary line.
    create_count = 0
    skipped_count = 0
    total_count = 0
    page = 1
    while True:
        try:
            # get apps info
            # Flask-SQLAlchemy paginate() raises NotFound once page is past the
            # last page — that is the loop's only exit condition.
            apps = db.session.query(App).filter(
                App.status == 'normal'
            ).order_by(App.created_at.desc()).paginate(page=page, per_page=50)
        except NotFound:
            break

        page += 1
        for app in apps:
            total_count = total_count + 1
            click.echo(f'Processing the {total_count} app {app.id}. '
                       + f'{create_count} created, {skipped_count} skipped.')
            try:
                click.echo('Create app annotation index: {}'.format(app.id))
                app_annotation_setting = db.session.query(AppAnnotationSetting).filter(
                    AppAnnotationSetting.app_id == app.id
                ).first()

                # No annotation setting means annotations are disabled for this app.
                if not app_annotation_setting:
                    skipped_count = skipped_count + 1
                    click.echo('App annotation setting is disabled: {}'.format(app.id))
                    continue

                # get dataset_collection_binding info
                dataset_collection_binding = db.session.query(DatasetCollectionBinding).filter(
                    DatasetCollectionBinding.id == app_annotation_setting.collection_binding_id
                ).first()
                # NOTE(review): a missing binding is skipped WITHOUT incrementing
                # skipped_count, so the summary undercounts these — confirm intended.
                if not dataset_collection_binding:
                    click.echo('App annotation collection binding is not exist: {}'.format(app.id))
                    continue

                annotations = db.session.query(MessageAnnotation).filter(MessageAnnotation.app_id == app.id).all()
                # Transient Dataset (not persisted here) describing where the
                # annotation vectors live; the app id doubles as the dataset id.
                dataset = Dataset(
                    id=app.id,
                    tenant_id=app.tenant_id,
                    indexing_technique='high_quality',
                    embedding_model_provider=dataset_collection_binding.provider_name,
                    embedding_model=dataset_collection_binding.model_name,
                    collection_binding_id=dataset_collection_binding.id
                )

                documents = []
                if annotations:
                    for annotation in annotations:
                        # Only the annotation question is embedded; the metadata
                        # keys match the Vector attributes declared below.
                        document = Document(
                            page_content=annotation.question,
                            metadata={
                                "annotation_id": annotation.id,
                                "app_id": app.id,
                                "doc_id": annotation.id
                            }
                        )
                        documents.append(document)

                vector = Vector(dataset, attributes=['doc_id', 'annotation_id', 'app_id'])
                click.echo(f"Start to migrate annotation, app_id: {app.id}.")

                # Delete the stale index before re-creating it; deletion failure
                # is fatal for this app (re-raised into the outer handler).
                try:
                    vector.delete()
                    click.echo(
                        click.style(f'Successfully delete vector index for app: {app.id}.',
                                    fg='green'))
                except Exception as e:
                    click.echo(
                        click.style(f'Failed to delete vector index for app {app.id}.',
                                    fg='red'))
                    raise e

                # Re-create the index only when there is something to embed.
                if documents:
                    try:
                        click.echo(click.style(
                            f'Start to created vector index with {len(documents)} annotations for app {app.id}.',
                            fg='green'))
                        vector.create(documents)
                        click.echo(
                            click.style(f'Successfully created vector index for app {app.id}.', fg='green'))
                    except Exception as e:
                        click.echo(click.style(f'Failed to created vector index for app {app.id}.', fg='red'))
                        raise e

                click.echo(f'Successfully migrated app annotation {app.id}.')
                create_count += 1
            except Exception as e:
                # Best-effort migration: log the failure and move on to the next app.
                click.echo(
                    click.style('Create app annotation index error: {} {}'.format(e.__class__.__name__, str(e)),
                                fg='red'))
                continue

    click.echo(
        click.style(f'Congratulations! Create {create_count} app annotation indexes, and skipped {skipped_count} apps.',
                    fg='green'))
def migrate_knowledge_vector_database():
""" """
Migrate vector database datas to target vector database . Migrate vector database datas to target vector database .
""" """
......
...@@ -90,7 +90,7 @@ class Config: ...@@ -90,7 +90,7 @@ class Config:
# ------------------------ # ------------------------
# General Configurations. # General Configurations.
# ------------------------ # ------------------------
self.CURRENT_VERSION = "0.5.7" self.CURRENT_VERSION = "0.5.8"
self.COMMIT_SHA = get_env('COMMIT_SHA') self.COMMIT_SHA = get_env('COMMIT_SHA')
self.EDITION = "SELF_HOSTED" self.EDITION = "SELF_HOSTED"
self.DEPLOY_ENV = get_env('DEPLOY_ENV') self.DEPLOY_ENV = get_env('DEPLOY_ENV')
......
...@@ -129,7 +129,7 @@ class AppListApi(Resource): ...@@ -129,7 +129,7 @@ class AppListApi(Resource):
"No Default System Reasoning Model available. Please configure " "No Default System Reasoning Model available. Please configure "
"in the Settings -> Model Provider.") "in the Settings -> Model Provider.")
else: else:
model_config_dict["model"]["provider"] = default_model_entity.provider model_config_dict["model"]["provider"] = default_model_entity.provider.provider
model_config_dict["model"]["name"] = default_model_entity.model model_config_dict["model"]["name"] = default_model_entity.model
model_configuration = AppModelConfigService.validate_configuration( model_configuration = AppModelConfigService.validate_configuration(
......
...@@ -88,7 +88,7 @@ class ChatMessageTextApi(Resource): ...@@ -88,7 +88,7 @@ class ChatMessageTextApi(Resource):
response = AudioService.transcript_tts( response = AudioService.transcript_tts(
tenant_id=app_model.tenant_id, tenant_id=app_model.tenant_id,
text=request.form['text'], text=request.form['text'],
voice=app_model.app_model_config.text_to_speech_dict.get('voice'), voice=request.form['voice'] if request.form['voice'] else app_model.app_model_config.text_to_speech_dict.get('voice'),
streaming=False streaming=False
) )
......
...@@ -11,7 +11,7 @@ from controllers.console.datasets.error import ( ...@@ -11,7 +11,7 @@ from controllers.console.datasets.error import (
UnsupportedFileTypeError, UnsupportedFileTypeError,
) )
from controllers.console.setup import setup_required from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from fields.file_fields import file_fields, upload_config_fields from fields.file_fields import file_fields, upload_config_fields
from libs.login import login_required from libs.login import login_required
from services.file_service import ALLOWED_EXTENSIONS, UNSTRUSTURED_ALLOWED_EXTENSIONS, FileService from services.file_service import ALLOWED_EXTENSIONS, UNSTRUSTURED_ALLOWED_EXTENSIONS, FileService
...@@ -39,6 +39,7 @@ class FileApi(Resource): ...@@ -39,6 +39,7 @@ class FileApi(Resource):
@login_required @login_required
@account_initialization_required @account_initialization_required
@marshal_with(file_fields) @marshal_with(file_fields)
@cloud_edition_billing_resource_check(resource='documents')
def post(self): def post(self):
# get file from request # get file from request
......
...@@ -85,7 +85,7 @@ class ChatTextApi(InstalledAppResource): ...@@ -85,7 +85,7 @@ class ChatTextApi(InstalledAppResource):
response = AudioService.transcript_tts( response = AudioService.transcript_tts(
tenant_id=app_model.tenant_id, tenant_id=app_model.tenant_id,
text=request.form['text'], text=request.form['text'],
voice=app_model.app_model_config.text_to_speech_dict.get('voice'), voice=request.form['voice'] if request.form['voice'] else app_model.app_model_config.text_to_speech_dict.get('voice'),
streaming=False streaming=False
) )
return {'data': response.data.decode('latin1')} return {'data': response.data.decode('latin1')}
......
...@@ -259,6 +259,7 @@ class ToolApiProviderPreviousTestApi(Resource): ...@@ -259,6 +259,7 @@ class ToolApiProviderPreviousTestApi(Resource):
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
parser.add_argument('tool_name', type=str, required=True, nullable=False, location='json') parser.add_argument('tool_name', type=str, required=True, nullable=False, location='json')
parser.add_argument('provider_name', type=str, required=False, nullable=False, location='json')
parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json') parser.add_argument('credentials', type=dict, required=True, nullable=False, location='json')
parser.add_argument('parameters', type=dict, required=True, nullable=False, location='json') parser.add_argument('parameters', type=dict, required=True, nullable=False, location='json')
parser.add_argument('schema_type', type=str, required=True, nullable=False, location='json') parser.add_argument('schema_type', type=str, required=True, nullable=False, location='json')
...@@ -268,6 +269,7 @@ class ToolApiProviderPreviousTestApi(Resource): ...@@ -268,6 +269,7 @@ class ToolApiProviderPreviousTestApi(Resource):
return ToolManageService.test_api_tool_preview( return ToolManageService.test_api_tool_preview(
current_user.current_tenant_id, current_user.current_tenant_id,
args['provider_name'] if args['provider_name'] else '',
args['tool_name'], args['tool_name'],
args['credentials'], args['credentials'],
args['parameters'], args['parameters'],
......
...@@ -56,6 +56,7 @@ def cloud_edition_billing_resource_check(resource: str, ...@@ -56,6 +56,7 @@ def cloud_edition_billing_resource_check(resource: str,
members = features.members members = features.members
apps = features.apps apps = features.apps
vector_space = features.vector_space vector_space = features.vector_space
documents_upload_quota = features.documents_upload_quota
annotation_quota_limit = features.annotation_quota_limit annotation_quota_limit = features.annotation_quota_limit
if resource == 'members' and 0 < members.limit <= members.size: if resource == 'members' and 0 < members.limit <= members.size:
...@@ -64,6 +65,13 @@ def cloud_edition_billing_resource_check(resource: str, ...@@ -64,6 +65,13 @@ def cloud_edition_billing_resource_check(resource: str,
abort(403, error_msg) abort(403, error_msg)
elif resource == 'vector_space' and 0 < vector_space.limit <= vector_space.size: elif resource == 'vector_space' and 0 < vector_space.limit <= vector_space.size:
abort(403, error_msg) abort(403, error_msg)
elif resource == 'documents' and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
# The api of file upload is used in the multiple places, so we need to check the source of the request from datasets
source = request.args.get('source')
if source == 'datasets':
abort(403, error_msg)
else:
return view(*args, **kwargs)
elif resource == 'workspace_custom' and not features.can_replace_logo: elif resource == 'workspace_custom' and not features.can_replace_logo:
abort(403, error_msg) abort(403, error_msg)
elif resource == 'annotation' and 0 < annotation_quota_limit.limit < annotation_quota_limit.size: elif resource == 'annotation' and 0 < annotation_quota_limit.limit < annotation_quota_limit.size:
......
...@@ -87,7 +87,7 @@ class TextApi(Resource): ...@@ -87,7 +87,7 @@ class TextApi(Resource):
tenant_id=app_model.tenant_id, tenant_id=app_model.tenant_id,
text=args['text'], text=args['text'],
end_user=end_user, end_user=end_user,
voice=app_model.app_model_config.text_to_speech_dict.get('voice'), voice=args['voice'] if args['voice'] else app_model.app_model_config.text_to_speech_dict.get('voice'),
streaming=args['streaming'] streaming=args['streaming']
) )
......
...@@ -28,6 +28,7 @@ class DocumentAddByTextApi(DatasetApiResource): ...@@ -28,6 +28,7 @@ class DocumentAddByTextApi(DatasetApiResource):
"""Resource for documents.""" """Resource for documents."""
@cloud_edition_billing_resource_check('vector_space', 'dataset') @cloud_edition_billing_resource_check('vector_space', 'dataset')
@cloud_edition_billing_resource_check('documents', 'dataset')
def post(self, tenant_id, dataset_id): def post(self, tenant_id, dataset_id):
"""Create document by text.""" """Create document by text."""
parser = reqparse.RequestParser() parser = reqparse.RequestParser()
...@@ -153,6 +154,7 @@ class DocumentUpdateByTextApi(DatasetApiResource): ...@@ -153,6 +154,7 @@ class DocumentUpdateByTextApi(DatasetApiResource):
class DocumentAddByFileApi(DatasetApiResource): class DocumentAddByFileApi(DatasetApiResource):
"""Resource for documents.""" """Resource for documents."""
@cloud_edition_billing_resource_check('vector_space', 'dataset') @cloud_edition_billing_resource_check('vector_space', 'dataset')
@cloud_edition_billing_resource_check('documents', 'dataset')
def post(self, tenant_id, dataset_id): def post(self, tenant_id, dataset_id):
"""Create document by upload file.""" """Create document by upload file."""
args = {} args = {}
......
...@@ -89,6 +89,7 @@ def cloud_edition_billing_resource_check(resource: str, ...@@ -89,6 +89,7 @@ def cloud_edition_billing_resource_check(resource: str,
members = features.members members = features.members
apps = features.apps apps = features.apps
vector_space = features.vector_space vector_space = features.vector_space
documents_upload_quota = features.documents_upload_quota
if resource == 'members' and 0 < members.limit <= members.size: if resource == 'members' and 0 < members.limit <= members.size:
raise Unauthorized(error_msg) raise Unauthorized(error_msg)
...@@ -96,6 +97,8 @@ def cloud_edition_billing_resource_check(resource: str, ...@@ -96,6 +97,8 @@ def cloud_edition_billing_resource_check(resource: str,
raise Unauthorized(error_msg) raise Unauthorized(error_msg)
elif resource == 'vector_space' and 0 < vector_space.limit <= vector_space.size: elif resource == 'vector_space' and 0 < vector_space.limit <= vector_space.size:
raise Unauthorized(error_msg) raise Unauthorized(error_msg)
elif resource == 'documents' and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
raise Unauthorized(error_msg)
else: else:
return view(*args, **kwargs) return view(*args, **kwargs)
......
...@@ -84,7 +84,7 @@ class TextApi(WebApiResource): ...@@ -84,7 +84,7 @@ class TextApi(WebApiResource):
tenant_id=app_model.tenant_id, tenant_id=app_model.tenant_id,
text=request.form['text'], text=request.form['text'],
end_user=end_user.external_user_id, end_user=end_user.external_user_id,
voice=app_model.app_model_config.text_to_speech_dict.get('voice'), voice=request.form['voice'] if request.form['voice'] else app_model.app_model_config.text_to_speech_dict.get('voice'),
streaming=False streaming=False
) )
......
...@@ -84,7 +84,7 @@ class AppRunner: ...@@ -84,7 +84,7 @@ class AppRunner:
return rest_tokens return rest_tokens
def recale_llm_max_tokens(self, model_config: ModelConfigEntity, def recalc_llm_max_tokens(self, model_config: ModelConfigEntity,
prompt_messages: list[PromptMessage]): prompt_messages: list[PromptMessage]):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit # recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_type_instance = model_config.provider_model_bundle.model_type_instance model_type_instance = model_config.provider_model_bundle.model_type_instance
......
...@@ -181,7 +181,7 @@ class BasicApplicationRunner(AppRunner): ...@@ -181,7 +181,7 @@ class BasicApplicationRunner(AppRunner):
return return
# Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit # Re-calculate the max tokens if sum(prompt_token + max_tokens) over model token limit
self.recale_llm_max_tokens( self.recalc_llm_max_tokens(
model_config=app_orchestration_config.model_config, model_config=app_orchestration_config.model_config,
prompt_messages=prompt_messages prompt_messages=prompt_messages
) )
......
...@@ -130,8 +130,8 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner): ...@@ -130,8 +130,8 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
input=query input=query
) )
# recale llm max tokens # recalc llm max tokens
self.recale_llm_max_tokens(self.model_config, prompt_messages) self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model # invoke model
chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm( chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
prompt_messages=prompt_messages, prompt_messages=prompt_messages,
......
...@@ -105,8 +105,8 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner): ...@@ -105,8 +105,8 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
messages_ids=message_file_ids messages_ids=message_file_ids
) )
# recale llm max tokens # recalc llm max tokens
self.recale_llm_max_tokens(self.model_config, prompt_messages) self.recalc_llm_max_tokens(self.model_config, prompt_messages)
# invoke model # invoke model
chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm( chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
prompt_messages=prompt_messages, prompt_messages=prompt_messages,
......
...@@ -21,7 +21,7 @@ class AnthropicProvider(ModelProvider): ...@@ -21,7 +21,7 @@ class AnthropicProvider(ModelProvider):
# Use `claude-instant-1` model for validate, # Use `claude-instant-1` model for validate,
model_instance.validate_credentials( model_instance.validate_credentials(
model='claude-instant-1', model='claude-instant-1.2',
credentials=credentials credentials=credentials
) )
except CredentialsValidateFailedError as ex: except CredentialsValidateFailedError as ex:
......
...@@ -2,8 +2,8 @@ provider: anthropic ...@@ -2,8 +2,8 @@ provider: anthropic
label: label:
en_US: Anthropic en_US: Anthropic
description: description:
en_US: Anthropic’s powerful models, such as Claude 2 and Claude Instant. en_US: Anthropic’s powerful models, such as Claude 3.
zh_Hans: Anthropic 的强大模型,例如 Claude 2 和 Claude Instant zh_Hans: Anthropic 的强大模型,例如 Claude 3
icon_small: icon_small:
en_US: icon_s_en.svg en_US: icon_s_en.svg
icon_large: icon_large:
......
- claude-3-opus-20240229
- claude-3-sonnet-20240229
- claude-2.1
- claude-instant-1.2
- claude-2
- claude-instant-1
...@@ -34,3 +34,4 @@ pricing: ...@@ -34,3 +34,4 @@ pricing:
output: '24.00' output: '24.00'
unit: '0.000001' unit: '0.000001'
currency: USD currency: USD
deprecated: true
model: claude-3-opus-20240229
label:
en_US: claude-3-opus-20240229
model_type: llm
features:
- agent-thought
- vision
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_tokens
use_template: max_tokens
required: true
default: 4096
min: 1
max: 4096
- name: response_format
use_template: response_format
pricing:
input: '15.00'
output: '75.00'
unit: '0.000001'
currency: USD
model: claude-3-sonnet-20240229
label:
en_US: claude-3-sonnet-20240229
model_type: llm
features:
- agent-thought
- vision
model_properties:
mode: chat
context_size: 200000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_tokens
use_template: max_tokens
required: true
default: 4096
min: 1
max: 4096
- name: response_format
use_template: response_format
pricing:
input: '3.00'
output: '15.00'
unit: '0.000001'
currency: USD
model: claude-instant-1.2
label:
en_US: claude-instant-1.2
model_type: llm
features: [ ]
model_properties:
mode: chat
context_size: 100000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: top_k
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
en_US: Only sample from the top K options for each subsequent token.
required: false
- name: max_tokens
use_template: max_tokens
required: true
default: 4096
min: 1
max: 4096
- name: response_format
use_template: response_format
pricing:
input: '1.63'
output: '5.51'
unit: '0.000001'
currency: USD
...@@ -33,3 +33,4 @@ pricing: ...@@ -33,3 +33,4 @@ pricing:
output: '5.51' output: '5.51'
unit: '0.000001' unit: '0.000001'
currency: USD currency: USD
deprecated: true
...@@ -2,7 +2,7 @@ provider: jina ...@@ -2,7 +2,7 @@ provider: jina
label: label:
en_US: Jina en_US: Jina
description: description:
en_US: Embedding Model Supported en_US: Embedding and Rerank Model Supported
icon_small: icon_small:
en_US: icon_s_en.svg en_US: icon_s_en.svg
icon_large: icon_large:
...@@ -13,9 +13,10 @@ help: ...@@ -13,9 +13,10 @@ help:
en_US: Get your API key from Jina AI en_US: Get your API key from Jina AI
zh_Hans: 从 Jina 获取 API Key zh_Hans: 从 Jina 获取 API Key
url: url:
en_US: https://jina.ai/embeddings/ en_US: https://jina.ai/
supported_model_types: supported_model_types:
- text-embedding - text-embedding
- rerank
configurate_methods: configurate_methods:
- predefined-model - predefined-model
provider_credential_schema: provider_credential_schema:
......
model: jina-reranker-v1-base-en
model_type: rerank
model_properties:
context_size: 8192
from typing import Optional
import httpx
from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
from core.model_runtime.errors.invoke import (
InvokeAuthorizationError,
InvokeBadRequestError,
InvokeConnectionError,
InvokeError,
InvokeRateLimitError,
InvokeServerUnavailableError,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.rerank_model import RerankModel
class JinaRerankModel(RerankModel):
    """
    Model class for Jina rerank model.

    Calls the hosted Jina Rerank HTTP API; credentials must contain 'api_key'.
    """

    def _invoke(self, model: str, credentials: dict,
                query: str, docs: list[str], score_threshold: Optional[float] = None, top_n: Optional[int] = None,
                user: Optional[str] = None) -> RerankResult:
        """
        Invoke rerank model

        :param model: model name
        :param credentials: model credentials (expects 'api_key')
        :param query: search query
        :param docs: docs for reranking
        :param score_threshold: score threshold; docs scoring below it are dropped
        :param top_n: top n documents to return
        :param user: unique user id (unused by this provider)
        :return: rerank result
        """
        # Nothing to rank — return an empty result without an API round-trip.
        if len(docs) == 0:
            return RerankResult(model=model, docs=[])

        try:
            # NOTE(review): no explicit timeout is set; httpx's default (5s)
            # applies — confirm that is acceptable for large batches.
            response = httpx.post(
                "https://api.jina.ai/v1/rerank",
                json={
                    "model": model,
                    "query": query,
                    "documents": docs,
                    "top_n": top_n
                },
                headers={"Authorization": f"Bearer {credentials.get('api_key')}"}
            )
            response.raise_for_status()
            results = response.json()

            rerank_documents = []
            for result in results['results']:
                # 'index' refers back to the position in the input docs list.
                rerank_document = RerankDocument(
                    index=result['index'],
                    text=result['document']['text'],
                    score=result['relevance_score'],
                )
                # Apply the optional score cutoff client-side.
                if score_threshold is None or result['relevance_score'] >= score_threshold:
                    rerank_documents.append(rerank_document)

            return RerankResult(model=model, docs=rerank_documents)
        except httpx.HTTPStatusError as e:
            # NOTE(review): any non-2xx (including 401 auth failures) surfaces as
            # InvokeServerUnavailableError here, while _invoke_error_mapping maps
            # HTTPStatusError to InvokeAuthorizationError — confirm which is intended.
            raise InvokeServerUnavailableError(str(e))

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        Performs a tiny real rerank call; any failure is wrapped in
        CredentialsValidateFailedError.

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            self._invoke(
                model=model,
                credentials=credentials,
                query="What is the capital of the United States?",
                docs=[
                    "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
                    "Census, Carson City had a population of 55,274.",
                    "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
                    "are a political division controlled by the United States. Its capital is Saipan.",
                ],
                score_threshold=0.8
            )
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        """
        return {
            InvokeConnectionError: [httpx.ConnectError],
            InvokeServerUnavailableError: [httpx.RemoteProtocolError],
            InvokeRateLimitError: [],
            InvokeAuthorizationError: [httpx.HTTPStatusError],
            InvokeBadRequestError: [httpx.RequestError]
        }
...@@ -34,7 +34,7 @@ class OpenAIText2SpeechModel(_CommonOpenAI, TTSModel): ...@@ -34,7 +34,7 @@ class OpenAIText2SpeechModel(_CommonOpenAI, TTSModel):
:return: text translated to audio file :return: text translated to audio file
""" """
audio_type = self._get_model_audio_type(model, credentials) audio_type = self._get_model_audio_type(model, credentials)
if not voice: if not voice or voice not in [d['value'] for d in self.get_tts_model_voices(model=model, credentials=credentials)]:
voice = self._get_model_default_voice(model, credentials) voice = self._get_model_default_voice(model, credentials)
if streaming: if streaming:
return Response(stream_with_context(self._tts_invoke_streaming(model=model, return Response(stream_with_context(self._tts_invoke_streaming(model=model,
......
...@@ -34,7 +34,7 @@ class TongyiText2SpeechModel(_CommonTongyi, TTSModel): ...@@ -34,7 +34,7 @@ class TongyiText2SpeechModel(_CommonTongyi, TTSModel):
:return: text translated to audio file :return: text translated to audio file
""" """
audio_type = self._get_model_audio_type(model, credentials) audio_type = self._get_model_audio_type(model, credentials)
if not voice or voice not in self.get_tts_model_voices(model=model, credentials=credentials): if not voice or voice not in [d['value'] for d in self.get_tts_model_voices(model=model, credentials=credentials)]:
voice = self._get_model_default_voice(model, credentials) voice = self._get_model_default_voice(model, credentials)
if streaming: if streaming:
return Response(stream_with_context(self._tts_invoke_streaming(model=model, return Response(stream_with_context(self._tts_invoke_streaming(model=model,
......
...@@ -140,6 +140,7 @@ class MilvusVector(BaseVector): ...@@ -140,6 +140,7 @@ class MilvusVector(BaseVector):
connections.connect(alias=alias, uri=uri, user=self._client_config.user, password=self._client_config.password) connections.connect(alias=alias, uri=uri, user=self._client_config.user, password=self._client_config.password)
from pymilvus import utility from pymilvus import utility
if utility.has_collection(self._collection_name, using=alias):
utility.drop_collection(self._collection_name, None, using=alias) utility.drop_collection(self._collection_name, None, using=alias)
def text_exists(self, id: str) -> bool: def text_exists(self, id: str) -> bool:
......
...@@ -231,6 +231,9 @@ class QdrantVector(BaseVector): ...@@ -231,6 +231,9 @@ class QdrantVector(BaseVector):
def delete(self): def delete(self):
from qdrant_client.http import models from qdrant_client.http import models
from qdrant_client.http.exceptions import UnexpectedResponse
try:
filter = models.Filter( filter = models.Filter(
must=[ must=[
models.FieldCondition( models.FieldCondition(
...@@ -245,7 +248,13 @@ class QdrantVector(BaseVector): ...@@ -245,7 +248,13 @@ class QdrantVector(BaseVector):
filter=filter filter=filter
), ),
) )
except UnexpectedResponse as e:
# Collection does not exist, so return
if e.status_code == 404:
return
# Some other error occurred, so re-raise the exception
else:
raise e
def delete_by_ids(self, ids: list[str]) -> None: def delete_by_ids(self, ids: list[str]) -> None:
from qdrant_client.http import models from qdrant_client.http import models
......
- google - google
- bing - bing
- duckduckgo
- yahoo
- wikipedia - wikipedia
- arxiv
- pubmed
- dalle - dalle
- azuredalle - azuredalle
- stablediffusion
- webscraper - webscraper
- youtube
- wolframalpha - wolframalpha
- maths
- github - github
- chart - chart
- time - time
- yahoo
- stablediffusion
- vectorizer - vectorizer
- youtube
- gaode - gaode
- maths - wecom
...@@ -16,7 +16,8 @@ class BingProvider(BuiltinToolProviderController): ...@@ -16,7 +16,8 @@ class BingProvider(BuiltinToolProviderController):
user_id='', user_id='',
tool_parameters={ tool_parameters={
"query": "test", "query": "test",
"result_type": "link" "result_type": "link",
"enable_webpages": True,
}, },
) )
except Exception as e: except Exception as e:
......
<svg height="512" viewBox="0 0 448 512" width="448" xmlns="http://www.w3.org/2000/svg"><path d="m48 32c-26.5 0-48 21.5-48 48v352c0 26.5 21.5 48 48 48h352c26.5 0 48-21.5 48-48v-352c0-26.5-21.5-48-48-48zm69.56445 64s49.09165 11.12539 46.59571 94.78125c0 0 41.47034-117.171493 204.5664 1.64844 0 42.78788-.31445 172.24246-.31445 223.57031-176.89733-149.87989-207.38477-22.06836-207.38477-22.06836 0-79.8558-81.753902-70.33984-81.753902-70.33984v-212.65039s18.755175 1.4021 38.291012 11.11132zm86.14649 98.2832-24.00196 141.34961h36.5625l11.81446-81.3789h.37304l32.44727 81.3789h14.63281l33.93946-81.3789h.37304l10.31446 81.3789h36.7832l-21.40234-141.34961h-36.5625l-30.38868 75.54102-28.69531-75.54102z"/></svg>
\ No newline at end of file
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.pubmed.tools.pubmed_search import PubMedSearchTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class PubMedProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict) -> None:
        """Validate provider credentials by running a throwaway PubMed search."""
        try:
            # Fork a tool runtime bound to the supplied credentials, then fire
            # a trial query; any exception is treated as invalid credentials.
            trial_tool = PubMedSearchTool().fork_tool_runtime(
                meta={"credentials": credentials}
            )
            trial_tool.invoke(
                user_id='',
                tool_parameters={"query": "John Doe"},
            )
        except Exception as e:
            raise ToolProviderCredentialValidationError(str(e))
\ No newline at end of file
# Provider manifest for the PubMed built-in tool (no credentials required).
identity:
  author: Pink Banana
  name: pubmed
  label:
    en_US: PubMed
    zh_Hans: PubMed
  description:
    en_US: A search engine for biomedical literature.
    zh_Hans: 一款生物医学文献搜索引擎。
  icon: icon.svg
from typing import Any
from langchain.tools import PubmedQueryRun
from pydantic import BaseModel, Field
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
class PubMedInput(BaseModel):
    """Argument schema handed to langchain's PubmedQueryRun via args_schema."""
    query: str = Field(..., description="Search query.")
class PubMedSearchTool(BuiltinTool):
    """
    Tool for performing a search using PubMed search engine.
    """

    def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage | list[ToolInvokeMessage]:
        """
        Run a PubMed query and return the summarized result as a text message.

        Args:
            user_id (str): The ID of the user invoking the tool.
            tool_parameters (dict[str, Any]): The parameters for the tool invocation.

        Returns:
            ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation.
        """
        query = tool_parameters.get('query', '')
        if not query:
            return self.create_text_message('Please input query')
        # Delegate the actual lookup to langchain's PubMed wrapper, then
        # summarize the raw result before returning it to the caller.
        raw_result = PubmedQueryRun(args_schema=PubMedInput).run(query)
        return self.create_text_message(self.summary(user_id=user_id, content=raw_result))
\ No newline at end of file
# Tool manifest for the PubMed search tool.
identity:
  name: pubmed_search
  author: Pink Banana
  label:
    en_US: PubMed Search
    zh_Hans: PubMed 搜索
description:
  human:
    en_US: PubMed® comprises more than 35 million citations for biomedical literature from MEDLINE, life science journals, and online books. Citations may include links to full text content from PubMed Central and publisher web sites.
    zh_Hans: PubMed® 包含来自 MEDLINE、生命科学期刊和在线书籍的超过 3500 万篇生物医学文献引用。引用可能包括来自 PubMed Central 和出版商网站的全文内容链接。
  llm: Perform searches on PubMed and get results.
parameters:
  - name: query
    type: string
    required: true
    label:
      en_US: Query string
      zh_Hans: 查询语句
    human_description:
      en_US: The search query.
      zh_Hans: 搜索查询语句。
    llm_description: Key words for searching
    # Filled in by the LLM at invocation time.
    form: llm
from typing import Any
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin.tavily.tools.tavily_search import TavilySearchTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class TavilyProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict[str, Any]) -> None:
        """Validate the Tavily API key by issuing a trial search."""
        try:
            # A runtime forked with the candidate credentials is invoked once;
            # a failing request surfaces as a credential-validation error.
            trial_tool = TavilySearchTool().fork_tool_runtime(
                meta={"credentials": credentials}
            )
            trial_tool.invoke(
                user_id='',
                tool_parameters={"query": "Sachin Tendulkar"},
            )
        except Exception as e:
            raise ToolProviderCredentialValidationError(str(e))
\ No newline at end of file
# Provider manifest for the Tavily search tool.
identity:
  author: Yash Parmar
  name: tavily
  label:
    en_US: Tavily
    zh_Hans: Tavily
    pt_BR: Tavily
  description:
    en_US: Tavily
    zh_Hans: Tavily
    pt_BR: Tavily
  icon: icon.png
credentials_for_provider:
  # API key is stored encrypted (secret-input) and validated by TavilyProvider.
  tavily_api_key:
    type: secret-input
    required: true
    label:
      en_US: Tavily API key
      zh_Hans: Tavily API key
      pt_BR: Tavily API key
    placeholder:
      en_US: Please input your Tavily API key
      zh_Hans: 请输入你的 Tavily API key
      pt_BR: Please input your Tavily API key
    help:
      en_US: Get your Tavily API key from Tavily
      zh_Hans: 从 TavilyApi 获取您的 Tavily API key
      pt_BR: Get your Tavily API key from Tavily
    url: https://docs.tavily.com/docs/tavily-api/introduction
from typing import Any, Optional
import requests
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
TAVILY_API_URL = "https://api.tavily.com"


class TavilySearch:
    """
    A class for performing search operations using the Tavily Search API.

    Args:
        api_key (str): The API key for accessing the Tavily Search API.

    Methods:
        raw_results: Retrieves raw search results from the Tavily Search API.
        results: Retrieves cleaned search results from the Tavily Search API.
        clean_results: Cleans the raw search results.
    """

    def __init__(self, api_key: str) -> None:
        self.api_key = api_key

    def raw_results(
        self,
        query: str,
        max_results: Optional[int] = 3,
        search_depth: Optional[str] = "advanced",
        include_domains: Optional[list[str]] = None,
        exclude_domains: Optional[list[str]] = None,
        include_answer: Optional[bool] = False,
        include_raw_content: Optional[bool] = False,
        include_images: Optional[bool] = False,
    ) -> dict:
        """
        Retrieves raw search results from the Tavily Search API.

        Args:
            query (str): The search query.
            max_results (int, optional): The maximum number of results to retrieve. Defaults to 3.
            search_depth (str, optional): The search depth. Defaults to "advanced".
            include_domains (list[str], optional): The domains to include in the search. Defaults to None (no restriction).
            exclude_domains (list[str], optional): The domains to exclude from the search. Defaults to None (no exclusion).
            include_answer (bool, optional): Whether to include answer in the search results. Defaults to False.
            include_raw_content (bool, optional): Whether to include raw content in the search results. Defaults to False.
            include_images (bool, optional): Whether to include images in the search results. Defaults to False.

        Returns:
            dict: The raw search results as parsed JSON.

        Raises:
            requests.HTTPError: If the API responds with a non-2xx status.
        """
        params = {
            "api_key": self.api_key,
            "query": query,
            "max_results": max_results,
            "search_depth": search_depth,
            # None defaults replace the original shared-mutable '[]' defaults;
            # the request payload still always carries concrete lists.
            "include_domains": include_domains or [],
            "exclude_domains": exclude_domains or [],
            "include_answer": include_answer,
            "include_raw_content": include_raw_content,
            "include_images": include_images,
        }
        response = requests.post(f"{TAVILY_API_URL}/search", json=params)
        response.raise_for_status()
        return response.json()

    def results(
        self,
        query: str,
        max_results: Optional[int] = 3,
        search_depth: Optional[str] = "advanced",
        include_domains: Optional[list[str]] = None,
        exclude_domains: Optional[list[str]] = None,
        include_answer: Optional[bool] = False,
        include_raw_content: Optional[bool] = False,
        include_images: Optional[bool] = False,
    ) -> str:
        """
        Retrieves cleaned search results from the Tavily Search API.

        Args:
            query (str): The search query.
            max_results (int, optional): The maximum number of results to retrieve. Defaults to 3.
            search_depth (str, optional): The search depth. Defaults to "advanced".
            include_domains (list[str], optional): The domains to include in the search. Defaults to None (no restriction).
            exclude_domains (list[str], optional): The domains to exclude from the search. Defaults to None (no exclusion).
            include_answer (bool, optional): Whether to include answer in the search results. Defaults to False.
            include_raw_content (bool, optional): Whether to include raw content in the search results. Defaults to False.
            include_images (bool, optional): Whether to include images in the search results. Defaults to False.

        Returns:
            str: The cleaned search results, one "url\\ncontent" entry per result.
        """
        raw_search_results = self.raw_results(
            query,
            max_results=max_results,
            search_depth=search_depth,
            include_domains=include_domains,
            exclude_domains=exclude_domains,
            include_answer=include_answer,
            include_raw_content=include_raw_content,
            include_images=include_images,
        )
        return self.clean_results(raw_search_results["results"])

    def clean_results(self, results: list[dict]) -> str:
        """
        Cleans the raw search results.

        Args:
            results (list): The raw search results.

        Returns:
            str: "url\\ncontent" pairs joined by newlines (empty string if no results).
        """
        # Keep only the url/content fields of each hit and flatten to one
        # string.  (The original annotated this as list[dict] but always
        # returned a str — annotation and docstring fixed, behavior unchanged.)
        return "\n".join(f"{res['url']}\n{res['content']}" for res in results)
class TavilySearchTool(BuiltinTool):
    """
    A tool for searching Tavily using a given query.
    """

    def _invoke(
        self, user_id: str, tool_parameters: dict[str, Any]
    ) -> ToolInvokeMessage | list[ToolInvokeMessage]:
        """
        Invokes the Tavily search tool with the given user ID and tool parameters.

        Args:
            user_id (str): The ID of the user invoking the tool.
            tool_parameters (dict[str, Any]): The parameters for the Tavily search tool.

        Returns:
            ToolInvokeMessage | list[ToolInvokeMessage]: The result of the Tavily search tool invocation.
        """
        query = tool_parameters.get("query", "")
        api_key = self.runtime.credentials["tavily_api_key"]
        if not query:
            return self.create_text_message("Please input query")
        tavily_search = TavilySearch(api_key)
        results = tavily_search.results(query)
        # (Removed a stray debug `print(results)` that leaked search results
        # to stdout on every invocation.)
        if not results:
            return self.create_text_message(f"No results found for '{query}' in Tavily")
        else:
            return self.create_text_message(text=results)
# Tool manifest for the Tavily search tool.
identity:
  name: tavily_search
  author: Yash Parmar
  label:
    en_US: TavilySearch
    zh_Hans: TavilySearch
    pt_BR: TavilySearch
description:
  human:
    en_US: A tool for search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.
    zh_Hans: 专为人工智能代理 (LLM) 构建的搜索引擎工具,可快速提供实时、准确和真实的结果。
    pt_BR: A tool for search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.
  llm: A tool for search engine built specifically for AI agents (LLMs), delivering real-time, accurate, and factual results at speed.
parameters:
  - name: query
    type: string
    required: true
    label:
      en_US: Query string
      zh_Hans: 查询语句
      pt_BR: Query string
    human_description:
      en_US: used for searching
      zh_Hans: 用于搜索网页内容
      pt_BR: used for searching
    llm_description: key words for searching
    # Filled in by the LLM at invocation time.
    form: llm
from typing import Any, Union
import httpx
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.tool.builtin_tool import BuiltinTool
class WecomRepositoriesTool(BuiltinTool):
    def _invoke(self, user_id: str, tool_parameters: dict[str, Any]
                ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
        """
        Post a plain-text message to a Wecom group chat via its group bot webhook.
        """
        # Both parameters are mandatory — bail out early with a clear error.
        content = tool_parameters.get('content', '')
        if not content:
            return self.create_text_message('Invalid parameter content')
        hook_key = tool_parameters.get('hook_key', '')
        if not hook_key:
            return self.create_text_message('Invalid parameter hook_key')

        msgtype = 'text'
        api_url = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send'
        headers = {'Content-Type': 'application/json'}
        # The webhook key is passed as a query parameter, not in the body.
        params = {'key': hook_key}
        payload = {"msgtype": msgtype, "text": {"content": content}}

        try:
            response = httpx.post(api_url, headers=headers, params=params, json=payload)
            if response.is_success:
                return self.create_text_message("Text message sent successfully")
            return self.create_text_message(
                f"Failed to send the text message, status code: {response.status_code}, response: {response.text}")
        except Exception as e:
            # Network/transport failures are reported back as text rather
            # than raised, matching the other error paths above.
            return self.create_text_message("Failed to send message to group chat bot. {}".format(e))
# Tool manifest for sending group messages via a Wecom group bot webhook.
identity:
  name: wecom_group_bot
  author: Bowen Liang
  label:
    en_US: Send Group Message
    zh_Hans: 发送群消息
    pt_BR: Send Group Message
  icon: icon.svg
description:
  human:
    en_US: Sending a group message on Wecom via the webhook of group bot
    zh_Hans: 通过企业微信的群机器人webhook发送群消息
    pt_BR: Sending a group message on Wecom via the webhook of group bot
  llm: A tool for sending messages to a chat group on Wecom(企业微信) .
parameters:
  - name: hook_key
    type: string
    required: true
    label:
      en_US: Wecom Group bot webhook key
      zh_Hans: 群机器人webhook的key
      pt_BR: Wecom Group bot webhook key
    human_description:
      en_US: Wecom Group bot webhook key
      zh_Hans: 群机器人webhook的key
      pt_BR: Wecom Group bot webhook key
    # Supplied by the user through a form, not generated by the LLM.
    form: form
  - name: content
    type: string
    required: true
    label:
      en_US: content
      zh_Hans: 消息内容
      pt_BR: content
    human_description:
      en_US: Content to sent to the group.
      zh_Hans: 群消息文本
      pt_BR: Content to sent to the group.
    llm_description: Content of the message
    form: llm
from core.tools.provider.builtin.wecom.tools.wecom_group_bot import WecomRepositoriesTool
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
class WecomProvider(BuiltinToolProviderController):
    def _validate_credentials(self, credentials: dict) -> None:
        # Nothing to validate: the Wecom group bot takes its webhook key as a
        # per-invocation tool parameter, not as a provider credential.
        # NOTE(review): this instantiation is created and discarded — it looks
        # like a placeholder; confirm the constructor has no required side
        # effect before removing it.
        WecomRepositoriesTool()
        pass
# Provider manifest for the Wecom (企业微信) group bot tool.
identity:
  author: Bowen Liang
  name: wecom
  label:
    en_US: Wecom
    zh_Hans: 企业微信
    pt_BR: Wecom
  description:
    en_US: Wecom group bot
    zh_Hans: 企业微信群机器人
    pt_BR: Wecom group bot
  icon: icon.png
# Intentionally has no entries: the group bot webhook key is collected as a
# tool parameter (see wecom_group_bot.yaml) rather than a provider credential.
credentials_for_provider:
import json import json
from json import dumps from json import dumps
from typing import Any, Union from typing import Any, Union
from urllib.parse import urlencode
import httpx import httpx
import requests import requests
...@@ -203,6 +204,8 @@ class ApiTool(Tool): ...@@ -203,6 +204,8 @@ class ApiTool(Tool):
if 'Content-Type' in headers: if 'Content-Type' in headers:
if headers['Content-Type'] == 'application/json': if headers['Content-Type'] == 'application/json':
body = dumps(body) body = dumps(body)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded':
body = urlencode(body)
else: else:
body = body body = body
......
...@@ -35,7 +35,7 @@ docx2txt==0.8 ...@@ -35,7 +35,7 @@ docx2txt==0.8
pypdfium2==4.16.0 pypdfium2==4.16.0
resend~=0.7.0 resend~=0.7.0
pyjwt~=2.8.0 pyjwt~=2.8.0
anthropic~=0.7.7 anthropic~=0.17.0
newspaper3k==0.2.8 newspaper3k==0.2.8
google-api-python-client==2.90.0 google-api-python-client==2.90.0
wikipedia==1.4.0 wikipedia==1.4.0
...@@ -52,7 +52,7 @@ safetensors==0.3.2 ...@@ -52,7 +52,7 @@ safetensors==0.3.2
zhipuai==1.0.7 zhipuai==1.0.7
werkzeug~=3.0.1 werkzeug~=3.0.1
pymilvus==2.3.0 pymilvus==2.3.0
qdrant-client==1.6.4 qdrant-client==1.7.3
cohere~=4.44 cohere~=4.44
pyyaml~=6.0.1 pyyaml~=6.0.1
numpy~=1.25.2 numpy~=1.25.2
......
...@@ -37,7 +37,7 @@ from services.errors.account import NoPermissionError ...@@ -37,7 +37,7 @@ from services.errors.account import NoPermissionError
from services.errors.dataset import DatasetNameDuplicateError from services.errors.dataset import DatasetNameDuplicateError
from services.errors.document import DocumentIndexingError from services.errors.document import DocumentIndexingError
from services.errors.file import FileNotExistsError from services.errors.file import FileNotExistsError
from services.feature_service import FeatureService from services.feature_service import FeatureModel, FeatureService
from services.vector_service import VectorService from services.vector_service import VectorService
from tasks.clean_notion_document_task import clean_notion_document_task from tasks.clean_notion_document_task import clean_notion_document_task
from tasks.deal_dataset_vector_index_task import deal_dataset_vector_index_task from tasks.deal_dataset_vector_index_task import deal_dataset_vector_index_task
...@@ -469,6 +469,9 @@ class DocumentService: ...@@ -469,6 +469,9 @@ class DocumentService:
batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT']) batch_upload_limit = int(current_app.config['BATCH_UPLOAD_LIMIT'])
if count > batch_upload_limit: if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.") raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
DocumentService.check_documents_upload_quota(count, features)
# if dataset is empty, update dataset data_source_type # if dataset is empty, update dataset data_source_type
if not dataset.data_source_type: if not dataset.data_source_type:
dataset.data_source_type = document_data["data_source"]["type"] dataset.data_source_type = document_data["data_source"]["type"]
...@@ -619,6 +622,12 @@ class DocumentService: ...@@ -619,6 +622,12 @@ class DocumentService:
return documents, batch return documents, batch
@staticmethod
def check_documents_upload_quota(count: int, features: FeatureModel):
can_upload_size = features.documents_upload_quota.limit - features.documents_upload_quota.size
if count > can_upload_size:
raise ValueError(f'You have reached the limit of your subscription. Only {can_upload_size} documents can be uploaded.')
@staticmethod @staticmethod
def build_document(dataset: Dataset, process_rule_id: str, data_source_type: str, document_form: str, def build_document(dataset: Dataset, process_rule_id: str, data_source_type: str, document_form: str,
document_language: str, data_source_info: dict, created_from: str, position: int, document_language: str, data_source_info: dict, created_from: str, position: int,
...@@ -763,6 +772,8 @@ class DocumentService: ...@@ -763,6 +772,8 @@ class DocumentService:
if count > batch_upload_limit: if count > batch_upload_limit:
raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.") raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
DocumentService.check_documents_upload_quota(count, features)
embedding_model = None embedding_model = None
dataset_collection_binding_id = None dataset_collection_binding_id = None
retrieval_model = None retrieval_model = None
......
...@@ -25,6 +25,7 @@ class FeatureModel(BaseModel): ...@@ -25,6 +25,7 @@ class FeatureModel(BaseModel):
apps: LimitationModel = LimitationModel(size=0, limit=10) apps: LimitationModel = LimitationModel(size=0, limit=10)
vector_space: LimitationModel = LimitationModel(size=0, limit=5) vector_space: LimitationModel = LimitationModel(size=0, limit=5)
annotation_quota_limit: LimitationModel = LimitationModel(size=0, limit=10) annotation_quota_limit: LimitationModel = LimitationModel(size=0, limit=10)
documents_upload_quota: LimitationModel = LimitationModel(size=0, limit=50)
docs_processing: str = 'standard' docs_processing: str = 'standard'
can_replace_logo: bool = False can_replace_logo: bool = False
...@@ -63,6 +64,9 @@ class FeatureService: ...@@ -63,6 +64,9 @@ class FeatureService:
features.vector_space.size = billing_info['vector_space']['size'] features.vector_space.size = billing_info['vector_space']['size']
features.vector_space.limit = billing_info['vector_space']['limit'] features.vector_space.limit = billing_info['vector_space']['limit']
features.documents_upload_quota.size = billing_info['documents_upload_quota']['size']
features.documents_upload_quota.limit = billing_info['documents_upload_quota']['limit']
features.annotation_quota_limit.size = billing_info['annotation_quota_limit']['size'] features.annotation_quota_limit.size = billing_info['annotation_quota_limit']['size']
features.annotation_quota_limit.limit = billing_info['annotation_quota_limit']['limit'] features.annotation_quota_limit.limit = billing_info['annotation_quota_limit']['limit']
......
...@@ -498,12 +498,16 @@ class ToolManageService: ...@@ -498,12 +498,16 @@ class ToolManageService:
@staticmethod @staticmethod
def test_api_tool_preview( def test_api_tool_preview(
tenant_id: str, tool_name: str, credentials: dict, parameters: dict, schema_type: str, schema: str tenant_id: str,
provider_name: str,
tool_name: str,
credentials: dict,
parameters: dict,
schema_type: str,
schema: str
): ):
""" """
test api tool before adding api tool provider test api tool before adding api tool provider
1. parse schema into tool bundle
""" """
if schema_type not in [member.value for member in ApiProviderSchemaType]: if schema_type not in [member.value for member in ApiProviderSchemaType]:
raise ValueError(f'invalid schema type {schema_type}') raise ValueError(f'invalid schema type {schema_type}')
...@@ -518,6 +522,12 @@ class ToolManageService: ...@@ -518,6 +522,12 @@ class ToolManageService:
if tool_bundle is None: if tool_bundle is None:
raise ValueError(f'invalid tool name {tool_name}') raise ValueError(f'invalid tool name {tool_name}')
db_provider: ApiToolProvider = db.session.query(ApiToolProvider).filter(
ApiToolProvider.tenant_id == tenant_id,
ApiToolProvider.name == provider_name,
).first()
if not db_provider:
# create a fake db provider # create a fake db provider
db_provider = ApiToolProvider( db_provider = ApiToolProvider(
tenant_id='', user_id='', name='', icon='', tenant_id='', user_id='', name='', icon='',
...@@ -539,6 +549,19 @@ class ToolManageService: ...@@ -539,6 +549,19 @@ class ToolManageService:
# load tools into provider entity # load tools into provider entity
provider_controller.load_bundled_tools(tool_bundles) provider_controller.load_bundled_tools(tool_bundles)
# decrypt credentials
if db_provider.id:
tool_configuration = ToolConfiguration(
tenant_id=tenant_id,
provider_controller=provider_controller
)
decrypted_credentials = tool_configuration.decrypt_tool_credentials(credentials)
# check if the credential has changed, save the original credential
masked_credentials = tool_configuration.mask_tool_credentials(decrypted_credentials)
for name, value in credentials.items():
if name in masked_credentials and value == masked_credentials[name]:
credentials[name] = decrypted_credentials[name]
try: try:
provider_controller.validate_credentials_format(credentials) provider_controller.validate_credentials_format(credentials)
# get tool # get tool
......
import os import os
from time import sleep from time import sleep
from typing import Any, Generator, List, Literal, Union from typing import Any, Literal, Union, Iterable
from anthropic.resources import Messages
from anthropic.types.message_delta_event import Delta
import anthropic import anthropic
import pytest import pytest
from _pytest.monkeypatch import MonkeyPatch from _pytest.monkeypatch import MonkeyPatch
from anthropic import Anthropic from anthropic import Anthropic, Stream
from anthropic._types import NOT_GIVEN, Body, Headers, NotGiven, Query from anthropic.types import MessageParam, Message, MessageStreamEvent, \
from anthropic.resources.completions import Completions ContentBlock, MessageStartEvent, Usage, TextDelta, MessageDeltaEvent, MessageStopEvent, ContentBlockDeltaEvent, \
from anthropic.types import Completion, completion_create_params MessageDeltaUsage
MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true' MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'
class MockAnthropicClass(object): class MockAnthropicClass(object):
@staticmethod @staticmethod
def mocked_anthropic_chat_create_sync(model: str) -> Completion: def mocked_anthropic_chat_create_sync(model: str) -> Message:
return Completion( return Message(
completion='hello, I\'m a chatbot from anthropic', id='msg-123',
type='message',
role='assistant',
content=[ContentBlock(text='hello, I\'m a chatbot from anthropic', type='text')],
model=model, model=model,
stop_reason='stop_sequence' stop_reason='stop_sequence',
usage=Usage(
input_tokens=1,
output_tokens=1
)
) )
@staticmethod @staticmethod
def mocked_anthropic_chat_create_stream(model: str) -> Generator[Completion, None, None]: def mocked_anthropic_chat_create_stream(model: str) -> Stream[MessageStreamEvent]:
full_response_text = "hello, I'm a chatbot from anthropic" full_response_text = "hello, I'm a chatbot from anthropic"
for i in range(0, len(full_response_text) + 1): yield MessageStartEvent(
sleep(0.1) type='message_start',
if i == len(full_response_text): message=Message(
yield Completion( id='msg-123',
completion='', content=[],
role='assistant',
model=model, model=model,
stop_reason=None,
type='message',
usage=Usage(
input_tokens=1,
output_tokens=1
)
)
)
index = 0
for i in range(0, len(full_response_text)):
sleep(0.1)
yield ContentBlockDeltaEvent(
type='content_block_delta',
delta=TextDelta(text=full_response_text[i], type='text_delta'),
index=index
)
index += 1
yield MessageDeltaEvent(
type='message_delta',
delta=Delta(
stop_reason='stop_sequence' stop_reason='stop_sequence'
),
usage=MessageDeltaUsage(
output_tokens=1
) )
else:
yield Completion(
completion=full_response_text[i],
model=model,
stop_reason=''
) )
def mocked_anthropic(self: Completions, *, yield MessageStopEvent(type='message_stop')
max_tokens_to_sample: int,
model: Union[str, Literal["claude-2.1", "claude-instant-1"]], def mocked_anthropic(self: Messages, *,
prompt: str, max_tokens: int,
messages: Iterable[MessageParam],
model: str,
stream: Literal[True], stream: Literal[True],
**kwargs: Any **kwargs: Any
) -> Union[Completion, Generator[Completion, None, None]]: ) -> Union[Message, Stream[MessageStreamEvent]]:
if len(self._client.api_key) < 18: if len(self._client.api_key) < 18:
raise anthropic.AuthenticationError('Invalid API key') raise anthropic.AuthenticationError('Invalid API key')
...@@ -55,10 +90,11 @@ class MockAnthropicClass(object): ...@@ -55,10 +90,11 @@ class MockAnthropicClass(object):
else: else:
return MockAnthropicClass.mocked_anthropic_chat_create_sync(model=model) return MockAnthropicClass.mocked_anthropic_chat_create_sync(model=model)
@pytest.fixture @pytest.fixture
def setup_anthropic_mock(request, monkeypatch: MonkeyPatch): def setup_anthropic_mock(request, monkeypatch: MonkeyPatch):
if MOCK: if MOCK:
monkeypatch.setattr(Completions, 'create', MockAnthropicClass.mocked_anthropic) monkeypatch.setattr(Messages, 'create', MockAnthropicClass.mocked_anthropic)
yield yield
......
...@@ -15,14 +15,14 @@ def test_validate_credentials(setup_anthropic_mock): ...@@ -15,14 +15,14 @@ def test_validate_credentials(setup_anthropic_mock):
with pytest.raises(CredentialsValidateFailedError): with pytest.raises(CredentialsValidateFailedError):
model.validate_credentials( model.validate_credentials(
model='claude-instant-1', model='claude-instant-1.2',
credentials={ credentials={
'anthropic_api_key': 'invalid_key' 'anthropic_api_key': 'invalid_key'
} }
) )
model.validate_credentials( model.validate_credentials(
model='claude-instant-1', model='claude-instant-1.2',
credentials={ credentials={
'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY') 'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY')
} }
...@@ -33,7 +33,7 @@ def test_invoke_model(setup_anthropic_mock): ...@@ -33,7 +33,7 @@ def test_invoke_model(setup_anthropic_mock):
model = AnthropicLargeLanguageModel() model = AnthropicLargeLanguageModel()
response = model.invoke( response = model.invoke(
model='claude-instant-1', model='claude-instant-1.2',
credentials={ credentials={
'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY'), 'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY'),
'anthropic_api_url': os.environ.get('ANTHROPIC_API_URL') 'anthropic_api_url': os.environ.get('ANTHROPIC_API_URL')
...@@ -49,7 +49,7 @@ def test_invoke_model(setup_anthropic_mock): ...@@ -49,7 +49,7 @@ def test_invoke_model(setup_anthropic_mock):
model_parameters={ model_parameters={
'temperature': 0.0, 'temperature': 0.0,
'top_p': 1.0, 'top_p': 1.0,
'max_tokens_to_sample': 10 'max_tokens': 10
}, },
stop=['How'], stop=['How'],
stream=False, stream=False,
...@@ -64,7 +64,7 @@ def test_invoke_stream_model(setup_anthropic_mock): ...@@ -64,7 +64,7 @@ def test_invoke_stream_model(setup_anthropic_mock):
model = AnthropicLargeLanguageModel() model = AnthropicLargeLanguageModel()
response = model.invoke( response = model.invoke(
model='claude-instant-1', model='claude-instant-1.2',
credentials={ credentials={
'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY') 'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY')
}, },
...@@ -78,7 +78,7 @@ def test_invoke_stream_model(setup_anthropic_mock): ...@@ -78,7 +78,7 @@ def test_invoke_stream_model(setup_anthropic_mock):
], ],
model_parameters={ model_parameters={
'temperature': 0.0, 'temperature': 0.0,
'max_tokens_to_sample': 100 'max_tokens': 100
}, },
stream=True, stream=True,
user="abc-123" user="abc-123"
...@@ -97,7 +97,7 @@ def test_get_num_tokens(): ...@@ -97,7 +97,7 @@ def test_get_num_tokens():
model = AnthropicLargeLanguageModel() model = AnthropicLargeLanguageModel()
num_tokens = model.get_num_tokens( num_tokens = model.get_num_tokens(
model='claude-instant-1', model='claude-instant-1.2',
credentials={ credentials={
'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY') 'anthropic_api_key': os.environ.get('ANTHROPIC_API_KEY')
}, },
......
...@@ -2,7 +2,7 @@ version: '3.1' ...@@ -2,7 +2,7 @@ version: '3.1'
services: services:
# API service # API service
api: api:
image: langgenius/dify-api:0.5.7 image: langgenius/dify-api:0.5.8
restart: always restart: always
environment: environment:
# Startup mode, 'api' starts the API server. # Startup mode, 'api' starts the API server.
...@@ -135,7 +135,7 @@ services: ...@@ -135,7 +135,7 @@ services:
# worker service # worker service
# The Celery worker for processing the queue. # The Celery worker for processing the queue.
worker: worker:
image: langgenius/dify-api:0.5.7 image: langgenius/dify-api:0.5.8
restart: always restart: always
environment: environment:
# Startup mode, 'worker' starts the Celery worker for processing the queue. # Startup mode, 'worker' starts the Celery worker for processing the queue.
...@@ -206,7 +206,7 @@ services: ...@@ -206,7 +206,7 @@ services:
# Frontend web application. # Frontend web application.
web: web:
image: langgenius/dify-web:0.5.7 image: langgenius/dify-web:0.5.8
restart: always restart: always
environment: environment:
EDITION: SELF_HOSTED EDITION: SELF_HOSTED
......
...@@ -20,7 +20,7 @@ import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' ...@@ -20,7 +20,7 @@ import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
const noDataIcon = ( const noDataIcon = (
<svg width="56" height="56" viewBox="0 0 56 56" fill="none" xmlns="http://www.w3.org/2000/svg"> <svg width="56" height="56" viewBox="0 0 56 56" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.4998 51.3333V39.6666M10.4998 16.3333V4.66663M4.6665 10.5H16.3332M4.6665 45.5H16.3332M30.3332 6.99996L26.2868 17.5206C25.6287 19.2315 25.2997 20.0869 24.7881 20.8065C24.3346 21.4442 23.7774 22.0014 23.1397 22.4549C22.4202 22.9665 21.5647 23.2955 19.8538 23.9535L9.33317 28L19.8539 32.0464C21.5647 32.7044 22.4202 33.0334 23.1397 33.5451C23.7774 33.9985 24.3346 34.5557 24.7881 35.1934C25.2997 35.913 25.6287 36.7684 26.2868 38.4793L30.3332 49L34.3796 38.4793C35.0376 36.7684 35.3666 35.913 35.8783 35.1934C36.3317 34.5557 36.8889 33.9985 37.5266 33.5451C38.2462 33.0334 39.1016 32.7044 40.8125 32.0464L51.3332 28L40.8125 23.9535C39.1016 23.2955 38.2462 22.9665 37.5266 22.4549C36.8889 22.0014 36.3317 21.4442 35.8783 20.8065C35.3666 20.0869 35.0376 19.2315 34.3796 17.5206L30.3332 6.99996Z" stroke="#EAECF0" strokeWidth="3" strokeLinecap="round" strokeLinejoin="round"/> <path d="M10.4998 51.3333V39.6666M10.4998 16.3333V4.66663M4.6665 10.5H16.3332M4.6665 45.5H16.3332M30.3332 6.99996L26.2868 17.5206C25.6287 19.2315 25.2997 20.0869 24.7881 20.8065C24.3346 21.4442 23.7774 22.0014 23.1397 22.4549C22.4202 22.9665 21.5647 23.2955 19.8538 23.9535L9.33317 28L19.8539 32.0464C21.5647 32.7044 22.4202 33.0334 23.1397 33.5451C23.7774 33.9985 24.3346 34.5557 24.7881 35.1934C25.2997 35.913 25.6287 36.7684 26.2868 38.4793L30.3332 49L34.3796 38.4793C35.0376 36.7684 35.3666 35.913 35.8783 35.1934C36.3317 34.5557 36.8889 33.9985 37.5266 33.5451C38.2462 33.0334 39.1016 32.7044 40.8125 32.0464L51.3332 28L40.8125 23.9535C39.1016 23.2955 38.2462 22.9665 37.5266 22.4549C36.8889 22.0014 36.3317 21.4442 35.8783 20.8065C35.3666 20.0869 35.0376 19.2315 34.3796 17.5206L30.3332 6.99996Z" stroke="#EAECF0" strokeWidth="3" strokeLinecap="round" strokeLinejoin="round" />
</svg> </svg>
) )
...@@ -33,9 +33,9 @@ export type IGetAutomaticResProps = { ...@@ -33,9 +33,9 @@ export type IGetAutomaticResProps = {
const genIcon = ( const genIcon = (
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg"> <svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3.6665 1.33332C3.6665 0.965133 3.36803 0.666656 2.99984 0.666656C2.63165 0.666656 2.33317 0.965133 2.33317 1.33332V2.33332H1.33317C0.964981 2.33332 0.666504 2.6318 0.666504 2.99999C0.666504 3.36818 0.964981 3.66666 1.33317 3.66666H2.33317V4.66666C2.33317 5.03485 2.63165 5.33332 2.99984 5.33332C3.36803 5.33332 3.6665 5.03485 3.6665 4.66666V3.66666H4.6665C5.03469 3.66666 5.33317 3.36818 5.33317 2.99999C5.33317 2.6318 5.03469 2.33332 4.6665 2.33332H3.6665V1.33332Z" fill="white"/> <path d="M3.6665 1.33332C3.6665 0.965133 3.36803 0.666656 2.99984 0.666656C2.63165 0.666656 2.33317 0.965133 2.33317 1.33332V2.33332H1.33317C0.964981 2.33332 0.666504 2.6318 0.666504 2.99999C0.666504 3.36818 0.964981 3.66666 1.33317 3.66666H2.33317V4.66666C2.33317 5.03485 2.63165 5.33332 2.99984 5.33332C3.36803 5.33332 3.6665 5.03485 3.6665 4.66666V3.66666H4.6665C5.03469 3.66666 5.33317 3.36818 5.33317 2.99999C5.33317 2.6318 5.03469 2.33332 4.6665 2.33332H3.6665V1.33332Z" fill="white" />
<path d="M3.6665 11.3333C3.6665 10.9651 3.36803 10.6667 2.99984 10.6667C2.63165 10.6667 2.33317 10.9651 2.33317 11.3333V12.3333H1.33317C0.964981 12.3333 0.666504 12.6318 0.666504 13C0.666504 13.3682 0.964981 13.6667 1.33317 13.6667H2.33317V14.6667C2.33317 15.0348 2.63165 15.3333 2.99984 15.3333C3.36803 15.3333 3.6665 15.0348 3.6665 14.6667V13.6667H4.6665C5.03469 13.6667 5.33317 13.3682 5.33317 13C5.33317 12.6318 5.03469 12.3333 4.6665 12.3333H3.6665V11.3333Z" fill="white"/> <path d="M3.6665 11.3333C3.6665 10.9651 3.36803 10.6667 2.99984 10.6667C2.63165 10.6667 2.33317 10.9651 2.33317 11.3333V12.3333H1.33317C0.964981 12.3333 0.666504 12.6318 0.666504 13C0.666504 13.3682 0.964981 13.6667 1.33317 13.6667H2.33317V14.6667C2.33317 15.0348 2.63165 15.3333 2.99984 15.3333C3.36803 15.3333 3.6665 15.0348 3.6665 14.6667V13.6667H4.6665C5.03469 13.6667 5.33317 13.3682 5.33317 13C5.33317 12.6318 5.03469 12.3333 4.6665 12.3333H3.6665V11.3333Z" fill="white" />
<path d="M9.28873 1.76067C9.18971 1.50321 8.94235 1.33332 8.6665 1.33332C8.39066 1.33332 8.1433 1.50321 8.04427 1.76067L6.88815 4.76658C6.68789 5.28727 6.62495 5.43732 6.53887 5.55838C6.4525 5.67986 6.34637 5.78599 6.2249 5.87236C6.10384 5.95844 5.95379 6.02137 5.43309 6.22164L2.42718 7.37776C2.16972 7.47678 1.99984 7.72414 1.99984 7.99999C1.99984 8.27584 2.16972 8.5232 2.42718 8.62222L5.43309 9.77834C5.95379 9.97861 6.10384 10.0415 6.2249 10.1276C6.34637 10.214 6.4525 10.3201 6.53887 10.4416C6.62495 10.5627 6.68789 10.7127 6.88816 11.2334L8.04427 14.2393C8.1433 14.4968 8.39066 14.6667 8.6665 14.6667C8.94235 14.6667 9.18971 14.4968 9.28873 14.2393L10.4449 11.2334C10.6451 10.7127 10.7081 10.5627 10.7941 10.4416C10.8805 10.3201 10.9866 10.214 11.1081 10.1276C11.2292 10.0415 11.3792 9.97861 11.8999 9.77834L14.9058 8.62222C15.1633 8.5232 15.3332 8.27584 15.3332 7.99999C15.3332 7.72414 15.1633 7.47678 14.9058 7.37776L11.8999 6.22164C11.3792 6.02137 11.2292 5.95844 11.1081 5.87236C10.9866 5.78599 10.8805 5.67986 10.7941 5.55838C10.7081 5.43732 10.6451 5.28727 10.4449 4.76658L9.28873 1.76067Z" fill="white"/> <path d="M9.28873 1.76067C9.18971 1.50321 8.94235 1.33332 8.6665 1.33332C8.39066 1.33332 8.1433 1.50321 8.04427 1.76067L6.88815 4.76658C6.68789 5.28727 6.62495 5.43732 6.53887 5.55838C6.4525 5.67986 6.34637 5.78599 6.2249 5.87236C6.10384 5.95844 5.95379 6.02137 5.43309 6.22164L2.42718 7.37776C2.16972 7.47678 1.99984 7.72414 1.99984 7.99999C1.99984 8.27584 2.16972 8.5232 2.42718 8.62222L5.43309 9.77834C5.95379 9.97861 6.10384 10.0415 6.2249 10.1276C6.34637 10.214 6.4525 10.3201 6.53887 10.4416C6.62495 10.5627 6.68789 10.7127 6.88816 11.2334L8.04427 14.2393C8.1433 14.4968 8.39066 14.6667 8.6665 14.6667C8.94235 14.6667 9.18971 14.4968 9.28873 14.2393L10.4449 11.2334C10.6451 10.7127 10.7081 10.5627 10.7941 10.4416C10.8805 10.3201 10.9866 10.214 11.1081 10.1276C11.2292 10.0415 11.3792 9.97861 11.8999 9.77834L14.9058 8.62222C15.1633 8.5232 15.3332 8.27584 15.3332 
7.99999C15.3332 7.72414 15.1633 7.47678 14.9058 7.37776L11.8999 6.22164C11.3792 6.02137 11.2292 5.95844 11.1081 5.87236C10.9866 5.78599 10.8805 5.67986 10.7941 5.55838C10.7081 5.43732 10.6451 5.28727 10.4449 4.76658L9.28873 1.76067Z" fill="white" />
</svg> </svg>
) )
...@@ -74,14 +74,14 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({ ...@@ -74,14 +74,14 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
const [res, setRes] = React.useState<AutomaticRes | null>(null) const [res, setRes] = React.useState<AutomaticRes | null>(null)
const renderLoading = ( const renderLoading = (
<div className='grow flex flex-col items-center justify-center h-full space-y-3'> <div className='w-0 grow flex flex-col items-center justify-center h-full space-y-3'>
<Loading /> <Loading />
<div className='text-[13px] text-gray-400'>{t('appDebug.automatic.loading')}</div> <div className='text-[13px] text-gray-400'>{t('appDebug.automatic.loading')}</div>
</div> </div>
) )
const renderNoData = ( const renderNoData = (
<div className='grow flex flex-col items-center justify-center h-full space-y-3'> <div className='w-0 grow flex flex-col items-center px-8 justify-center h-full space-y-3'>
{noDataIcon} {noDataIcon}
<div className='text-[13px] text-gray-400'>{t('appDebug.automatic.noData')}</div> <div className='text-[13px] text-gray-400'>{t('appDebug.automatic.noData')}</div>
</div> </div>
...@@ -142,7 +142,7 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({ ...@@ -142,7 +142,7 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
<div className='text-[13px] font-normal text-gray-500'>{t('appDebug.automatic.description')}</div> <div className='text-[13px] font-normal text-gray-500'>{t('appDebug.automatic.description')}</div>
</div> </div>
{/* inputs */} {/* inputs */}
<div className='mt-12 space-y-5'> <div className='mt-2 space-y-5'>
<div className='space-y-2'> <div className='space-y-2'>
<div className='text-[13px] font-medium text-gray-900'>{t('appDebug.automatic.intendedAudience')}</div> <div className='text-[13px] font-medium text-gray-900'>{t('appDebug.automatic.intendedAudience')}</div>
<input className="w-full h-8 px-3 text-[13px] font-normal bg-gray-50 rounded-lg" placeholder={t('appDebug.automatic.intendedAudiencePlaceHolder') as string} value={audiences} onChange={e => setAudiences(e.target.value)} /> <input className="w-full h-8 px-3 text-[13px] font-normal bg-gray-50 rounded-lg" placeholder={t('appDebug.automatic.intendedAudiencePlaceHolder') as string} value={audiences} onChange={e => setAudiences(e.target.value)} />
...@@ -167,8 +167,8 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({ ...@@ -167,8 +167,8 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
</div>} </div>}
{(!isLoading && res) && ( {(!isLoading && res) && (
<div className='grow px-8 pt-6 h-full overflow-y-auto'> <div className='w-0 grow px-8 pt-6 h-full overflow-y-auto'>
<div className='mb-4 w-1/2 text-lg font-medium text-gray-900'>{t('appDebug.automatic.resTitle')}</div> <div className='mb-4 text-lg font-medium text-gray-900'>{t('appDebug.automatic.resTitle')}</div>
<ConfigPrompt <ConfigPrompt
mode={mode} mode={mode}
...@@ -196,7 +196,7 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({ ...@@ -196,7 +196,7 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
</div> </div>
)} )}
<div className='sticky bottom-0 flex justify-end right-0 py-4'> <div className='sticky bottom-0 flex justify-end right-0 py-4 bg-white'>
<Button onClick={onClose}>{t('common.operation.cancel')}</Button> <Button onClick={onClose}>{t('common.operation.cancel')}</Button>
<Button type='primary' className='ml-2' onClick={() => { <Button type='primary' className='ml-2' onClick={() => {
setShowConfirmOverwrite(true) setShowConfirmOverwrite(true)
......
...@@ -40,6 +40,7 @@ const TextToSpeech: FC = () => { ...@@ -40,6 +40,7 @@ const TextToSpeech: FC = () => {
{ languageInfo?.example && ( { languageInfo?.example && (
<AudioBtn <AudioBtn
value={languageInfo?.example} value={languageInfo?.example}
voice={voiceItem?.value}
isAudition={true} isAudition={true}
/> />
)} )}
......
...@@ -9,12 +9,14 @@ import { textToAudio } from '@/service/share' ...@@ -9,12 +9,14 @@ import { textToAudio } from '@/service/share'
type AudioBtnProps = { type AudioBtnProps = {
value: string value: string
voice?: string
className?: string className?: string
isAudition?: boolean isAudition?: boolean
} }
const AudioBtn = ({ const AudioBtn = ({
value, value,
voice,
className, className,
isAudition, isAudition,
}: AudioBtnProps) => { }: AudioBtnProps) => {
...@@ -27,13 +29,16 @@ const AudioBtn = ({ ...@@ -27,13 +29,16 @@ const AudioBtn = ({
const pathname = usePathname() const pathname = usePathname()
const removeCodeBlocks = (inputText: any) => { const removeCodeBlocks = (inputText: any) => {
const codeBlockRegex = /```[\s\S]*?```/g const codeBlockRegex = /```[\s\S]*?```/g
if (inputText)
return inputText.replace(codeBlockRegex, '') return inputText.replace(codeBlockRegex, '')
return ''
} }
const playAudio = async () => { const playAudio = async () => {
const formData = new FormData() const formData = new FormData()
if (value !== '') { if (value !== '') {
formData.append('text', removeCodeBlocks(value)) formData.append('text', removeCodeBlocks(value))
formData.append('voice', removeCodeBlocks(voice))
let url = '' let url = ''
let isPublic = false let isPublic = false
...@@ -56,13 +61,14 @@ const AudioBtn = ({ ...@@ -56,13 +61,14 @@ const AudioBtn = ({
const audioUrl = URL.createObjectURL(blob) const audioUrl = URL.createObjectURL(blob)
const audio = new Audio(audioUrl) const audio = new Audio(audioUrl)
audioRef.current = audio audioRef.current = audio
audio.play().then(() => { audio.play().then(() => {}).catch(() => {
setIsPlaying(true)
}).catch(() => {
setIsPlaying(false) setIsPlaying(false)
URL.revokeObjectURL(audioUrl) URL.revokeObjectURL(audioUrl)
}) })
audio.onended = () => setHasEnded(true) audio.onended = () => {
setHasEnded(true)
setIsPlaying(false)
}
} }
catch (error) { catch (error) {
setIsPlaying(false) setIsPlaying(false)
...@@ -70,23 +76,33 @@ const AudioBtn = ({ ...@@ -70,23 +76,33 @@ const AudioBtn = ({
} }
} }
} }
const togglePlayPause = () => { const togglePlayPause = () => {
if (audioRef.current) { if (audioRef.current) {
if (isPlaying) { if (isPlaying) {
if (!hasEnded) {
setPause(false)
audioRef.current.play()
}
if (!isPause) {
setPause(true) setPause(true)
audioRef.current.pause() audioRef.current.pause()
} }
else if (!hasEnded) { }
else if (!isPlaying) {
if (isPause) {
setPause(false) setPause(false)
audioRef.current.play() audioRef.current.play()
} }
else if (!isPlaying) { else {
setHasEnded(false)
playAudio().then() playAudio().then()
} }
}
setIsPlaying(prevIsPlaying => !prevIsPlaying) setIsPlaying(prevIsPlaying => !prevIsPlaying)
} }
else { else {
setIsPlaying(true)
if (!isPlaying)
playAudio().then() playAudio().then()
} }
} }
...@@ -102,7 +118,7 @@ const AudioBtn = ({ ...@@ -102,7 +118,7 @@ const AudioBtn = ({
className={`box-border p-0.5 flex items-center justify-center cursor-pointer ${isAudition || 'rounded-md bg-white'}`} className={`box-border p-0.5 flex items-center justify-center cursor-pointer ${isAudition || 'rounded-md bg-white'}`}
style={{ boxShadow: !isAudition ? '0px 4px 8px -2px rgba(16, 24, 40, 0.1), 0px 2px 4px -2px rgba(16, 24, 40, 0.06)' : '' }} style={{ boxShadow: !isAudition ? '0px 4px 8px -2px rgba(16, 24, 40, 0.1), 0px 2px 4px -2px rgba(16, 24, 40, 0.06)' : '' }}
onClick={togglePlayPause}> onClick={togglePlayPause}>
<div className={`w-6 h-6 rounded-md ${!isAudition ? 'hover:bg-gray-200' : 'hover:bg-gray-50'} ${!isPause ? ((isPlaying && !hasEnded) ? s.playIcon : s.stopIcon) : s.pauseIcon}`}></div> <div className={`w-6 h-6 rounded-md ${!isAudition ? 'hover:bg-gray-200' : 'hover:bg-gray-50'} ${(isPlaying && !hasEnded) ? s.pauseIcon : s.playIcon}`}></div>
</div> </div>
</Tooltip> </Tooltip>
</div> </div>
......
...@@ -8,9 +8,3 @@ ...@@ -8,9 +8,3 @@
background-position: center; background-position: center;
background-repeat: no-repeat; background-repeat: no-repeat;
} }
.stopIcon {
background-position: center;
background-repeat: no-repeat;
background-image: url(~@/app/components/develop/secret-key/assets/stop.svg);
}
\ No newline at end of file
...@@ -77,6 +77,7 @@ const Operation: FC<OperationProps> = ({ ...@@ -77,6 +77,7 @@ const Operation: FC<OperationProps> = ({
{(!isOpeningStatement && config?.text_to_speech?.enabled) && ( {(!isOpeningStatement && config?.text_to_speech?.enabled) && (
<AudioBtn <AudioBtn
value={content} value={content}
voice={config?.text_to_speech?.voice}
className='hidden group-hover:block' className='hidden group-hover:block'
/> />
)} )}
......
...@@ -16,6 +16,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = { ...@@ -16,6 +16,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = {
teamMembers: 1, teamMembers: 1,
buildApps: 10, buildApps: 10,
vectorSpace: 5, vectorSpace: 5,
documentsUploadQuota: 50,
documentProcessingPriority: Priority.standard, documentProcessingPriority: Priority.standard,
logHistory: 30, logHistory: 30,
customTools: unAvailable, customTools: unAvailable,
...@@ -32,6 +33,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = { ...@@ -32,6 +33,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = {
teamMembers: 3, teamMembers: 3,
buildApps: 50, buildApps: 50,
vectorSpace: 200, vectorSpace: 200,
documentsUploadQuota: 500,
documentProcessingPriority: Priority.priority, documentProcessingPriority: Priority.priority,
logHistory: NUM_INFINITE, logHistory: NUM_INFINITE,
customTools: 10, customTools: 10,
...@@ -48,6 +50,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = { ...@@ -48,6 +50,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = {
teamMembers: NUM_INFINITE, teamMembers: NUM_INFINITE,
buildApps: NUM_INFINITE, buildApps: NUM_INFINITE,
vectorSpace: 1000, vectorSpace: 1000,
documentsUploadQuota: 1000,
documentProcessingPriority: Priority.topPriority, documentProcessingPriority: Priority.topPriority,
logHistory: NUM_INFINITE, logHistory: NUM_INFINITE,
customTools: NUM_INFINITE, customTools: NUM_INFINITE,
...@@ -64,6 +67,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = { ...@@ -64,6 +67,7 @@ export const ALL_PLANS: Record<Plan, PlanInfo> = {
teamMembers: NUM_INFINITE, teamMembers: NUM_INFINITE,
buildApps: NUM_INFINITE, buildApps: NUM_INFINITE,
vectorSpace: NUM_INFINITE, vectorSpace: NUM_INFINITE,
documentsUploadQuota: NUM_INFINITE,
documentProcessingPriority: Priority.topPriority, documentProcessingPriority: Priority.topPriority,
logHistory: NUM_INFINITE, logHistory: NUM_INFINITE,
customTools: NUM_INFINITE, customTools: NUM_INFINITE,
......
...@@ -129,6 +129,9 @@ const PlanItem: FC<Props> = ({ ...@@ -129,6 +129,9 @@ const PlanItem: FC<Props> = ({
<div className='mt-3.5 flex items-center space-x-1'> <div className='mt-3.5 flex items-center space-x-1'>
<div>+ {t('billing.plansCommon.supportItems.logoChange')}</div> <div>+ {t('billing.plansCommon.supportItems.logoChange')}</div>
</div> </div>
<div className='mt-3.5 flex items-center space-x-1'>
<div>+ {t('billing.plansCommon.supportItems.bulkUpload')}</div>
</div>
<div className='mt-3.5 flex items-center space-x-1'> <div className='mt-3.5 flex items-center space-x-1'>
<div className='flex items-center'> <div className='flex items-center'>
+ +
...@@ -264,6 +267,10 @@ const PlanItem: FC<Props> = ({ ...@@ -264,6 +267,10 @@ const PlanItem: FC<Props> = ({
value={planInfo.vectorSpace === NUM_INFINITE ? t('billing.plansCommon.unlimited') as string : (planInfo.vectorSpace >= 1000 ? `${planInfo.vectorSpace / 1000}G` : `${planInfo.vectorSpace}MB`)} value={planInfo.vectorSpace === NUM_INFINITE ? t('billing.plansCommon.unlimited') as string : (planInfo.vectorSpace >= 1000 ? `${planInfo.vectorSpace / 1000}G` : `${planInfo.vectorSpace}MB`)}
tooltip={t('billing.plansCommon.vectorSpaceBillingTooltip') as string} tooltip={t('billing.plansCommon.vectorSpaceBillingTooltip') as string}
/> />
<KeyValue
label={t('billing.plansCommon.documentsUploadQuota')}
value={planInfo.vectorSpace === NUM_INFINITE ? t('billing.plansCommon.unlimited') as string : planInfo.documentsUploadQuota}
/>
<KeyValue <KeyValue
label={t('billing.plansCommon.documentProcessingPriority')} label={t('billing.plansCommon.documentProcessingPriority')}
value={t(`billing.plansCommon.priority.${planInfo.documentProcessingPriority}`) as string} value={t(`billing.plansCommon.priority.${planInfo.documentProcessingPriority}`) as string}
......
...@@ -17,6 +17,7 @@ export type PlanInfo = { ...@@ -17,6 +17,7 @@ export type PlanInfo = {
teamMembers: number teamMembers: number
buildApps: number buildApps: number
vectorSpace: number vectorSpace: number
documentsUploadQuota: number
documentProcessingPriority: Priority documentProcessingPriority: Priority
logHistory: number logHistory: number
customTools: string | number customTools: string | number
......
...@@ -13,6 +13,7 @@ import { fetchFileUploadConfig } from '@/service/common' ...@@ -13,6 +13,7 @@ import { fetchFileUploadConfig } from '@/service/common'
import { fetchSupportFileTypes } from '@/service/datasets' import { fetchSupportFileTypes } from '@/service/datasets'
import I18n from '@/context/i18n' import I18n from '@/context/i18n'
import { LanguagesSupported } from '@/i18n/language' import { LanguagesSupported } from '@/i18n/language'
import { IS_CE_EDITION } from '@/config'
const FILES_NUMBER_LIMIT = 20 const FILES_NUMBER_LIMIT = 20
...@@ -23,6 +24,7 @@ type IFileUploaderProps = { ...@@ -23,6 +24,7 @@ type IFileUploaderProps = {
onFileUpdate: (fileItem: FileItem, progress: number, list: FileItem[]) => void onFileUpdate: (fileItem: FileItem, progress: number, list: FileItem[]) => void
onFileListUpdate?: (files: FileItem[]) => void onFileListUpdate?: (files: FileItem[]) => void
onPreview: (file: File) => void onPreview: (file: File) => void
notSupportBatchUpload?: boolean
} }
const FileUploader = ({ const FileUploader = ({
...@@ -32,6 +34,7 @@ const FileUploader = ({ ...@@ -32,6 +34,7 @@ const FileUploader = ({
onFileUpdate, onFileUpdate,
onFileListUpdate, onFileListUpdate,
onPreview, onPreview,
notSupportBatchUpload,
}: IFileUploaderProps) => { }: IFileUploaderProps) => {
const { t } = useTranslation() const { t } = useTranslation()
const { notify } = useContext(ToastContext) const { notify } = useContext(ToastContext)
...@@ -40,6 +43,7 @@ const FileUploader = ({ ...@@ -40,6 +43,7 @@ const FileUploader = ({
const dropRef = useRef<HTMLDivElement>(null) const dropRef = useRef<HTMLDivElement>(null)
const dragRef = useRef<HTMLDivElement>(null) const dragRef = useRef<HTMLDivElement>(null)
const fileUploader = useRef<HTMLInputElement>(null) const fileUploader = useRef<HTMLInputElement>(null)
const hideUpload = notSupportBatchUpload && fileList.length > 0
const { data: fileUploadConfigResponse } = useSWR({ url: '/files/upload' }, fetchFileUploadConfig) const { data: fileUploadConfigResponse } = useSWR({ url: '/files/upload' }, fetchFileUploadConfig)
const { data: supportFileTypesResponse } = useSWR({ url: '/files/support-type' }, fetchSupportFileTypes) const { data: supportFileTypesResponse } = useSWR({ url: '/files/support-type' }, fetchSupportFileTypes)
...@@ -131,7 +135,7 @@ const FileUploader = ({ ...@@ -131,7 +135,7 @@ const FileUploader = ({
xhr: new XMLHttpRequest(), xhr: new XMLHttpRequest(),
data: formData, data: formData,
onprogress: onProgress, onprogress: onProgress,
}) }, false, undefined, '?source=datasets')
.then((res: File) => { .then((res: File) => {
const completeFile = { const completeFile = {
fileID: fileItem.fileID, fileID: fileItem.fileID,
...@@ -143,8 +147,8 @@ const FileUploader = ({ ...@@ -143,8 +147,8 @@ const FileUploader = ({
onFileUpdate(completeFile, 100, fileListCopy) onFileUpdate(completeFile, 100, fileListCopy)
return Promise.resolve({ ...completeFile }) return Promise.resolve({ ...completeFile })
}) })
.catch(() => { .catch((e) => {
notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.failed') }) notify({ type: 'error', message: e?.response?.code === 'forbidden' ? e?.response?.message : t('datasetCreation.stepOne.uploader.failed') })
onFileUpdate(fileItem, -2, fileListCopy) onFileUpdate(fileItem, -2, fileListCopy)
return Promise.resolve({ ...fileItem }) return Promise.resolve({ ...fileItem })
}) })
...@@ -177,7 +181,7 @@ const FileUploader = ({ ...@@ -177,7 +181,7 @@ const FileUploader = ({
if (!files.length) if (!files.length)
return false return false
if (files.length + fileList.length > FILES_NUMBER_LIMIT) { if (files.length + fileList.length > FILES_NUMBER_LIMIT && !IS_CE_EDITION) {
notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.filesNumber', { filesNumber: FILES_NUMBER_LIMIT }) }) notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.filesNumber', { filesNumber: FILES_NUMBER_LIMIT }) })
return false return false
} }
...@@ -252,16 +256,21 @@ const FileUploader = ({ ...@@ -252,16 +256,21 @@ const FileUploader = ({
return ( return (
<div className={s.fileUploader}> <div className={s.fileUploader}>
{!hideUpload && (
<input <input
ref={fileUploader} ref={fileUploader}
id="fileUploader" id="fileUploader"
style={{ display: 'none' }} style={{ display: 'none' }}
type="file" type="file"
multiple multiple={!notSupportBatchUpload}
accept={ACCEPTS.join(',')} accept={ACCEPTS.join(',')}
onChange={fileChangeHandle} onChange={fileChangeHandle}
/> />
)}
<div className={cn(s.title, titleClassName)}>{t('datasetCreation.stepOne.uploader.title')}</div> <div className={cn(s.title, titleClassName)}>{t('datasetCreation.stepOne.uploader.title')}</div>
{!hideUpload && (
<div ref={dropRef} className={cn(s.uploader, dragging && s.dragging)}> <div ref={dropRef} className={cn(s.uploader, dragging && s.dragging)}>
<div className='flex justify-center items-center min-h-6 mb-2'> <div className='flex justify-center items-center min-h-6 mb-2'>
<span className={s.uploadIcon} /> <span className={s.uploadIcon} />
...@@ -276,6 +285,7 @@ const FileUploader = ({ ...@@ -276,6 +285,7 @@ const FileUploader = ({
})}</div> })}</div>
{dragging && <div ref={dragRef} className={s.draggingCover} />} {dragging && <div ref={dragRef} className={s.draggingCover} />}
</div> </div>
)}
<div className={s.fileList}> <div className={s.fileList}>
{fileList.map((fileItem, index) => ( {fileList.map((fileItem, index) => (
<div <div
......
...@@ -39,7 +39,7 @@ export const NotionConnector = ({ onSetting }: NotionConnectorProps) => { ...@@ -39,7 +39,7 @@ export const NotionConnector = ({ onSetting }: NotionConnectorProps) => {
return ( return (
<div className={s.notionConnectionTip}> <div className={s.notionConnectionTip}>
<span className={s.notionIcon}/> <span className={s.notionIcon} />
<div className={s.title}>{t('datasetCreation.stepOne.notionSyncTitle')}</div> <div className={s.title}>{t('datasetCreation.stepOne.notionSyncTitle')}</div>
<div className={s.tip}>{t('datasetCreation.stepOne.notionSyncTip')}</div> <div className={s.tip}>{t('datasetCreation.stepOne.notionSyncTip')}</div>
<Button className='h-8' type='primary' onClick={onSetting}>{t('datasetCreation.stepOne.connect')}</Button> <Button className='h-8' type='primary' onClick={onSetting}>{t('datasetCreation.stepOne.connect')}</Button>
...@@ -92,7 +92,7 @@ const StepOne = ({ ...@@ -92,7 +92,7 @@ const StepOne = ({
const hasNotin = notionPages.length > 0 const hasNotin = notionPages.length > 0
const isVectorSpaceFull = plan.usage.vectorSpace >= plan.total.vectorSpace const isVectorSpaceFull = plan.usage.vectorSpace >= plan.total.vectorSpace
const isShowVectorSpaceFull = (allFileLoaded || hasNotin) && isVectorSpaceFull && enableBilling const isShowVectorSpaceFull = (allFileLoaded || hasNotin) && isVectorSpaceFull && enableBilling
const notSupportBatchUpload = enableBilling && plan.type === 'sandbox'
const nextDisabled = useMemo(() => { const nextDisabled = useMemo(() => {
if (!files.length) if (!files.length)
return true return true
...@@ -169,6 +169,7 @@ const StepOne = ({ ...@@ -169,6 +169,7 @@ const StepOne = ({
onFileListUpdate={updateFileList} onFileListUpdate={updateFileList}
onFileUpdate={updateFile} onFileUpdate={updateFile}
onPreview={updateCurrentFile} onPreview={updateCurrentFile}
notSupportBatchUpload={notSupportBatchUpload}
/> />
{isShowVectorSpaceFull && ( {isShowVectorSpaceFull && (
<div className='max-w-[640px] mb-4'> <div className='max-w-[640px] mb-4'>
......
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg"> <svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_129_107)"> <g clip-path="url(#clip0_129_107)">
<path d="M7.99991 14.6666C11.6819 14.6666 14.6666 11.6819 14.6666 7.99998C14.6666 4.31808 11.6819 1.33331 7.99998 1.33331C4.31808 1.33331 1.33331 4.31808 1.33331 7.99998C1.33331 11.6819 4.31808 14.6666 7.99998 14.6666Z" stroke="#155EEF" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/> <path d="M7.99998 14.6666C11.6819 14.6666 14.6666 11.6819 14.6666 7.99998C14.6666 4.31808 11.6819 1.33331 7.99998 1.33331C4.31808 1.33331 1.33331 4.31808 1.33331 7.99998C1.33331 11.6819 4.31808 14.6666 7.99998 14.6666Z" stroke="#667085" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6.66665 5.33331L10.6666 7.99998L6.66665 10.6666V5.33331Z" stroke="#155EEF" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/> <path d="M6.66665 5.33331L10.6666 7.99998L6.66665 10.6666V5.33331Z" stroke="#667085" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</g> </g>
<defs> <defs>
<clipPath id="clip0_129_107"> <clipPath id="clip0_129_107">
......
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_129_107)">
<path d="M7.99998 14.6666C11.6819 14.6666 14.6666 11.6819 14.6666 7.99998C14.6666 4.31808 11.6819 1.33331 7.99998 1.33331C4.31808 1.33331 1.33331 4.31808 1.33331 7.99998C1.33331 11.6819 4.31808 14.6666 7.99998 14.6666Z" stroke="#667085" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M6.66665 5.33331L10.6666 7.99998L6.66665 10.6666V5.33331Z" stroke="#667085" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</g>
<defs>
<clipPath id="clip0_129_107">
<rect width="16" height="16" fill="white"/>
</clipPath>
</defs>
</svg>
...@@ -26,7 +26,7 @@ const ModelIcon: FC<ModelIconProps> = ({ ...@@ -26,7 +26,7 @@ const ModelIcon: FC<ModelIconProps> = ({
return ( return (
<img <img
alt='model-icon' alt='model-icon'
src={`${provider.icon_small[language]}?_token=${localStorage.getItem('console_token')}`} src={`${provider.icon_small[language] || provider.icon_small.en_US}?_token=${localStorage.getItem('console_token')}`}
className={`w-4 h-4 ${className}`} className={`w-4 h-4 ${className}`}
/> />
) )
......
...@@ -69,7 +69,7 @@ const Form: FC<FormProps> = ({ ...@@ -69,7 +69,7 @@ const Form: FC<FormProps> = ({
<Tooltip popupContent={ <Tooltip popupContent={
// w-[100px] caused problem // w-[100px] caused problem
<div className=''> <div className=''>
{tooltip[language]} {tooltip[language] || tooltip.en_US}
</div> </div>
} > } >
<HelpCircle className='w-3 h-3 text-gray-500' /> <HelpCircle className='w-3 h-3 text-gray-500' />
...@@ -91,7 +91,7 @@ const Form: FC<FormProps> = ({ ...@@ -91,7 +91,7 @@ const Form: FC<FormProps> = ({
return ( return (
<div key={variable} className='py-3'> <div key={variable} className='py-3'>
<div className='py-2 text-sm text-gray-900'> <div className='py-2 text-sm text-gray-900'>
{label[language]} {label[language] || label.en_US}
{ {
required && ( required && (
<span className='ml-1 text-red-500'>*</span> <span className='ml-1 text-red-500'>*</span>
...@@ -104,7 +104,7 @@ const Form: FC<FormProps> = ({ ...@@ -104,7 +104,7 @@ const Form: FC<FormProps> = ({
value={(isShowDefaultValue && ((value[variable] as string) === '' || value[variable] === undefined || value[variable] === null)) ? formSchema.default : value[variable]} value={(isShowDefaultValue && ((value[variable] as string) === '' || value[variable] === undefined || value[variable] === null)) ? formSchema.default : value[variable]}
onChange={val => handleFormChange(variable, val)} onChange={val => handleFormChange(variable, val)}
validated={validatedSuccess} validated={validatedSuccess}
placeholder={placeholder?.[language]} placeholder={placeholder?.[language] || placeholder?.en_US}
disabled={disabed} disabled={disabed}
type={formSchema.type === FormTypeEnum.textNumber ? 'number' : 'text'} type={formSchema.type === FormTypeEnum.textNumber ? 'number' : 'text'}
{...(formSchema.type === FormTypeEnum.textNumber ? { min: (formSchema as CredentialFormSchemaNumberInput).min, max: (formSchema as CredentialFormSchemaNumberInput).max } : {})} {...(formSchema.type === FormTypeEnum.textNumber ? { min: (formSchema as CredentialFormSchemaNumberInput).min, max: (formSchema as CredentialFormSchemaNumberInput).max } : {})}
...@@ -132,7 +132,7 @@ const Form: FC<FormProps> = ({ ...@@ -132,7 +132,7 @@ const Form: FC<FormProps> = ({
return ( return (
<div key={variable} className='py-3'> <div key={variable} className='py-3'>
<div className='py-2 text-sm text-gray-900'> <div className='py-2 text-sm text-gray-900'>
{label[language]} {label[language] || label.en_US}
{ {
required && ( required && (
<span className='ml-1 text-red-500'>*</span> <span className='ml-1 text-red-500'>*</span>
...@@ -188,7 +188,7 @@ const Form: FC<FormProps> = ({ ...@@ -188,7 +188,7 @@ const Form: FC<FormProps> = ({
return ( return (
<div key={variable} className='py-3'> <div key={variable} className='py-3'>
<div className='py-2 text-sm text-gray-900'> <div className='py-2 text-sm text-gray-900'>
{label[language]} {label[language] || label.en_US}
{ {
required && ( required && (
...@@ -230,7 +230,7 @@ const Form: FC<FormProps> = ({ ...@@ -230,7 +230,7 @@ const Form: FC<FormProps> = ({
<div key={variable} className='py-3'> <div key={variable} className='py-3'>
<div className='flex items-center justify-between py-2 text-sm text-gray-900'> <div className='flex items-center justify-between py-2 text-sm text-gray-900'>
<div className='flex items-center space-x-2'> <div className='flex items-center space-x-2'>
<span>{label[language]}</span> <span>{label[language] || label.en_US}</span>
{tooltipContent} {tooltipContent}
</div> </div>
<Radio.Group <Radio.Group
......
...@@ -246,12 +246,12 @@ const ModelModal: FC<ModelModalProps> = ({ ...@@ -246,12 +246,12 @@ const ModelModal: FC<ModelModalProps> = ({
(provider.help && (provider.help.title || provider.help.url)) (provider.help && (provider.help.title || provider.help.url))
? ( ? (
<a <a
href={provider.help?.url[language]} href={provider.help?.url[language] || provider.help?.url.en_US}
target='_blank' rel='noopener noreferrer' target='_blank' rel='noopener noreferrer'
className='inline-flex items-center text-xs text-primary-600' className='inline-flex items-center text-xs text-primary-600'
onClick={e => !provider.help.url && e.preventDefault()} onClick={e => !provider.help.url && e.preventDefault()}
> >
{provider.help.title?.[language] || provider.help.url[language]} {provider.help.title?.[language] || provider.help.url[language] || provider.help.title?.en_US || provider.help.url.en_US}
<LinkExternal02 className='ml-1 w-3 h-3' /> <LinkExternal02 className='ml-1 w-3 h-3' />
</a> </a>
) )
......
...@@ -34,7 +34,6 @@ const ModelName: FC<ModelNameProps> = ({ ...@@ -34,7 +34,6 @@ const ModelName: FC<ModelNameProps> = ({
if (!modelItem) if (!modelItem)
return null return null
return ( return (
<div <div
className={` className={`
...@@ -44,9 +43,9 @@ const ModelName: FC<ModelNameProps> = ({ ...@@ -44,9 +43,9 @@ const ModelName: FC<ModelNameProps> = ({
> >
<div <div
className='mr-1 truncate' className='mr-1 truncate'
title={modelItem.label[language]} title={modelItem.label[language] || modelItem.label.en_US}
> >
{modelItem.label[language]} {modelItem.label[language] || modelItem.label.en_US}
</div> </div>
{ {
showModelType && modelItem.model_type && ( showModelType && modelItem.model_type && (
......
...@@ -218,16 +218,16 @@ const ParameterItem: FC<ParameterItemProps> = ({ ...@@ -218,16 +218,16 @@ const ParameterItem: FC<ParameterItemProps> = ({
<div className='shrink-0 flex items-center w-[200px]'> <div className='shrink-0 flex items-center w-[200px]'>
<div <div
className='mr-0.5 text-[13px] font-medium text-gray-700 truncate' className='mr-0.5 text-[13px] font-medium text-gray-700 truncate'
title={parameterRule.label[language]} title={parameterRule.label[language] || parameterRule.label.en_US}
> >
{parameterRule.label[language]} {parameterRule.label[language] || parameterRule.label.en_US}
</div> </div>
{ {
parameterRule.help && ( parameterRule.help && (
<Tooltip <Tooltip
selector={`model-parameter-rule-${parameterRule.name}`} selector={`model-parameter-rule-${parameterRule.name}`}
htmlContent={( htmlContent={(
<div className='w-[200px] whitespace-pre-wrap'>{parameterRule.help[language]}</div> <div className='w-[200px] whitespace-pre-wrap'>{parameterRule.help[language] || parameterRule.help.en_US}</div>
)} )}
> >
<HelpCircle className='mr-1.5 w-3.5 h-3.5 text-gray-400' /> <HelpCircle className='mr-1.5 w-3.5 h-3.5 text-gray-400' />
......
...@@ -65,7 +65,7 @@ const PopupItem: FC<PopupItemProps> = ({ ...@@ -65,7 +65,7 @@ const PopupItem: FC<PopupItemProps> = ({
return ( return (
<div className='mb-1'> <div className='mb-1'>
<div className='flex items-center px-3 h-[22px] text-xs font-medium text-gray-500'> <div className='flex items-center px-3 h-[22px] text-xs font-medium text-gray-500'>
{model.label[language]} {model.label[language] || model.label.en_US}
</div> </div>
{ {
model.models.map(modelItem => ( model.models.map(modelItem => (
......
...@@ -23,7 +23,22 @@ const Popup: FC<PopupProps> = ({ ...@@ -23,7 +23,22 @@ const Popup: FC<PopupProps> = ({
const language = useLanguage() const language = useLanguage()
const [searchText, setSearchText] = useState('') const [searchText, setSearchText] = useState('')
const filteredModelList = modelList.filter(model => model.models.filter(modelItem => modelItem.label[language].includes(searchText)).length) const filteredModelList = modelList.filter(
model => model.models.filter(
(modelItem) => {
if (modelItem.label[language] !== undefined)
return modelItem.label[language].includes(searchText)
let found = false
Object.keys(modelItem.label).forEach((key) => {
if (modelItem.label[key].includes(searchText))
found = true
})
return found
},
).length,
)
return ( return (
<div className='w-[320px] max-h-[480px] rounded-lg border-[0.5px] border-gray-200 bg-white shadow-lg overflow-y-auto'> <div className='w-[320px] max-h-[480px] rounded-lg border-[0.5px] border-gray-200 bg-white shadow-lg overflow-y-auto'>
......
...@@ -69,7 +69,7 @@ const ProviderCard: FC<ProviderCardProps> = ({ ...@@ -69,7 +69,7 @@ const ProviderCard: FC<ProviderCardProps> = ({
</div> </div>
{ {
provider.description && ( provider.description && (
<div className='mt-1 leading-4 text-xs text-black/[48]'>{provider.description[language]}</div> <div className='mt-1 leading-4 text-xs text-black/[48]'>{provider.description[language] || provider.description.en_US}</div>
) )
} }
</div> </div>
......
...@@ -16,7 +16,7 @@ const ProviderIcon: FC<ProviderIconProps> = ({ ...@@ -16,7 +16,7 @@ const ProviderIcon: FC<ProviderIconProps> = ({
return ( return (
<img <img
alt='provider-icon' alt='provider-icon'
src={`${provider.icon_large[language]}?_token=${localStorage.getItem('console_token')}`} src={`${provider.icon_large[language] || provider.icon_large.en_US}?_token=${localStorage.getItem('console_token')}`}
className={`w-auto h-6 ${className}`} className={`w-auto h-6 ${className}`}
/> />
) )
......
...@@ -2,10 +2,9 @@ ...@@ -2,10 +2,9 @@
import type { FC } from 'react' import type { FC } from 'react'
import React, { useEffect, useState } from 'react' import React, { useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import produce from 'immer'
import { useDebounce, useGetState } from 'ahooks' import { useDebounce, useGetState } from 'ahooks'
import { clone } from 'lodash-es'
import cn from 'classnames' import cn from 'classnames'
import produce from 'immer'
import { LinkExternal02, Settings01 } from '../../base/icons/src/vender/line/general' import { LinkExternal02, Settings01 } from '../../base/icons/src/vender/line/general'
import type { Credential, CustomCollectionBackend, CustomParamSchema, Emoji } from '../types' import type { Credential, CustomCollectionBackend, CustomParamSchema, Emoji } from '../types'
import { AuthHeaderPrefix, AuthType } from '../types' import { AuthHeaderPrefix, AuthType } from '../types'
...@@ -116,14 +115,16 @@ const EditCustomCollectionModal: FC<Props> = ({ ...@@ -116,14 +115,16 @@ const EditCustomCollectionModal: FC<Props> = ({
const [isShowTestApi, setIsShowTestApi] = useState(false) const [isShowTestApi, setIsShowTestApi] = useState(false)
const handleSave = () => { const handleSave = () => {
const postData = clone(customCollection) // const postData = clone(customCollection)
delete postData.tools const postData = produce(customCollection, (draft) => {
delete draft.tools
if (postData.credentials.auth_type === AuthType.none) { if (draft.credentials.auth_type === AuthType.none) {
delete postData.credentials.api_key_header delete draft.credentials.api_key_header
delete postData.credentials.api_key_header_prefix delete draft.credentials.api_key_header_prefix
delete postData.credentials.api_key_value delete draft.credentials.api_key_value
} }
})
if (isAdd) { if (isAdd) {
onAdd?.(postData) onAdd?.(postData)
......
...@@ -42,6 +42,7 @@ const TestApi: FC<Props> = ({ ...@@ -42,6 +42,7 @@ const TestApi: FC<Props> = ({
delete credentials.api_key_value delete credentials.api_key_value
} }
const data = { const data = {
provider_name: customCollection.provider,
tool_name: toolName, tool_name: toolName,
credentials, credentials,
schema_type: customCollection.schema_type, schema_type: customCollection.schema_type,
......
...@@ -32,6 +32,7 @@ const translation = { ...@@ -32,6 +32,7 @@ const translation = {
vectorSpace: 'Vector Space', vectorSpace: 'Vector Space',
vectorSpaceBillingTooltip: 'Each 1MB can store about 1.2million characters of vectorized data(estimated using OpenAI Embeddings, varies across models).', vectorSpaceBillingTooltip: 'Each 1MB can store about 1.2million characters of vectorized data(estimated using OpenAI Embeddings, varies across models).',
vectorSpaceTooltip: 'Vector Space is the long-term memory system required for LLMs to comprehend your data.', vectorSpaceTooltip: 'Vector Space is the long-term memory system required for LLMs to comprehend your data.',
documentsUploadQuota: 'Documents Upload Quota',
documentProcessingPriority: 'Document Processing Priority', documentProcessingPriority: 'Document Processing Priority',
documentProcessingPriorityTip: 'For higher document processing priority, please upgrade your plan.', documentProcessingPriorityTip: 'For higher document processing priority, please upgrade your plan.',
documentProcessingPriorityUpgrade: 'Process more data with higher accuracy at faster speeds.', documentProcessingPriorityUpgrade: 'Process more data with higher accuracy at faster speeds.',
...@@ -56,6 +57,7 @@ const translation = { ...@@ -56,6 +57,7 @@ const translation = {
dedicatedAPISupport: 'Dedicated API support', dedicatedAPISupport: 'Dedicated API support',
customIntegration: 'Custom integration and support', customIntegration: 'Custom integration and support',
ragAPIRequest: 'RAG API Requests', ragAPIRequest: 'RAG API Requests',
bulkUpload: 'Bulk upload documents',
agentMode: 'Agent Mode', agentMode: 'Agent Mode',
workflow: 'Workflow', workflow: 'Workflow',
}, },
......
...@@ -32,6 +32,7 @@ const translation = { ...@@ -32,6 +32,7 @@ const translation = {
vectorSpace: '向量空间', vectorSpace: '向量空间',
vectorSpaceTooltip: '向量空间是 LLMs 理解您的数据所需的长期记忆系统。', vectorSpaceTooltip: '向量空间是 LLMs 理解您的数据所需的长期记忆系统。',
vectorSpaceBillingTooltip: '向量存储是将知识库向量化处理后为让 LLMs 理解数据而使用的长期记忆存储,1MB 大约能满足1.2 million character 的向量化后数据存储(以 OpenAI Embedding 模型估算,不同模型计算方式有差异)。在向量化过程中,实际的压缩或尺寸减小取决于内容的复杂性和冗余性。', vectorSpaceBillingTooltip: '向量存储是将知识库向量化处理后为让 LLMs 理解数据而使用的长期记忆存储,1MB 大约能满足1.2 million character 的向量化后数据存储(以 OpenAI Embedding 模型估算,不同模型计算方式有差异)。在向量化过程中,实际的压缩或尺寸减小取决于内容的复杂性和冗余性。',
documentsUploadQuota: '文档上传配额',
documentProcessingPriority: '文档处理优先级', documentProcessingPriority: '文档处理优先级',
documentProcessingPriorityTip: '如需更高的文档处理优先级,请升级您的套餐', documentProcessingPriorityTip: '如需更高的文档处理优先级,请升级您的套餐',
documentProcessingPriorityUpgrade: '以更快的速度、更高的精度处理更多的数据。', documentProcessingPriorityUpgrade: '以更快的速度、更高的精度处理更多的数据。',
...@@ -56,6 +57,7 @@ const translation = { ...@@ -56,6 +57,7 @@ const translation = {
dedicatedAPISupport: '专用 API 支持', dedicatedAPISupport: '专用 API 支持',
customIntegration: '自定义集成和支持', customIntegration: '自定义集成和支持',
ragAPIRequest: 'RAG API 请求', ragAPIRequest: 'RAG API 请求',
bulkUpload: '批量上传文档',
agentMode: '代理模式', agentMode: '代理模式',
workflow: '工作流', workflow: '工作流',
}, },
......
{ {
"name": "dify-web", "name": "dify-web",
"version": "0.5.7", "version": "0.5.8",
"private": true, "private": true,
"scripts": { "scripts": {
"dev": "next dev", "dev": "next dev",
......
...@@ -308,7 +308,7 @@ const baseFetch = <T>( ...@@ -308,7 +308,7 @@ const baseFetch = <T>(
]) as Promise<T> ]) as Promise<T>
} }
export const upload = (options: any, isPublicAPI?: boolean, url?: string): Promise<any> => { export const upload = (options: any, isPublicAPI?: boolean, url?: string, searchParams?: string): Promise<any> => {
const urlPrefix = isPublicAPI ? PUBLIC_API_PREFIX : API_PREFIX const urlPrefix = isPublicAPI ? PUBLIC_API_PREFIX : API_PREFIX
let token = '' let token = ''
if (isPublicAPI) { if (isPublicAPI) {
...@@ -329,7 +329,7 @@ export const upload = (options: any, isPublicAPI?: boolean, url?: string): Promi ...@@ -329,7 +329,7 @@ export const upload = (options: any, isPublicAPI?: boolean, url?: string): Promi
} }
const defaultOptions = { const defaultOptions = {
method: 'POST', method: 'POST',
url: url ? `${urlPrefix}${url}` : `${urlPrefix}/files/upload`, url: (url ? `${urlPrefix}${url}` : `${urlPrefix}/files/upload`) + (searchParams || ''),
headers: { headers: {
Authorization: `Bearer ${token}`, Authorization: `Bearer ${token}`,
}, },
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment