Commit afd8996b authored by John Wang

Merge branch 'main' into feat/universal-chat

# Conflicts:
#	api/core/completion.py
#	api/core/tool/dataset_index_tool.py
#	api/requirements.txt
parents 3f5177b6 28ba7214
@@ -17,9 +17,15 @@ A single API encompassing plugin capabilities, context enhancement, and more, sa
Visual data analysis, log review, and annotation for applications
Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs, currently supported:
-- GPT 3 (text-davinci-003)
-- GPT 3.5 Turbo(ChatGPT)
-- GPT-4
+* **OpenAI**: GPT-4, GPT-3.5-turbo, GPT-3.5-turbo-16k, text-davinci-003
+* **Azure OpenAI**
+* **Anthropic**: Claude 2, Claude-instant
+> We've got 1,000 free trial credits available for all cloud service users to try out the Claude model. Visit [Dify.ai](https://dify.ai) and try it now.
+* **Hugging Face Hub**: Coming soon.
## Use Cloud Services
......
@@ -17,11 +17,16 @@
- A single API covering plugin capabilities, context enhancement, and more, saving you the work of writing backend code
- Visual data analysis, log review, and annotation for applications
-Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs; currently supported:
+Dify is compatible with Langchain, meaning we'll gradually support multiple LLMs; currently supported model providers:
-- GPT 3 (text-davinci-003)
-- GPT 3.5 Turbo(ChatGPT)
-- GPT-4
+* **OpenAI**: GPT-4, GPT-3.5-turbo, GPT-3.5-turbo-16k, text-davinci-003
+* **Azure OpenAI Service**
+* **Anthropic**: Claude 2, Claude-instant
+> We provide all registered cloud users with 1,000 free Claude model message calls. Sign in at [dify.ai](https://cloud.dify.ai) to start using them.
+* **Hugging Face Hub** (coming soon)
## Use Cloud Services
......
@@ -8,13 +8,19 @@ EDITION=SELF_HOSTED
SECRET_KEY=
# Console API base URL
-CONSOLE_URL=http://127.0.0.1:5001
+CONSOLE_API_URL=http://127.0.0.1:5001
# Console frontend web base URL
CONSOLE_WEB_URL=http://127.0.0.1:3000
# Service API base URL
-API_URL=http://127.0.0.1:5001
+SERVICE_API_URL=http://127.0.0.1:5001
# Web APP API base URL
APP_API_URL=http://127.0.0.1:5001
-# Web APP base URL
+# Web APP frontend web base URL
-APP_URL=http://127.0.0.1:3000
+APP_WEB_URL=http://127.0.0.1:3000
# celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1
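With the rename, each surface gets its own base URL: the console API, the console frontend, the Service API, the Web App API, and the Web App frontend. A minimal sketch of a split-domain deployment, with placeholder hostnames (not defaults from this commit):

    CONSOLE_API_URL=https://console-api.example.com
    CONSOLE_WEB_URL=https://console.example.com
    SERVICE_API_URL=https://api.example.com
    APP_API_URL=https://app-api.example.com
    APP_WEB_URL=https://app.example.com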
@@ -79,6 +85,11 @@ WEAVIATE_BATCH_SIZE=100
QDRANT_URL=path:storage/qdrant
QDRANT_API_KEY=your-qdrant-api-key
# Mail configuration, support: resend
MAIL_TYPE=
MAIL_DEFAULT_SEND_FROM=no-reply <no-reply@dify.ai>
RESEND_API_KEY=
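Per the comment above, resend is the supported mail type, so an illustrative configuration might look like the following (the sender address and API key are placeholders):

    MAIL_TYPE=resend
    MAIL_DEFAULT_SEND_FROM=no-reply <no-reply@example.com>
    RESEND_API_KEY=your-resend-api-key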
# Sentry configuration
SENTRY_DSN=
......
@@ -5,9 +5,11 @@ LABEL maintainer="takatost@gmail.com"
ENV FLASK_APP app.py
ENV EDITION SELF_HOSTED
ENV DEPLOY_ENV PRODUCTION
-ENV CONSOLE_URL http://127.0.0.1:5001
-ENV API_URL http://127.0.0.1:5001
-ENV APP_URL http://127.0.0.1:5001
+ENV CONSOLE_API_URL http://127.0.0.1:5001
+ENV CONSOLE_WEB_URL http://127.0.0.1:3000
+ENV SERVICE_API_URL http://127.0.0.1:5001
+ENV APP_API_URL http://127.0.0.1:5001
+ENV APP_WEB_URL http://127.0.0.1:3000
EXPOSE 5001
......
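These ENV defaults can also be overridden when starting the container; a rough sketch, assuming an image named dify-api built from this Dockerfile (the image name and hostnames are placeholders):

    docker run -d \
      -e CONSOLE_API_URL=https://console-api.example.com \
      -e CONSOLE_WEB_URL=https://console.example.com \
      -e SERVICE_API_URL=https://api.example.com \
      -e APP_API_URL=https://app-api.example.com \
      -e APP_WEB_URL=https://app.example.com \
      -p 5001:5001 \
      dify-api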
@@ -2,6 +2,8 @@
import os
from datetime import datetime
from werkzeug.exceptions import Forbidden
if not os.environ.get("DEBUG") or os.environ.get("DEBUG").lower() != 'true':
from gevent import monkey
monkey.patch_all()
@@ -15,7 +17,7 @@ import flask_login
from flask_cors import CORS
from extensions import ext_session, ext_celery, ext_sentry, ext_redis, ext_login, ext_migrate, \
-ext_database, ext_storage
+ext_database, ext_storage, ext_mail
from extensions.ext_database import db
from extensions.ext_login import login_manager
@@ -27,7 +29,7 @@ from events import event_handlers
import core
from config import Config, CloudEditionConfig
from commands import register_commands
-from models.account import TenantAccountJoin
+from models.account import TenantAccountJoin, AccountStatus
from models.model import Account, EndUser, App
import warnings
@@ -83,6 +85,7 @@ def initialize_extensions(app):
ext_celery.init_app(app)
ext_session.init_app(app)
ext_login.init_app(app)
ext_mail.init_app(app)
ext_sentry.init_app(app)
@@ -100,6 +103,9 @@ def load_user(user_id):
account = db.session.query(Account).filter(Account.id == account_id).first()
if account:
if account.status == AccountStatus.BANNED.value or account.status == AccountStatus.CLOSED.value:
raise Forbidden('Account is banned or closed.')
workspace_id = session.get('workspace_id')
if workspace_id:
tenant_account_join = db.session.query(TenantAccountJoin).filter(
@@ -149,6 +155,10 @@ def register_blueprints(app):
from controllers.web import bp as web_bp
from controllers.console import bp as console_app_bp
CORS(service_api_bp,
allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],
methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH']
)
app.register_blueprint(service_api_bp)
CORS(web_bp,
......
@@ -18,7 +18,8 @@ from models.model import Account
import secrets
import base64
-from models.provider import Provider
+from models.provider import Provider, ProviderName
from services.provider_service import ProviderService
@click.command('reset-password', help='Reset the account password.')
@@ -193,9 +194,40 @@ def recreate_all_dataset_indexes():
click.echo(click.style('Congratulations! Recreate {} dataset indexes.'.format(recreate_count), fg='green'))
@click.command('sync-anthropic-hosted-providers', help='Sync anthropic hosted providers.')
def sync_anthropic_hosted_providers():
click.echo(click.style('Start sync anthropic hosted providers.', fg='green'))
count = 0
page = 1
while True:
try:
tenants = db.session.query(Tenant).order_by(Tenant.created_at.desc()).paginate(page=page, per_page=50)
except NotFound:
break
page += 1
for tenant in tenants:
try:
click.echo('Syncing tenant anthropic hosted provider: {}'.format(tenant.id))
ProviderService.create_system_provider(
tenant,
ProviderName.ANTHROPIC.value,
current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'],
True
)
count += 1
except Exception as e:
click.echo(click.style('Sync tenant anthropic hosted provider error: {} {}'.format(e.__class__.__name__, str(e)), fg='red'))
continue
click.echo(click.style('Congratulations! Synced {} anthropic hosted providers.'.format(count), fg='green'))
def register_commands(app):
app.cli.add_command(reset_password)
app.cli.add_command(reset_email)
app.cli.add_command(generate_invitation_codes)
app.cli.add_command(reset_encrypt_key_pair)
app.cli.add_command(recreate_all_dataset_indexes)
app.cli.add_command(sync_anthropic_hosted_providers)
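Because the command is registered on app.cli, it can be invoked with the Flask CLI from the api directory, assuming FLASK_APP points at app.py as in the Dockerfile above:

    flask sync-anthropic-hosted-providers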
@@ -28,9 +28,11 @@ DEFAULTS = {
'SESSION_REDIS_USE_SSL': 'False',
'OAUTH_REDIRECT_PATH': '/console/api/oauth/authorize',
'OAUTH_REDIRECT_INDEX_PATH': '/',
-'CONSOLE_URL': 'https://cloud.dify.ai',
-'API_URL': 'https://api.dify.ai',
-'APP_URL': 'https://udify.app',
+'CONSOLE_WEB_URL': 'https://cloud.dify.ai',
+'CONSOLE_API_URL': 'https://cloud.dify.ai',
+'SERVICE_API_URL': 'https://api.dify.ai',
+'APP_WEB_URL': 'https://udify.app',
+'APP_API_URL': 'https://udify.app',
'STORAGE_TYPE': 'local',
'STORAGE_LOCAL_PATH': 'storage',
'CHECK_UPDATE_URL': 'https://updates.dify.ai',
@@ -48,7 +50,10 @@ DEFAULTS = {
'PDF_PREVIEW': 'True',
'LOG_LEVEL': 'INFO',
'DISABLE_PROVIDER_CONFIG_VALIDATION': 'False',
-'DEFAULT_LLM_PROVIDER': 'openai'
+'DEFAULT_LLM_PROVIDER': 'openai',
'OPENAI_HOSTED_QUOTA_LIMIT': 200,
'ANTHROPIC_HOSTED_QUOTA_LIMIT': 1000,
'TENANT_DOCUMENT_COUNT': 100
}
@@ -76,10 +81,15 @@ class Config:
def __init__(self):
# app settings
self.CONSOLE_API_URL = get_env('CONSOLE_URL') if get_env('CONSOLE_URL') else get_env('CONSOLE_API_URL')
self.CONSOLE_WEB_URL = get_env('CONSOLE_URL') if get_env('CONSOLE_URL') else get_env('CONSOLE_WEB_URL')
self.SERVICE_API_URL = get_env('API_URL') if get_env('API_URL') else get_env('SERVICE_API_URL')
self.APP_WEB_URL = get_env('APP_URL') if get_env('APP_URL') else get_env('APP_WEB_URL')
self.APP_API_URL = get_env('APP_URL') if get_env('APP_URL') else get_env('APP_API_URL')
self.CONSOLE_URL = get_env('CONSOLE_URL')
self.API_URL = get_env('API_URL')
self.APP_URL = get_env('APP_URL')
-self.CURRENT_VERSION = "0.3.7"
+self.CURRENT_VERSION = "0.3.9"
self.COMMIT_SHA = get_env('COMMIT_SHA')
self.EDITION = "SELF_HOSTED"
self.DEPLOY_ENV = get_env('DEPLOY_ENV')
@@ -147,10 +157,15 @@ class Config:
# cors settings
self.CONSOLE_CORS_ALLOW_ORIGINS = get_cors_allow_origins(
-'CONSOLE_CORS_ALLOW_ORIGINS', self.CONSOLE_URL)
+'CONSOLE_CORS_ALLOW_ORIGINS', self.CONSOLE_WEB_URL)
self.WEB_API_CORS_ALLOW_ORIGINS = get_cors_allow_origins(
'WEB_API_CORS_ALLOW_ORIGINS', '*')
# mail settings
self.MAIL_TYPE = get_env('MAIL_TYPE')
self.MAIL_DEFAULT_SEND_FROM = get_env('MAIL_DEFAULT_SEND_FROM')
self.RESEND_API_KEY = get_env('RESEND_API_KEY')
# sentry settings
self.SENTRY_DSN = get_env('SENTRY_DSN')
self.SENTRY_TRACES_SAMPLE_RATE = float(get_env('SENTRY_TRACES_SAMPLE_RATE'))
@@ -179,6 +194,10 @@ class Config:
# hosted provider credentials
self.OPENAI_API_KEY = get_env('OPENAI_API_KEY')
self.ANTHROPIC_API_KEY = get_env('ANTHROPIC_API_KEY')
self.OPENAI_HOSTED_QUOTA_LIMIT = get_env('OPENAI_HOSTED_QUOTA_LIMIT')
self.ANTHROPIC_HOSTED_QUOTA_LIMIT = get_env('ANTHROPIC_HOSTED_QUOTA_LIMIT')
# By default it is False
# You could disable it for compatibility with certain OpenAPI providers
@@ -195,6 +214,8 @@ class Config:
self.NOTION_INTERNAL_SECRET = get_env('NOTION_INTERNAL_SECRET')
self.NOTION_INTEGRATION_TOKEN = get_env('NOTION_INTEGRATION_TOKEN')
self.TENANT_DOCUMENT_COUNT = get_env('TENANT_DOCUMENT_COUNT')
class CloudEditionConfig(Config):
......
@@ -12,7 +12,7 @@ from . import setup, version, apikey, admin
from .app import app, site, completion, model_config, statistic, conversation, message, generator, audio
# Import auth controllers
-from .auth import login, oauth, data_source_oauth
+from .auth import login, oauth, data_source_oauth, activate
# Import datasets controllers
from .datasets import datasets, datasets_document, datasets_segments, file, hit_testing, data_source
......
@@ -50,8 +50,8 @@ class ChatMessageAudioApi(Resource):
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -63,8 +63,8 @@ class CompletionMessageApi(Resource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -133,8 +133,8 @@ class ChatMessageApi(Resource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -164,8 +164,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-except ProviderTokenNotInitError:
-yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+except ProviderTokenNotInitError as ex:
+yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
......
@@ -16,7 +16,7 @@ class ProviderNotInitializeError(BaseHTTPException):
class ProviderQuotaExceededError(BaseHTTPException):
error_code = 'provider_quota_exceeded'
-description = "Your quota for Dify Hosted OpenAI has been exhausted. " \
+description = "Your quota for Dify Hosted Model Provider has been exhausted. " \
"Please go to Settings -> Model Provider to complete your own provider credentials."
code = 400
......
@@ -27,8 +27,8 @@ class IntroductionGenerateApi(Resource):
account.current_tenant_id,
args['prompt_template']
)
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -58,8 +58,8 @@ class RuleGenerateApi(Resource):
args['audiences'],
args['hoping_to_solve']
)
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -269,8 +269,8 @@ class MessageMoreLikeThisApi(Resource):
raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError:
raise AppMoreLikeThisDisabledError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -297,8 +297,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
except MoreLikeThisDisabledError:
yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
-except ProviderTokenNotInitError:
-yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+except ProviderTokenNotInitError as ex:
+yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
@@ -339,8 +339,8 @@ class MessageSuggestedQuestionApi(Resource):
raise NotFound("Message not found")
except ConversationNotExistsError:
raise NotFound("Conversation not found")
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
import base64
import secrets
from datetime import datetime
from flask_restful import Resource, reqparse
from controllers.console import api
from controllers.console.error import AlreadyActivateError
from extensions.ext_database import db
from libs.helper import email, str_len, supported_language, timezone
from libs.password import valid_password, hash_password
from models.account import AccountStatus, Tenant
from services.account_service import RegisterService
class ActivateCheckApi(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('workspace_id', type=str, required=True, nullable=False, location='args')
parser.add_argument('email', type=email, required=True, nullable=False, location='args')
parser.add_argument('token', type=str, required=True, nullable=False, location='args')
args = parser.parse_args()
account = RegisterService.get_account_if_token_valid(args['workspace_id'], args['email'], args['token'])
tenant = db.session.query(Tenant).filter(
Tenant.id == args['workspace_id'],
Tenant.status == 'normal'
).first()
return {'is_valid': account is not None, 'workspace_name': tenant.name}
class ActivateApi(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('workspace_id', type=str, required=True, nullable=False, location='json')
parser.add_argument('email', type=email, required=True, nullable=False, location='json')
parser.add_argument('token', type=str, required=True, nullable=False, location='json')
parser.add_argument('name', type=str_len(30), required=True, nullable=False, location='json')
parser.add_argument('password', type=valid_password, required=True, nullable=False, location='json')
parser.add_argument('interface_language', type=supported_language, required=True, nullable=False,
location='json')
parser.add_argument('timezone', type=timezone, required=True, nullable=False, location='json')
args = parser.parse_args()
account = RegisterService.get_account_if_token_valid(args['workspace_id'], args['email'], args['token'])
if account is None:
raise AlreadyActivateError()
RegisterService.revoke_token(args['workspace_id'], args['email'], args['token'])
account.name = args['name']
# generate password salt
salt = secrets.token_bytes(16)
base64_salt = base64.b64encode(salt).decode()
# encrypt password with salt
password_hashed = hash_password(args['password'], salt)
base64_password_hashed = base64.b64encode(password_hashed).decode()
account.password = base64_password_hashed
account.password_salt = base64_salt
account.interface_language = args['interface_language']
account.timezone = args['timezone']
account.interface_theme = 'light'
account.status = AccountStatus.ACTIVE.value
account.initialized_at = datetime.utcnow()
db.session.commit()
return {'result': 'success'}
api.add_resource(ActivateCheckApi, '/activate/check')
api.add_resource(ActivateApi, '/activate')
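These resources are added to the console blueprint; assuming the console API is mounted under /console/api (an assumption, not shown in this diff) and using placeholder values, the invitation flow can be exercised roughly like this:

    # Check whether an invitation token is still valid
    curl 'http://127.0.0.1:5001/console/api/activate/check?workspace_id=WORKSPACE_ID&email=user@example.com&token=TOKEN'

    # Activate the invited account
    curl -X POST 'http://127.0.0.1:5001/console/api/activate' \
      -H 'Content-Type: application/json' \
      -d '{"workspace_id": "WORKSPACE_ID", "email": "user@example.com", "token": "TOKEN", "name": "Alice", "password": "a-Valid-passw0rd", "interface_language": "en-US", "timezone": "Asia/Shanghai"}'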
@@ -20,7 +20,7 @@ def get_oauth_providers():
client_secret=current_app.config.get(
'NOTION_CLIENT_SECRET'),
redirect_uri=current_app.config.get(
-'CONSOLE_URL') + '/console/api/oauth/data-source/callback/notion')
+'CONSOLE_API_URL') + '/console/api/oauth/data-source/callback/notion')
OAUTH_PROVIDERS = {
'notion': notion_oauth
@@ -42,7 +42,7 @@ class OAuthDataSource(Resource):
if current_app.config.get('NOTION_INTEGRATION_TYPE') == 'internal':
internal_secret = current_app.config.get('NOTION_INTERNAL_SECRET')
oauth_provider.save_internal_access_token(internal_secret)
-return redirect(f'{current_app.config.get("CONSOLE_URL")}?oauth_data_source=success')
+return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?oauth_data_source=success')
else:
auth_url = oauth_provider.get_authorization_url()
return redirect(auth_url)
@@ -66,12 +66,12 @@ class OAuthDataSourceCallback(Resource):
f"An error occurred during the OAuthCallback process with {provider}: {e.response.text}")
return {'error': 'OAuth data source process failed'}, 400
-return redirect(f'{current_app.config.get("CONSOLE_URL")}?oauth_data_source=success')
+return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?oauth_data_source=success')
elif 'error' in request.args:
error = request.args.get('error')
-return redirect(f'{current_app.config.get("CONSOLE_URL")}?oauth_data_source={error}')
+return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?oauth_data_source={error}')
else:
-return redirect(f'{current_app.config.get("CONSOLE_URL")}?oauth_data_source=access_denied')
+return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?oauth_data_source=access_denied')
class OAuthDataSourceSync(Resource):
......
@@ -20,13 +20,13 @@ def get_oauth_providers():
client_secret=current_app.config.get(
'GITHUB_CLIENT_SECRET'),
redirect_uri=current_app.config.get(
-'CONSOLE_URL') + '/console/api/oauth/authorize/github')
+'CONSOLE_API_URL') + '/console/api/oauth/authorize/github')
google_oauth = GoogleOAuth(client_id=current_app.config.get('GOOGLE_CLIENT_ID'),
client_secret=current_app.config.get(
'GOOGLE_CLIENT_SECRET'),
redirect_uri=current_app.config.get(
-'CONSOLE_URL') + '/console/api/oauth/authorize/google')
+'CONSOLE_API_URL') + '/console/api/oauth/authorize/google')
OAUTH_PROVIDERS = {
'github': github_oauth,
@@ -80,7 +80,7 @@ class OAuthCallback(Resource):
flask_login.login_user(account, remember=True)
AccountService.update_last_login(account, request)
-return redirect(f'{current_app.config.get("CONSOLE_URL")}?oauth_login=success')
+return redirect(f'{current_app.config.get("CONSOLE_WEB_URL")}?oauth_login=success')
def _get_account_by_openid_or_email(provider: str, user_info: OAuthUserInfo) -> Optional[Account]:
......
@@ -279,8 +279,8 @@ class DatasetDocumentListApi(Resource):
try:
documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -324,8 +324,8 @@ class DatasetInitApi(Resource):
document_data=args,
account=current_user
)
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -95,8 +95,8 @@ class HitTestingApi(Resource):
return {"query": response['query'], 'records': marshal(response['records'], hit_testing_record_fields)}
except services.errors.index.IndexNotInitializedError:
raise DatasetNotInitializedError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -18,3 +18,9 @@ class AccountNotLinkTenantError(BaseHTTPException):
error_code = 'account_not_link_tenant'
description = "Account not link tenant."
code = 403
class AlreadyActivateError(BaseHTTPException):
error_code = 'already_activate'
description = "Auth Token is invalid or account already activated, please check again."
code = 403
@@ -47,8 +47,8 @@ class ChatAudioApi(InstalledAppResource):
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -54,8 +54,8 @@ class CompletionApi(InstalledAppResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -113,8 +113,8 @@ class ChatApi(InstalledAppResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -155,8 +155,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-except ProviderTokenNotInitError:
-yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+except ProviderTokenNotInitError as ex:
+yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
......
@@ -107,8 +107,8 @@ class MessageMoreLikeThisApi(InstalledAppResource):
raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError:
raise AppMoreLikeThisDisabledError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -135,8 +135,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
except MoreLikeThisDisabledError:
yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
-except ProviderTokenNotInitError:
-yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+except ProviderTokenNotInitError as ex:
+yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
@@ -174,8 +174,8 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
raise NotFound("Conversation not found")
except SuggestedQuestionsAfterAnswerDisabledError:
raise AppSuggestedQuestionsAfterAnswerDisabledError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -6,22 +6,23 @@ from flask import current_app, request
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, fields, marshal_with
from services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError
from controllers.console import api
from controllers.console.setup import setup_required
from controllers.console.workspace.error import AccountAlreadyInitedError, InvalidInvitationCodeError, \
-RepeatPasswordNotMatchError
+RepeatPasswordNotMatchError, CurrentPasswordIncorrectError
from controllers.console.wraps import account_initialization_required
from libs.helper import TimestampField, supported_language, timezone
from extensions.ext_database import db
from models.account import InvitationCode, AccountIntegrate
from services.account_service import AccountService
account_fields = {
'id': fields.String,
'name': fields.String,
'avatar': fields.String,
'email': fields.String,
'is_password_set': fields.Boolean,
'interface_language': fields.String,
'interface_theme': fields.String,
'timezone': fields.String,
@@ -194,8 +195,11 @@ class AccountPasswordApi(Resource):
if args['new_password'] != args['repeat_new_password']:
raise RepeatPasswordNotMatchError()
-AccountService.update_account_password(
-current_user, args['password'], args['new_password'])
+try:
+AccountService.update_account_password(
+current_user, args['password'], args['new_password'])
+except ServiceCurrentPasswordIncorrectError:
+raise CurrentPasswordIncorrectError()
return {"result": "success"}
......
@@ -7,6 +7,12 @@ class RepeatPasswordNotMatchError(BaseHTTPException):
code = 400
class CurrentPasswordIncorrectError(BaseHTTPException):
error_code = 'current_password_incorrect'
description = "Current password is incorrect."
code = 400
class ProviderRequestFailedError(BaseHTTPException):
error_code = 'provider_request_failed'
description = None
......
# -*- coding:utf-8 -*-
from flask import current_app
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, marshal_with, abort, fields, marshal
@@ -60,7 +60,8 @@ class MemberInviteEmailApi(Resource):
inviter = current_user
try:
-RegisterService.invite_new_member(inviter.current_tenant, invitee_email, role=invitee_role, inviter=inviter)
+token = RegisterService.invite_new_member(inviter.current_tenant, invitee_email, role=invitee_role,
+inviter=inviter)
account = db.session.query(Account, TenantAccountJoin.role).join(
TenantAccountJoin, Account.id == TenantAccountJoin.account_id
).filter(Account.email == args['email']).first()
@@ -78,7 +79,16 @@ class MemberInviteEmailApi(Resource):
# todo:413
-return {'result': 'success', 'account': account}, 201
+return {
+'result': 'success',
+'account': account,
+'invite_url': '{}/activate?workspace_id={}&email={}&token={}'.format(
+current_app.config.get("CONSOLE_WEB_URL"),
+str(current_user.current_tenant_id),
+invitee_email,
+token
+)
+}, 201
class MemberCancelInviteApi(Resource):
@@ -88,7 +98,7 @@ class MemberCancelInviteApi(Resource):
@login_required
@account_initialization_required
def delete(self, member_id):
-member = Account.query.get(str(member_id))
+member = db.session.query(Account).filter(Account.id == str(member_id)).first()
if not member:
abort(404)
......
@@ -3,6 +3,7 @@ import base64
import json
import logging
from flask import current_app
from flask_login import login_required, current_user
from flask_restful import Resource, reqparse, abort
from werkzeug.exceptions import Forbidden
@@ -34,7 +35,7 @@ class ProviderListApi(Resource):
plaintext, the rest is replaced by * and the last two bits are displayed in plaintext
"""
-ProviderService.init_supported_provider(current_user.current_tenant, "cloud")
+ProviderService.init_supported_provider(current_user.current_tenant)
providers = Provider.query.filter_by(tenant_id=tenant_id).all()
provider_list = [
@@ -50,7 +51,8 @@ class ProviderListApi(Resource):
'quota_used': p.quota_used
} if p.provider_type == ProviderType.SYSTEM.value else {}),
'token': ProviderService.get_obfuscated_api_key(current_user.current_tenant,
-ProviderName(p.provider_name))
+ProviderName(p.provider_name), only_custom=True)
if p.provider_type == ProviderType.CUSTOM.value else None
}
for p in providers
]
@@ -121,9 +123,10 @@ class ProviderTokenApi(Resource):
is_valid=token_is_valid)
db.session.add(provider_model)
-if provider_model.is_valid:
+if provider in [ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value] and provider_model.is_valid:
other_providers = db.session.query(Provider).filter(
Provider.tenant_id == tenant.id,
Provider.provider_name.in_([ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value]),
Provider.provider_name != provider,
Provider.provider_type == ProviderType.CUSTOM.value
).all()
@@ -133,7 +136,7 @@ class ProviderTokenApi(Resource):
db.session.commit()
-if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
+if provider in [ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value,
ProviderName.HUGGINGFACEHUB.value]:
return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}, 201
@@ -157,7 +160,7 @@ class ProviderTokenValidateApi(Resource):
args = parser.parse_args()
# todo: remove this when the provider is supported
-if provider in [ProviderName.ANTHROPIC.value, ProviderName.COHERE.value,
+if provider in [ProviderName.COHERE.value,
ProviderName.HUGGINGFACEHUB.value]:
return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}
@@ -203,7 +206,19 @@ class ProviderSystemApi(Resource):
provider_model.is_valid = args['is_enabled']
db.session.commit()
elif not provider_model:
-ProviderService.create_system_provider(tenant, provider, args['is_enabled'])
+if provider == ProviderName.OPENAI.value:
+quota_limit = current_app.config['OPENAI_HOSTED_QUOTA_LIMIT']
+elif provider == ProviderName.ANTHROPIC.value:
+quota_limit = current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT']
+else:
+quota_limit = 0
+ProviderService.create_system_provider(
+tenant,
+provider,
+quota_limit,
+args['is_enabled']
+)
else:
abort(403)
......
@@ -43,8 +43,8 @@ class AudioApi(AppApiResource):
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -54,8 +54,8 @@ class CompletionApi(AppApiResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -115,8 +115,8 @@ class ChatApi(AppApiResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -156,8 +156,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-except ProviderTokenNotInitError:
-yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+except ProviderTokenNotInitError as ex:
+yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
......
# -*- coding:utf-8 -*-
from flask import request
from flask_restful import fields, marshal_with, reqparse
from flask_restful.inputs import int_range
from werkzeug.exceptions import NotFound
@@ -56,16 +57,14 @@ class ConversationDetailApi(AppApiResource):
conversation_id = str(c_id)
-parser = reqparse.RequestParser()
-parser.add_argument('user', type=str, location='args')
-args = parser.parse_args()
+user = request.get_json().get('user')
-if end_user is None and args['user'] is not None:
+if end_user is None and user is not None:
-end_user = create_or_update_end_user_for_user_id(app_model, args['user'])
+end_user = create_or_update_end_user_for_user_id(app_model, user)
try:
ConversationService.delete(app_model, conversation_id, end_user)
-return {"result": "success"}, 204
+return {"result": "success"}
except services.errors.conversation.ConversationNotExistsError:
raise NotFound("Conversation Not Exists.")
@@ -95,3 +94,4 @@ class ConversationRenameApi(AppApiResource):
api.add_resource(ConversationRenameApi, '/conversations/<uuid:c_id>/name', endpoint='conversation_name')
api.add_resource(ConversationApi, '/conversations')
api.add_resource(ConversationApi, '/conversations/<uuid:c_id>', endpoint='conversation')
api.add_resource(ConversationDetailApi, '/conversations/<uuid:c_id>', endpoint='conversation_detail')
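The delete endpoint now reads the end-user identifier from the JSON body instead of the query string. A rough request sketch, assuming the Service API is mounted under /v1 and authenticated with an app API key (both assumptions, not shown in this diff; IDs are placeholders):

    curl -X DELETE 'http://127.0.0.1:5001/v1/conversations/CONVERSATION_ID' \
      -H 'Authorization: Bearer APP_API_KEY' \
      -H 'Content-Type: application/json' \
      -d '{"user": "end-user-id"}'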
@@ -85,8 +85,8 @@ class DocumentListApi(DatasetApiResource):
dataset_process_rule=dataset.latest_process_rule,
created_from='api'
)
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
document = documents[0]
if doc_type and doc_metadata:
metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[doc_type]
......
@@ -45,8 +45,8 @@ class AudioApi(WebApiResource):
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
......
@@ -52,8 +52,8 @@ class CompletionApi(WebApiResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -109,8 +109,8 @@ class ChatApi(WebApiResource):
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
-except ProviderTokenNotInitError:
-raise ProviderNotInitializeError()
+except ProviderTokenNotInitError as ex:
+raise ProviderNotInitializeError(ex.description)
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
@@ -150,8 +150,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
yield "data: " + json.dumps(api.handle_error(AppUnavailableError()).get_json()) + "\n\n"
-except ProviderTokenNotInitError:
-yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n"
+except ProviderTokenNotInitError as ex:
+yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError:
......
...@@ -101,8 +101,8 @@ class MessageMoreLikeThisApi(WebApiResource): ...@@ -101,8 +101,8 @@ class MessageMoreLikeThisApi(WebApiResource):
raise NotFound("Message Not Exists.") raise NotFound("Message Not Exists.")
except MoreLikeThisDisabledError: except MoreLikeThisDisabledError:
raise AppMoreLikeThisDisabledError() raise AppMoreLikeThisDisabledError()
except ProviderTokenNotInitError: except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError() raise ProviderNotInitializeError(ex.description)
except QuotaExceededError: except QuotaExceededError:
raise ProviderQuotaExceededError() raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError: except ModelCurrentlyNotSupportError:
...@@ -129,8 +129,8 @@ def compact_response(response: Union[dict | Generator]) -> Response: ...@@ -129,8 +129,8 @@ def compact_response(response: Union[dict | Generator]) -> Response:
yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n" yield "data: " + json.dumps(api.handle_error(NotFound("Message Not Exists.")).get_json()) + "\n\n"
except MoreLikeThisDisabledError: except MoreLikeThisDisabledError:
yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n" yield "data: " + json.dumps(api.handle_error(AppMoreLikeThisDisabledError()).get_json()) + "\n\n"
except ProviderTokenNotInitError: except ProviderTokenNotInitError as ex:
yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError()).get_json()) + "\n\n" yield "data: " + json.dumps(api.handle_error(ProviderNotInitializeError(ex.description)).get_json()) + "\n\n"
except QuotaExceededError: except QuotaExceededError:
yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n" yield "data: " + json.dumps(api.handle_error(ProviderQuotaExceededError()).get_json()) + "\n\n"
except ModelCurrentlyNotSupportError: except ModelCurrentlyNotSupportError:
...@@ -167,8 +167,8 @@ class MessageSuggestedQuestionApi(WebApiResource): ...@@ -167,8 +167,8 @@ class MessageSuggestedQuestionApi(WebApiResource):
raise NotFound("Conversation not found") raise NotFound("Conversation not found")
except SuggestedQuestionsAfterAnswerDisabledError: except SuggestedQuestionsAfterAnswerDisabledError:
raise AppSuggestedQuestionsAfterAnswerDisabledError() raise AppSuggestedQuestionsAfterAnswerDisabledError()
except ProviderTokenNotInitError: except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError() raise ProviderNotInitializeError(ex.description)
except QuotaExceededError: except QuotaExceededError:
raise ProviderQuotaExceededError() raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError: except ModelCurrentlyNotSupportError:
......
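Note: every controller hunk above makes the same change — `ProviderTokenNotInitError` is now caught as `ex` and its `description` is threaded into the HTTP-facing `ProviderNotInitializeError`, so the caller learns which provider credentials are missing instead of getting a generic message; in streaming endpoints the same error is serialized into an SSE `data:` line rather than breaking the stream. A minimal, framework-free sketch of that pattern (class and function names here are illustrative, not Dify's actual controller API):

```python
import json
from typing import Generator


class ProviderTokenNotInitError(Exception):
    """Internal error raised when no provider credentials are configured."""
    description = "Provider Token Not Init"

    def __init__(self, *args):
        # same behaviour as in the diff: the first positional arg becomes the description
        self.description = args[0] if args else self.description


class ProviderNotInitializeError(Exception):
    """HTTP-facing error; carries the human-readable description forward."""
    def __init__(self, description: str = ""):
        super().__init__(description)
        self.description = description


def to_sse_error(exc: Exception) -> str:
    """Serialize an error the way a streaming endpoint would emit it."""
    payload = {"error": exc.__class__.__name__,
               "message": getattr(exc, "description", str(exc))}
    return "data: " + json.dumps(payload) + "\n\n"


def run_streaming(generator: Generator[str, None, None]) -> Generator[str, None, None]:
    """Wrap a response generator so provider errors become SSE events, not a broken stream."""
    try:
        yield from generator
    except ProviderTokenNotInitError as ex:
        yield to_sse_error(ProviderNotInitializeError(ex.description))


if __name__ == "__main__":
    def broken_stream():
        yield "data: \"hello\"\n\n"
        raise ProviderTokenNotInitError("No valid OpenAI credentials found.")

    for chunk in run_streaming(broken_stream()):
        print(chunk, end="")
```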
...@@ -13,8 +13,13 @@ class HostedOpenAICredential(BaseModel): ...@@ -13,8 +13,13 @@ class HostedOpenAICredential(BaseModel):
api_key: str api_key: str
class HostedAnthropicCredential(BaseModel):
api_key: str
class HostedLLMCredentials(BaseModel): class HostedLLMCredentials(BaseModel):
openai: Optional[HostedOpenAICredential] = None openai: Optional[HostedOpenAICredential] = None
anthropic: Optional[HostedAnthropicCredential] = None
hosted_llm_credentials = HostedLLMCredentials() hosted_llm_credentials = HostedLLMCredentials()
...@@ -26,3 +31,6 @@ def init_app(app: Flask): ...@@ -26,3 +31,6 @@ def init_app(app: Flask):
if app.config.get("OPENAI_API_KEY"): if app.config.get("OPENAI_API_KEY"):
hosted_llm_credentials.openai = HostedOpenAICredential(api_key=app.config.get("OPENAI_API_KEY")) hosted_llm_credentials.openai = HostedOpenAICredential(api_key=app.config.get("OPENAI_API_KEY"))
if app.config.get("ANTHROPIC_API_KEY"):
hosted_llm_credentials.anthropic = HostedAnthropicCredential(api_key=app.config.get("ANTHROPIC_API_KEY"))
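Note: the hosted-credentials registry simply mirrors optional API keys from the Flask config into a typed object, now with an Anthropic slot next to the OpenAI one. A standalone sketch of the same idea, using a plain dict in place of `app.config` (names mirror the diff; the helper function is illustrative):

```python
from typing import Optional
from pydantic import BaseModel


class HostedOpenAICredential(BaseModel):
    api_key: str


class HostedAnthropicCredential(BaseModel):
    api_key: str


class HostedLLMCredentials(BaseModel):
    openai: Optional[HostedOpenAICredential] = None
    anthropic: Optional[HostedAnthropicCredential] = None


def init_credentials(config: dict) -> HostedLLMCredentials:
    """Populate only the providers whose keys are present in the config."""
    creds = HostedLLMCredentials()
    if config.get("OPENAI_API_KEY"):
        creds.openai = HostedOpenAICredential(api_key=config["OPENAI_API_KEY"])
    if config.get("ANTHROPIC_API_KEY"):
        creds.anthropic = HostedAnthropicCredential(api_key=config["ANTHROPIC_API_KEY"])
    return creds


if __name__ == "__main__":
    creds = init_credentials({"ANTHROPIC_API_KEY": "sk-ant-test"})
    print(creds.anthropic is not None, creds.openai is None)  # True True
```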
...@@ -46,7 +46,7 @@ class LLMCallbackHandler(BaseCallbackHandler): ...@@ -46,7 +46,7 @@ class LLMCallbackHandler(BaseCallbackHandler):
}) })
self.llm_message.prompt = real_prompts self.llm_message.prompt = real_prompts
self.llm_message.prompt_tokens = self.llm.get_messages_tokens(messages[0]) self.llm_message.prompt_tokens = self.llm.get_num_tokens_from_messages(messages[0])
def on_llm_start( def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
......
...@@ -143,6 +143,7 @@ class Completion: ...@@ -143,6 +143,7 @@ class Completion:
prompt, stop_words = cls.get_main_llm_prompt( prompt, stop_words = cls.get_main_llm_prompt(
mode=mode, mode=mode,
llm=final_llm, llm=final_llm,
model=app_model_config.model_dict,
pre_prompt=app_model_config.pre_prompt, pre_prompt=app_model_config.pre_prompt,
query=query, query=query,
inputs=inputs, inputs=inputs,
...@@ -154,6 +155,7 @@ class Completion: ...@@ -154,6 +155,7 @@ class Completion:
cls.recale_llm_max_tokens( cls.recale_llm_max_tokens(
final_llm=final_llm, final_llm=final_llm,
model=app_model_config.model_dict,
prompt=prompt, prompt=prompt,
mode=mode mode=mode
) )
...@@ -163,16 +165,18 @@ class Completion: ...@@ -163,16 +165,18 @@ class Completion:
return response return response
@classmethod @classmethod
def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict, def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, model: dict,
pre_prompt: str, query: str, inputs: dict,
agent_execute_result: Optional[AgentExecuteResult], agent_execute_result: Optional[AgentExecuteResult],
memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \ memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]: Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]:
if mode == 'completion': if mode == 'completion':
prompt_template = JinjaPromptTemplate.from_template( prompt_template = JinjaPromptTemplate.from_template(
template=("""Use the following CONTEXT as your learned knowledge: template=("""Use the following context as your learned knowledge, inside <context></context> XML tags.
[CONTEXT]
<context>
{{context}} {{context}}
[END CONTEXT] </context>
When answer to user: When answer to user:
- If you don't know, just say that you don't know. - If you don't know, just say that you don't know.
...@@ -217,10 +221,11 @@ And answer according to the language of the user's question. ...@@ -217,10 +221,11 @@ And answer according to the language of the user's question.
if agent_execute_result: if agent_execute_result:
human_inputs['context'] = agent_execute_result.output human_inputs['context'] = agent_execute_result.output
human_message_prompt += """Use the following CONTEXT as your learned knowledge. human_message_prompt += """Use the following context as your learned knowledge, inside <context></context> XML tags.
[CONTEXT]
<context>
{{context}} {{context}}
[END CONTEXT] </context>
When answer to user: When answer to user:
- If you don't know, just say that you don't know. - If you don't know, just say that you don't know.
...@@ -232,7 +237,7 @@ And answer according to the language of the user's question. ...@@ -232,7 +237,7 @@ And answer according to the language of the user's question.
if pre_prompt: if pre_prompt:
human_message_prompt += pre_prompt human_message_prompt += pre_prompt
query_prompt = "\nHuman: {{query}}\nAI: " query_prompt = "\n\nHuman: {{query}}\n\nAssistant: "
if memory: if memory:
# append chat histories # append chat histories
...@@ -241,12 +246,17 @@ And answer according to the language of the user's question. ...@@ -241,12 +246,17 @@ And answer according to the language of the user's question.
inputs=human_inputs inputs=human_inputs
) )
curr_message_tokens = memory.llm.get_messages_tokens([tmp_human_message]) curr_message_tokens = memory.llm.get_num_tokens_from_messages([tmp_human_message])
rest_tokens = llm_constant.max_context_token_length[memory.llm.model_name] \ model_name = model['name']
- memory.llm.max_tokens - curr_message_tokens max_tokens = model.get("completion_params").get('max_tokens')
rest_tokens = llm_constant.max_context_token_length[model_name] \
- max_tokens - curr_message_tokens
rest_tokens = max(rest_tokens, 0) rest_tokens = max(rest_tokens, 0)
histories = cls.get_history_messages_from_memory(memory, rest_tokens) histories = cls.get_history_messages_from_memory(memory, rest_tokens)
human_message_prompt += "\n\n" + histories human_message_prompt += "\n\n" if human_message_prompt else ""
human_message_prompt += "Here is the chat histories between human and assistant, " \
"inside <histories></histories> XML tags.\n\n<histories>"
human_message_prompt += histories + "</histories>"
human_message_prompt += query_prompt human_message_prompt += query_prompt
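Note: the prompt changes in this hunk swap the ad-hoc `[CONTEXT]` markers for `<context>`/`<histories>` XML tags and switch the turn format to `\n\nHuman:` / `\n\nAssistant:`, the convention Anthropic's completion-style API expects, while remaining plain text for OpenAI models. A rough standalone sketch of the resulting assembly, with simplified inputs:

```python
def build_prompt(pre_prompt: str, context: str, histories: str, query: str) -> str:
    """Assemble a completion prompt with XML-tagged context and history blocks."""
    parts = []

    if context:
        parts.append(
            "Use the following context as your learned knowledge, "
            "inside <context></context> XML tags.\n\n"
            f"<context>\n{context}\n</context>"
        )

    if pre_prompt:
        parts.append(pre_prompt)

    if histories:
        parts.append(
            "Here is the chat histories between human and assistant, "
            f"inside <histories></histories> XML tags.\n\n<histories>{histories}</histories>"
        )

    body = "\n\n".join(parts)
    # Anthropic-style turn markers; the blank line before each role is required.
    return body + f"\n\nHuman: {query}\n\nAssistant: "


if __name__ == "__main__":
    print(build_prompt("Answer concisely.", "Dify supports Claude 2.", "", "Which Claude models work?"))
```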
...@@ -311,13 +321,15 @@ And answer according to the language of the user's question. ...@@ -311,13 +321,15 @@ And answer according to the language of the user's question.
model=app_model_config.model_dict model=app_model_config.model_dict
) )
model_limited_tokens = llm_constant.max_context_token_length[llm.model_name] model_name = app_model_config.model_dict.get("name")
max_tokens = llm.max_tokens model_limited_tokens = llm_constant.max_context_token_length[model_name]
max_tokens = app_model_config.model_dict.get("completion_params").get('max_tokens')
# get prompt without memory and context # get prompt without memory and context
prompt, _ = cls.get_main_llm_prompt( prompt, _ = cls.get_main_llm_prompt(
mode=mode, mode=mode,
llm=llm, llm=llm,
model=app_model_config.model_dict,
pre_prompt=app_model_config.pre_prompt, pre_prompt=app_model_config.pre_prompt,
query=query, query=query,
inputs=inputs, inputs=inputs,
...@@ -336,16 +348,17 @@ And answer according to the language of the user's question. ...@@ -336,16 +348,17 @@ And answer according to the language of the user's question.
return rest_tokens return rest_tokens
@classmethod @classmethod
def recale_llm_max_tokens(cls, final_llm: Union[StreamableOpenAI, StreamableChatOpenAI], def recale_llm_max_tokens(cls, final_llm: BaseLanguageModel, model: dict,
prompt: Union[str, List[BaseMessage]], mode: str): prompt: Union[str, List[BaseMessage]], mode: str):
# recalc max_tokens if sum(prompt_token + max_tokens) over model token limit # recalc max_tokens if sum(prompt_token + max_tokens) over model token limit
model_limited_tokens = llm_constant.max_context_token_length[final_llm.model_name] model_name = model.get("name")
max_tokens = final_llm.max_tokens model_limited_tokens = llm_constant.max_context_token_length[model_name]
max_tokens = model.get("completion_params").get('max_tokens')
if mode == 'completion' and isinstance(final_llm, BaseLLM): if mode == 'completion' and isinstance(final_llm, BaseLLM):
prompt_tokens = final_llm.get_num_tokens(prompt) prompt_tokens = final_llm.get_num_tokens(prompt)
else: else:
prompt_tokens = final_llm.get_messages_tokens(prompt) prompt_tokens = final_llm.get_num_tokens_from_messages(prompt)
if prompt_tokens + max_tokens > model_limited_tokens: if prompt_tokens + max_tokens > model_limited_tokens:
max_tokens = max(model_limited_tokens - prompt_tokens, 16) max_tokens = max(model_limited_tokens - prompt_tokens, 16)
...@@ -354,9 +367,10 @@ And answer according to the language of the user's question. ...@@ -354,9 +367,10 @@ And answer according to the language of the user's question.
@classmethod @classmethod
def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str, def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str,
app_model_config: AppModelConfig, user: Account, streaming: bool): app_model_config: AppModelConfig, user: Account, streaming: bool):
llm: StreamableOpenAI = LLMBuilder.to_llm(
llm = LLMBuilder.to_llm_from_model(
tenant_id=app.tenant_id, tenant_id=app.tenant_id,
model_name='gpt-3.5-turbo', model=app_model_config.model_dict,
streaming=streaming streaming=streaming
) )
...@@ -364,6 +378,7 @@ And answer according to the language of the user's question. ...@@ -364,6 +378,7 @@ And answer according to the language of the user's question.
original_prompt, _ = cls.get_main_llm_prompt( original_prompt, _ = cls.get_main_llm_prompt(
mode="completion", mode="completion",
llm=llm, llm=llm,
model=app_model_config.model_dict,
pre_prompt=pre_prompt, pre_prompt=pre_prompt,
query=message.query, query=message.query,
inputs=message.inputs, inputs=message.inputs,
...@@ -395,6 +410,7 @@ And answer according to the language of the user's question. ...@@ -395,6 +410,7 @@ And answer according to the language of the user's question.
cls.recale_llm_max_tokens( cls.recale_llm_max_tokens(
final_llm=llm, final_llm=llm,
model=app_model_config.model_dict,
prompt=prompt, prompt=prompt,
mode='completion' mode='completion'
) )
......
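Note: two related calculations run through this file — the history budget (`context window − requested max_tokens − current prompt tokens`) and the final clamp that shrinks `max_tokens` when the prompt alone would overflow the window, both now driven by the app's `model_dict` instead of attributes on the LLM object. A self-contained sketch of both, with a toy token counter standing in for the model tokenizer:

```python
MAX_CONTEXT_TOKEN_LENGTH = {
    "claude-instant-1": 100_000,
    "claude-2": 100_000,
    "gpt-3.5-turbo": 4_096,
}


def count_tokens(text: str) -> int:
    """Toy tokenizer: roughly one token per whitespace-separated word."""
    return len(text.split())


def history_budget(model_name: str, max_tokens: int, current_prompt: str) -> int:
    """Tokens left for chat history after the prompt and completion are reserved."""
    rest = MAX_CONTEXT_TOKEN_LENGTH[model_name] - max_tokens - count_tokens(current_prompt)
    return max(rest, 0)


def clamp_max_tokens(model_name: str, max_tokens: int, prompt: str, floor: int = 16) -> int:
    """Shrink max_tokens so prompt + completion fit the window (never below a small floor)."""
    limit = MAX_CONTEXT_TOKEN_LENGTH[model_name]
    prompt_tokens = count_tokens(prompt)
    if prompt_tokens + max_tokens > limit:
        return max(limit - prompt_tokens, floor)
    return max_tokens


if __name__ == "__main__":
    prompt = "word " * 4000
    print(history_budget("gpt-3.5-turbo", 256, "Hello there"))   # 3838
    print(clamp_max_tokens("gpt-3.5-turbo", 512, prompt))        # clamped to 96
```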
from _decimal import Decimal from _decimal import Decimal
models = { models = {
'claude-instant-1': 'anthropic', # 100,000 tokens
'claude-2': 'anthropic', # 100,000 tokens
'gpt-4': 'openai', # 8,192 tokens 'gpt-4': 'openai', # 8,192 tokens
'gpt-4-32k': 'openai', # 32,768 tokens 'gpt-4-32k': 'openai', # 32,768 tokens
'gpt-3.5-turbo': 'openai', # 4,096 tokens 'gpt-3.5-turbo': 'openai', # 4,096 tokens
...@@ -10,10 +12,13 @@ models = { ...@@ -10,10 +12,13 @@ models = {
'text-curie-001': 'openai', # 2,049 tokens 'text-curie-001': 'openai', # 2,049 tokens
'text-babbage-001': 'openai', # 2,049 tokens 'text-babbage-001': 'openai', # 2,049 tokens
'text-ada-001': 'openai', # 2,049 tokens 'text-ada-001': 'openai', # 2,049 tokens
'text-embedding-ada-002': 'openai' # 8191 tokens, 1536 dimensions 'text-embedding-ada-002': 'openai', # 8191 tokens, 1536 dimensions
'whisper-1': 'openai'
} }
max_context_token_length = { max_context_token_length = {
'claude-instant-1': 100000,
'claude-2': 100000,
'gpt-4': 8192, 'gpt-4': 8192,
'gpt-4-32k': 32768, 'gpt-4-32k': 32768,
'gpt-3.5-turbo': 4096, 'gpt-3.5-turbo': 4096,
...@@ -23,17 +28,21 @@ max_context_token_length = { ...@@ -23,17 +28,21 @@ max_context_token_length = {
'text-curie-001': 2049, 'text-curie-001': 2049,
'text-babbage-001': 2049, 'text-babbage-001': 2049,
'text-ada-001': 2049, 'text-ada-001': 2049,
'text-embedding-ada-002': 8191 'text-embedding-ada-002': 8191,
} }
models_by_mode = { models_by_mode = {
'chat': [ 'chat': [
'claude-instant-1', # 100,000 tokens
'claude-2', # 100,000 tokens
'gpt-4', # 8,192 tokens 'gpt-4', # 8,192 tokens
'gpt-4-32k', # 32,768 tokens 'gpt-4-32k', # 32,768 tokens
'gpt-3.5-turbo', # 4,096 tokens 'gpt-3.5-turbo', # 4,096 tokens
'gpt-3.5-turbo-16k', # 16,384 tokens 'gpt-3.5-turbo-16k', # 16,384 tokens
], ],
'completion': [ 'completion': [
'claude-instant-1', # 100,000 tokens
'claude-2', # 100,000 tokens
'gpt-4', # 8,192 tokens 'gpt-4', # 8,192 tokens
'gpt-4-32k', # 32,768 tokens 'gpt-4-32k', # 32,768 tokens
'gpt-3.5-turbo', # 4,096 tokens 'gpt-3.5-turbo', # 4,096 tokens
...@@ -52,6 +61,14 @@ models_by_mode = { ...@@ -52,6 +61,14 @@ models_by_mode = {
model_currency = 'USD' model_currency = 'USD'
model_prices = { model_prices = {
'claude-instant-1': {
'prompt': Decimal('0.00163'),
'completion': Decimal('0.00551'),
},
'claude-2': {
'prompt': Decimal('0.01102'),
'completion': Decimal('0.03268'),
},
'gpt-4': { 'gpt-4': {
'prompt': Decimal('0.03'), 'prompt': Decimal('0.03'),
'completion': Decimal('0.06'), 'completion': Decimal('0.06'),
......
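Note: with Claude added to the pricing table, cost accounting stays a lookup plus two multiplications. A sketch of how such a table is typically consumed, assuming (as with OpenAI's published pricing) that the `Decimal` figures are USD per 1,000 tokens — the excerpt itself does not state the unit:

```python
from decimal import Decimal

model_prices = {
    "claude-instant-1": {"prompt": Decimal("0.00163"), "completion": Decimal("0.00551")},
    "claude-2": {"prompt": Decimal("0.01102"), "completion": Decimal("0.03268")},
    "gpt-4": {"prompt": Decimal("0.03"), "completion": Decimal("0.06")},
}


def message_cost(model: str, prompt_tokens: int, completion_tokens: int) -> Decimal:
    """Total cost of one call, assuming prices are quoted per 1,000 tokens."""
    prices = model_prices[model]
    return (
        prices["prompt"] * Decimal(prompt_tokens)
        + prices["completion"] * Decimal(completion_tokens)
    ) / Decimal(1000)


if __name__ == "__main__":
    print(message_cost("claude-2", 1200, 300))  # Decimal('0.023028')
```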
...@@ -56,7 +56,7 @@ class ConversationMessageTask: ...@@ -56,7 +56,7 @@ class ConversationMessageTask:
) )
def init(self): def init(self):
provider_name = LLMBuilder.get_default_provider(self.app.tenant_id) provider_name = LLMBuilder.get_default_provider(self.app.tenant_id, self.model_name)
self.model_dict['provider'] = provider_name self.model_dict['provider'] = provider_name
override_model_configs = None override_model_configs = None
...@@ -90,7 +90,7 @@ class ConversationMessageTask: ...@@ -90,7 +90,7 @@ class ConversationMessageTask:
system_message = PromptBuilder.to_system_message(self.app_model_config.pre_prompt, self.inputs) system_message = PromptBuilder.to_system_message(self.app_model_config.pre_prompt, self.inputs)
system_instruction = system_message.content system_instruction = system_message.content
llm = LLMBuilder.to_llm(self.tenant_id, self.model_name) llm = LLMBuilder.to_llm(self.tenant_id, self.model_name)
system_instruction_tokens = llm.get_messages_tokens([system_message]) system_instruction_tokens = llm.get_num_tokens_from_messages([system_message])
if not self.conversation: if not self.conversation:
self.is_new_conversation = True self.is_new_conversation = True
...@@ -186,6 +186,7 @@ class ConversationMessageTask: ...@@ -186,6 +186,7 @@ class ConversationMessageTask:
if provider and provider.provider_type == ProviderType.SYSTEM.value: if provider and provider.provider_type == ProviderType.SYSTEM.value:
db.session.query(Provider).filter( db.session.query(Provider).filter(
Provider.tenant_id == self.app.tenant_id, Provider.tenant_id == self.app.tenant_id,
Provider.provider_name == provider.provider_name,
Provider.quota_limit > Provider.quota_used Provider.quota_limit > Provider.quota_used
).update({'quota_used': Provider.quota_used + 1}) ).update({'quota_used': Provider.quota_used + 1})
......
...@@ -4,6 +4,7 @@ from typing import List ...@@ -4,6 +4,7 @@ from typing import List
from langchain.embeddings.base import Embeddings from langchain.embeddings.base import Embeddings
from sqlalchemy.exc import IntegrityError from sqlalchemy.exc import IntegrityError
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
from extensions.ext_database import db from extensions.ext_database import db
from libs import helper from libs import helper
from models.dataset import Embedding from models.dataset import Embedding
...@@ -49,6 +50,7 @@ class CacheEmbedding(Embeddings): ...@@ -49,6 +50,7 @@ class CacheEmbedding(Embeddings):
text_embeddings.extend(embedding_results) text_embeddings.extend(embedding_results)
return text_embeddings return text_embeddings
@handle_openai_exceptions
def embed_query(self, text: str) -> List[float]: def embed_query(self, text: str) -> List[float]:
"""Embed query text.""" """Embed query text."""
# use doc embedding cache or store if not exists # use doc embedding cache or store if not exists
......
...@@ -23,6 +23,10 @@ class LLMGenerator: ...@@ -23,6 +23,10 @@ class LLMGenerator:
@classmethod @classmethod
def generate_conversation_name(cls, tenant_id: str, query, answer): def generate_conversation_name(cls, tenant_id: str, query, answer):
prompt = CONVERSATION_TITLE_PROMPT prompt = CONVERSATION_TITLE_PROMPT
if len(query) > 2000:
query = query[:300] + "...[TRUNCATED]..." + query[-300:]
prompt = prompt.format(query=query) prompt = prompt.format(query=query)
llm: StreamableOpenAI = LLMBuilder.to_llm( llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=tenant_id, tenant_id=tenant_id,
...@@ -52,7 +56,17 @@ class LLMGenerator: ...@@ -52,7 +56,17 @@ class LLMGenerator:
if not message.answer: if not message.answer:
continue continue
message_qa_text = "Human:" + message.query + "\nAI:" + message.answer + "\n" if len(message.query) > 2000:
query = message.query[:300] + "...[TRUNCATED]..." + message.query[-300:]
else:
query = message.query
if len(message.answer) > 2000:
answer = message.answer[:300] + "...[TRUNCATED]..." + message.answer[-300:]
else:
answer = message.answer
message_qa_text = "\n\nHuman:" + query + "\n\nAssistant:" + answer
if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0: if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
context += message_qa_text context += message_qa_text
......
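Note: both new branches apply the same guard — anything longer than 2,000 characters is reduced to its first and last 300 characters around a `...[TRUNCATED]...` marker before being fed into the title/summary prompts. Extracted as a helper (this exact helper does not exist in the diff; it just factors out the repeated logic):

```python
def truncate_middle(text: str, threshold: int = 2000, keep: int = 300) -> str:
    """Keep the head and tail of very long text and drop the middle."""
    if len(text) <= threshold:
        return text
    return text[:keep] + "...[TRUNCATED]..." + text[-keep:]


if __name__ == "__main__":
    long_query = "x" * 5000
    short_query = "What models does Dify support?"
    print(len(truncate_middle(long_query)))   # 300 + 17 + 300 = 617
    print(truncate_middle(short_query))       # unchanged
```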
...@@ -17,7 +17,7 @@ class IndexBuilder: ...@@ -17,7 +17,7 @@ class IndexBuilder:
model_credentials = LLMBuilder.get_model_credentials( model_credentials = LLMBuilder.get_model_credentials(
tenant_id=dataset.tenant_id, tenant_id=dataset.tenant_id,
model_provider=LLMBuilder.get_default_provider(dataset.tenant_id), model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'),
model_name='text-embedding-ada-002' model_name='text-embedding-ada-002'
) )
......
...@@ -40,6 +40,9 @@ class ProviderTokenNotInitError(Exception): ...@@ -40,6 +40,9 @@ class ProviderTokenNotInitError(Exception):
""" """
description = "Provider Token Not Init" description = "Provider Token Not Init"
def __init__(self, *args, **kwargs):
self.description = args[0] if args else self.description
class QuotaExceededError(Exception): class QuotaExceededError(Exception):
""" """
......
...@@ -8,9 +8,10 @@ from core.llm.provider.base import BaseProvider ...@@ -8,9 +8,10 @@ from core.llm.provider.base import BaseProvider
from core.llm.provider.llm_provider_service import LLMProviderService from core.llm.provider.llm_provider_service import LLMProviderService
from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI
from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI
from core.llm.streamable_chat_anthropic import StreamableChatAnthropic
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI from core.llm.streamable_open_ai import StreamableOpenAI
from models.provider import ProviderType from models.provider import ProviderType, ProviderName
class LLMBuilder: class LLMBuilder:
...@@ -32,43 +33,43 @@ class LLMBuilder: ...@@ -32,43 +33,43 @@ class LLMBuilder:
@classmethod @classmethod
def to_llm(cls, tenant_id: str, model_name: str, **kwargs) -> Union[StreamableOpenAI, StreamableChatOpenAI]: def to_llm(cls, tenant_id: str, model_name: str, **kwargs) -> Union[StreamableOpenAI, StreamableChatOpenAI]:
provider = cls.get_default_provider(tenant_id) provider = cls.get_default_provider(tenant_id, model_name)
model_credentials = cls.get_model_credentials(tenant_id, provider, model_name) model_credentials = cls.get_model_credentials(tenant_id, provider, model_name)
llm_cls = None
mode = cls.get_mode_by_model(model_name) mode = cls.get_mode_by_model(model_name)
if mode == 'chat': if mode == 'chat':
if provider == 'openai': if provider == ProviderName.OPENAI.value:
llm_cls = StreamableChatOpenAI llm_cls = StreamableChatOpenAI
else: elif provider == ProviderName.AZURE_OPENAI.value:
llm_cls = StreamableAzureChatOpenAI llm_cls = StreamableAzureChatOpenAI
elif provider == ProviderName.ANTHROPIC.value:
llm_cls = StreamableChatAnthropic
elif mode == 'completion': elif mode == 'completion':
if provider == 'openai': if provider == ProviderName.OPENAI.value:
llm_cls = StreamableOpenAI llm_cls = StreamableOpenAI
else: elif provider == ProviderName.AZURE_OPENAI.value:
llm_cls = StreamableAzureOpenAI llm_cls = StreamableAzureOpenAI
else:
raise ValueError(f"model name {model_name} is not supported.")
if not llm_cls:
raise ValueError(f"model name {model_name} is not supported.")
model_kwargs = { model_kwargs = {
'model_name': model_name,
'temperature': kwargs.get('temperature', 0),
'max_tokens': kwargs.get('max_tokens', 256),
'top_p': kwargs.get('top_p', 1), 'top_p': kwargs.get('top_p', 1),
'frequency_penalty': kwargs.get('frequency_penalty', 0), 'frequency_penalty': kwargs.get('frequency_penalty', 0),
'presence_penalty': kwargs.get('presence_penalty', 0), 'presence_penalty': kwargs.get('presence_penalty', 0),
'callbacks': kwargs.get('callbacks', None),
'streaming': kwargs.get('streaming', False),
} }
model_extras_kwargs = model_kwargs if mode == 'completion' else {'model_kwargs': model_kwargs} model_kwargs.update(model_credentials)
model_kwargs = llm_cls.get_kwargs_from_model_params(model_kwargs)
return llm_cls( return llm_cls(**model_kwargs)
model_name=model_name,
temperature=kwargs.get('temperature', 0),
max_tokens=kwargs.get('max_tokens', 256),
**model_extras_kwargs,
callbacks=kwargs.get('callbacks', None),
streaming=kwargs.get('streaming', False),
# request_timeout=None
**model_credentials
)
@classmethod @classmethod
def to_llm_from_model(cls, tenant_id: str, model: dict, streaming: bool = False, def to_llm_from_model(cls, tenant_id: str, model: dict, streaming: bool = False,
...@@ -118,14 +119,29 @@ class LLMBuilder: ...@@ -118,14 +119,29 @@ class LLMBuilder:
return provider_service.get_credentials(model_name) return provider_service.get_credentials(model_name)
@classmethod @classmethod
def get_default_provider(cls, tenant_id: str) -> str: def get_default_provider(cls, tenant_id: str, model_name: str) -> str:
provider = BaseProvider.get_valid_provider(tenant_id) provider_name = llm_constant.models[model_name]
if not provider:
raise ProviderTokenNotInitError() if provider_name == 'openai':
# get the default provider (openai / azure_openai) for the tenant
if provider.provider_type == ProviderType.SYSTEM.value: openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.OPENAI.value)
provider_name = 'openai' azure_openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.AZURE_OPENAI.value)
else:
provider_name = provider.provider_name provider = None
if openai_provider:
provider = openai_provider
elif azure_openai_provider:
provider = azure_openai_provider
if not provider:
raise ProviderTokenNotInitError(
f"No valid {provider_name} model provider credentials found. "
f"Please go to Settings -> Model Provider to complete your provider credentials."
)
if provider.provider_type == ProviderType.SYSTEM.value:
provider_name = 'openai'
else:
provider_name = provider.provider_name
return provider_name return provider_name
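Note: the builder now resolves the LLM class from a (provider, mode) pair and delegates parameter shaping to the class itself via `get_kwargs_from_model_params`, instead of special-casing OpenAI-only keyword arguments. A stripped-down sketch of that dispatch, with stand-in classes rather than the real langchain wrappers:

```python
class FakeChatOpenAI:
    @classmethod
    def get_kwargs_from_model_params(cls, params: dict) -> dict:
        # OpenAI chat models take sampling penalties via model_kwargs.
        params["model_kwargs"] = {
            "top_p": params.pop("top_p", 1),
            "frequency_penalty": params.pop("frequency_penalty", 0),
            "presence_penalty": params.pop("presence_penalty", 0),
        }
        return params


class FakeChatAnthropic:
    @classmethod
    def get_kwargs_from_model_params(cls, params: dict) -> dict:
        # Anthropic uses different names and has no penalty parameters.
        params["model"] = params.pop("model_name")
        params["max_tokens_to_sample"] = params.pop("max_tokens")
        params.pop("frequency_penalty", None)
        params.pop("presence_penalty", None)
        return params


CHAT_CLASSES = {
    "openai": FakeChatOpenAI,
    "anthropic": FakeChatAnthropic,
}


def resolve_chat_class(provider: str):
    try:
        return CHAT_CLASSES[provider]
    except KeyError:
        raise ValueError(f"provider {provider} is not supported.")


if __name__ == "__main__":
    params = {"model_name": "claude-2", "temperature": 0, "max_tokens": 256,
              "top_p": 1, "frequency_penalty": 0, "presence_penalty": 0}
    cls = resolve_chat_class("anthropic")
    print(cls.get_kwargs_from_model_params(dict(params)))
```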
from typing import Optional import json
import logging
from typing import Optional, Union
import anthropic
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage
from core import hosted_llm_credentials
from core.llm.error import ProviderTokenNotInitError
from core.llm.provider.base import BaseProvider from core.llm.provider.base import BaseProvider
from models.provider import ProviderName from core.llm.provider.errors import ValidateFailedError
from models.provider import ProviderName, ProviderType
class AnthropicProvider(BaseProvider): class AnthropicProvider(BaseProvider):
def get_models(self, model_id: Optional[str] = None) -> list[dict]: def get_models(self, model_id: Optional[str] = None) -> list[dict]:
credentials = self.get_credentials(model_id) return [
# todo {
return [] 'id': 'claude-instant-1',
'name': 'claude-instant-1',
},
{
'id': 'claude-2',
'name': 'claude-2',
},
]
def get_credentials(self, model_id: Optional[str] = None) -> dict: def get_credentials(self, model_id: Optional[str] = None) -> dict:
return self.get_provider_api_key(model_id=model_id)
def get_provider_name(self):
return ProviderName.ANTHROPIC
def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
""" """
Returns the API credentials for Azure OpenAI as a dictionary, for the given tenant_id. Returns the provider configs.
The dictionary contains keys: azure_api_type, azure_api_version, azure_api_base, and azure_api_key.
""" """
return { try:
'anthropic_api_key': self.get_provider_api_key(model_id=model_id) config = self.get_provider_api_key(only_custom=only_custom)
} except:
config = {
'anthropic_api_key': ''
}
def get_provider_name(self): if obfuscated:
return ProviderName.ANTHROPIC if not config.get('anthropic_api_key'):
\ No newline at end of file config = {
'anthropic_api_key': ''
}
config['anthropic_api_key'] = self.obfuscated_token(config.get('anthropic_api_key'))
return config
return config
def get_encrypted_token(self, config: Union[dict | str]):
"""
Returns the encrypted token.
"""
return json.dumps({
'anthropic_api_key': self.encrypt_token(config['anthropic_api_key'])
})
def get_decrypted_token(self, token: str):
"""
Returns the decrypted token.
"""
config = json.loads(token)
config['anthropic_api_key'] = self.decrypt_token(config['anthropic_api_key'])
return config
def get_token_type(self):
return dict
def config_validate(self, config: Union[dict | str]):
"""
Validates the given config.
"""
# check OpenAI / Azure OpenAI credential is valid
openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.OPENAI.value)
azure_openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.AZURE_OPENAI.value)
provider = None
if openai_provider:
provider = openai_provider
elif azure_openai_provider:
provider = azure_openai_provider
if not provider:
raise ValidateFailedError(f"OpenAI or Azure OpenAI provider must be configured first.")
if provider.provider_type == ProviderType.SYSTEM.value:
quota_used = provider.quota_used if provider.quota_used is not None else 0
quota_limit = provider.quota_limit if provider.quota_limit is not None else 0
if quota_used >= quota_limit:
raise ValidateFailedError(f"Your quota for Dify Hosted OpenAI has been exhausted, "
f"please configure OpenAI or Azure OpenAI provider first.")
try:
if not isinstance(config, dict):
raise ValueError('Config must be an object.')
if 'anthropic_api_key' not in config:
raise ValueError('anthropic_api_key must be provided.')
chat_llm = ChatAnthropic(
model='claude-instant-1',
anthropic_api_key=config['anthropic_api_key'],
max_tokens_to_sample=10,
temperature=0,
default_request_timeout=60
)
messages = [
HumanMessage(
content="ping"
)
]
chat_llm(messages)
except anthropic.APIConnectionError as ex:
raise ValidateFailedError(f"Anthropic: Connection error, cause: {ex.__cause__}")
except (anthropic.APIStatusError, anthropic.RateLimitError) as ex:
raise ValidateFailedError(f"Anthropic: Error code: {ex.status_code} - "
f"{ex.body['error']['type']}: {ex.body['error']['message']}")
except Exception as ex:
logging.exception('Anthropic config validation failed')
raise ex
def get_hosted_credentials(self) -> Union[str | dict]:
if not hosted_llm_credentials.anthropic or not hosted_llm_credentials.anthropic.api_key:
raise ProviderTokenNotInitError(
f"No valid {self.get_provider_name().value} model provider credentials found. "
f"Please go to Settings -> Model Provider to complete your provider credentials."
)
return {'anthropic_api_key': hosted_llm_credentials.anthropic.api_key}
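Note: the new provider stores its credentials as an encrypted JSON blob, exposes them obfuscated for the UI, and refuses validation unless an OpenAI or Azure OpenAI provider (with remaining quota, if system-hosted) is configured first. A toy round-trip of just the token handling, with trivial placeholder `encrypt`/`decrypt` functions standing in for Dify's RSA helpers:

```python
import base64
import json


def encrypt_token(token: str) -> str:
    """Placeholder for the real RSA-based encryption."""
    return base64.b64encode(token.encode()).decode()


def decrypt_token(token: str) -> str:
    """Placeholder for the real RSA-based decryption."""
    return base64.b64decode(token.encode()).decode()


def get_encrypted_config(config: dict) -> str:
    """Serialize the provider config with the API key encrypted."""
    return json.dumps({"anthropic_api_key": encrypt_token(config["anthropic_api_key"])})


def get_decrypted_config(stored: str) -> dict:
    config = json.loads(stored)
    config["anthropic_api_key"] = decrypt_token(config["anthropic_api_key"])
    return config


def obfuscate(key: str, visible: int = 4) -> str:
    """Mask the middle of a key for display, keeping a short prefix and suffix."""
    if len(key) <= visible * 2:
        return "*" * len(key)
    return key[:visible] + "*" * (len(key) - visible * 2) + key[-visible:]


if __name__ == "__main__":
    stored = get_encrypted_config({"anthropic_api_key": "sk-ant-api03-example"})
    print(get_decrypted_config(stored))
    print(obfuscate("sk-ant-api03-example"))
```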
...@@ -52,12 +52,12 @@ class AzureProvider(BaseProvider): ...@@ -52,12 +52,12 @@ class AzureProvider(BaseProvider):
def get_provider_name(self): def get_provider_name(self):
return ProviderName.AZURE_OPENAI return ProviderName.AZURE_OPENAI
def get_provider_configs(self, obfuscated: bool = False) -> Union[str | dict]: def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
""" """
Returns the provider configs. Returns the provider configs.
""" """
try: try:
config = self.get_provider_api_key() config = self.get_provider_api_key(only_custom=only_custom)
except: except:
config = { config = {
'openai_api_type': 'azure', 'openai_api_type': 'azure',
...@@ -81,7 +81,6 @@ class AzureProvider(BaseProvider): ...@@ -81,7 +81,6 @@ class AzureProvider(BaseProvider):
return config return config
def get_token_type(self): def get_token_type(self):
# TODO: change to dict when implemented
return dict return dict
def config_validate(self, config: Union[dict | str]): def config_validate(self, config: Union[dict | str]):
......
...@@ -2,7 +2,7 @@ import base64 ...@@ -2,7 +2,7 @@ import base64
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from typing import Optional, Union from typing import Optional, Union
from core import hosted_llm_credentials from core.constant import llm_constant
from core.llm.error import QuotaExceededError, ModelCurrentlyNotSupportError, ProviderTokenNotInitError from core.llm.error import QuotaExceededError, ModelCurrentlyNotSupportError, ProviderTokenNotInitError
from extensions.ext_database import db from extensions.ext_database import db
from libs import rsa from libs import rsa
...@@ -14,15 +14,18 @@ class BaseProvider(ABC): ...@@ -14,15 +14,18 @@ class BaseProvider(ABC):
def __init__(self, tenant_id: str): def __init__(self, tenant_id: str):
self.tenant_id = tenant_id self.tenant_id = tenant_id
def get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> Union[str | dict]: def get_provider_api_key(self, model_id: Optional[str] = None, only_custom: bool = False) -> Union[str | dict]:
""" """
Returns the decrypted API key for the given tenant_id and provider_name. Returns the decrypted API key for the given tenant_id and provider_name.
If the provider is of type SYSTEM and the quota is exceeded, raises a QuotaExceededError. If the provider is of type SYSTEM and the quota is exceeded, raises a QuotaExceededError.
If the provider is not found or not valid, raises a ProviderTokenNotInitError. If the provider is not found or not valid, raises a ProviderTokenNotInitError.
""" """
provider = self.get_provider(prefer_custom) provider = self.get_provider(only_custom)
if not provider: if not provider:
raise ProviderTokenNotInitError() raise ProviderTokenNotInitError(
f"No valid {llm_constant.models[model_id]} model provider credentials found. "
f"Please go to Settings -> Model Provider to complete your provider credentials."
)
if provider.provider_type == ProviderType.SYSTEM.value: if provider.provider_type == ProviderType.SYSTEM.value:
quota_used = provider.quota_used if provider.quota_used is not None else 0 quota_used = provider.quota_used if provider.quota_used is not None else 0
...@@ -38,18 +41,19 @@ class BaseProvider(ABC): ...@@ -38,18 +41,19 @@ class BaseProvider(ABC):
else: else:
return self.get_decrypted_token(provider.encrypted_config) return self.get_decrypted_token(provider.encrypted_config)
def get_provider(self, prefer_custom: bool) -> Optional[Provider]: def get_provider(self, only_custom: bool = False) -> Optional[Provider]:
""" """
Returns the Provider instance for the given tenant_id and provider_name. Returns the Provider instance for the given tenant_id and provider_name.
If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag. If both CUSTOM and SYSTEM providers exist, a valid CUSTOM provider is preferred; only_custom restricts the lookup to CUSTOM providers.
""" """
return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, prefer_custom) return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, only_custom)
@classmethod @classmethod
def get_valid_provider(cls, tenant_id: str, provider_name: str = None, prefer_custom: bool = False) -> Optional[Provider]: def get_valid_provider(cls, tenant_id: str, provider_name: str = None, only_custom: bool = False) -> Optional[
Provider]:
""" """
Returns the Provider instance for the given tenant_id and provider_name. Returns the Provider instance for the given tenant_id and provider_name.
If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag. If both CUSTOM and SYSTEM providers exist, a valid CUSTOM provider is preferred.
""" """
query = db.session.query(Provider).filter( query = db.session.query(Provider).filter(
Provider.tenant_id == tenant_id Provider.tenant_id == tenant_id
...@@ -58,39 +62,31 @@ class BaseProvider(ABC): ...@@ -58,39 +62,31 @@ class BaseProvider(ABC):
if provider_name: if provider_name:
query = query.filter(Provider.provider_name == provider_name) query = query.filter(Provider.provider_name == provider_name)
providers = query.order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all() if only_custom:
query = query.filter(Provider.provider_type == ProviderType.CUSTOM.value)
custom_provider = None providers = query.order_by(Provider.provider_type.asc()).all()
system_provider = None
for provider in providers: for provider in providers:
if provider.provider_type == ProviderType.CUSTOM.value and provider.is_valid and provider.encrypted_config: if provider.provider_type == ProviderType.CUSTOM.value and provider.is_valid and provider.encrypted_config:
custom_provider = provider return provider
elif provider.provider_type == ProviderType.SYSTEM.value and provider.is_valid: elif provider.provider_type == ProviderType.SYSTEM.value and provider.is_valid:
system_provider = provider return provider
if custom_provider:
return custom_provider
elif system_provider:
return system_provider
else:
return None
def get_hosted_credentials(self) -> str: return None
if self.get_provider_name() != ProviderName.OPENAI:
raise ProviderTokenNotInitError()
if not hosted_llm_credentials.openai or not hosted_llm_credentials.openai.api_key: def get_hosted_credentials(self) -> Union[str | dict]:
raise ProviderTokenNotInitError() raise ProviderTokenNotInitError(
f"No valid {self.get_provider_name().value} model provider credentials found. "
return hosted_llm_credentials.openai.api_key f"Please go to Settings -> Model Provider to complete your provider credentials."
)
def get_provider_configs(self, obfuscated: bool = False) -> Union[str | dict]: def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
""" """
Returns the provider configs. Returns the provider configs.
""" """
try: try:
config = self.get_provider_api_key() config = self.get_provider_api_key(only_custom=only_custom)
except: except:
config = '' config = ''
......
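Note: the reworked lookup orders records so that a valid CUSTOM provider (one that actually has credentials) wins over a SYSTEM one, and the former `prefer_custom` flag becomes an `only_custom` filter. A sketch of the same selection over plain objects instead of SQLAlchemy rows (it assumes, as in Dify, that the type values sort as 'custom' < 'system'):

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class ProviderRecord:
    provider_name: str
    provider_type: str          # "custom" or "system"
    is_valid: bool
    encrypted_config: Optional[str] = None


def get_valid_provider(records: List[ProviderRecord],
                       provider_name: Optional[str] = None,
                       only_custom: bool = False) -> Optional[ProviderRecord]:
    """Pick a usable provider: custom (with credentials) first, then system."""
    candidates = [r for r in records if provider_name is None or r.provider_name == provider_name]
    if only_custom:
        candidates = [r for r in candidates if r.provider_type == "custom"]
    # Ascending order on provider_type puts "custom" before "system".
    for record in sorted(candidates, key=lambda r: r.provider_type):
        if record.provider_type == "custom" and record.is_valid and record.encrypted_config:
            return record
        if record.provider_type == "system" and record.is_valid:
            return record
    return None


if __name__ == "__main__":
    records = [
        ProviderRecord("anthropic", "system", True),
        ProviderRecord("anthropic", "custom", True, encrypted_config="{...}"),
    ]
    print(get_valid_provider(records, "anthropic").provider_type)  # custom
```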
...@@ -31,11 +31,11 @@ class LLMProviderService: ...@@ -31,11 +31,11 @@ class LLMProviderService:
def get_credentials(self, model_id: Optional[str] = None) -> dict: def get_credentials(self, model_id: Optional[str] = None) -> dict:
return self.provider.get_credentials(model_id) return self.provider.get_credentials(model_id)
def get_provider_configs(self, obfuscated: bool = False) -> Union[str | dict]: def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
return self.provider.get_provider_configs(obfuscated) return self.provider.get_provider_configs(obfuscated=obfuscated, only_custom=only_custom)
def get_provider_db_record(self, prefer_custom: bool = False) -> Optional[Provider]: def get_provider_db_record(self) -> Optional[Provider]:
return self.provider.get_provider(prefer_custom) return self.provider.get_provider()
def config_validate(self, config: Union[dict | str]): def config_validate(self, config: Union[dict | str]):
""" """
......
...@@ -4,6 +4,8 @@ from typing import Optional, Union ...@@ -4,6 +4,8 @@ from typing import Optional, Union
import openai import openai
from openai.error import AuthenticationError, OpenAIError from openai.error import AuthenticationError, OpenAIError
from core import hosted_llm_credentials
from core.llm.error import ProviderTokenNotInitError
from core.llm.moderation import Moderation from core.llm.moderation import Moderation
from core.llm.provider.base import BaseProvider from core.llm.provider.base import BaseProvider
from core.llm.provider.errors import ValidateFailedError from core.llm.provider.errors import ValidateFailedError
...@@ -42,3 +44,12 @@ class OpenAIProvider(BaseProvider): ...@@ -42,3 +44,12 @@ class OpenAIProvider(BaseProvider):
except Exception as ex: except Exception as ex:
logging.exception('OpenAI config validation failed') logging.exception('OpenAI config validation failed')
raise ex raise ex
def get_hosted_credentials(self) -> Union[str | dict]:
if not hosted_llm_credentials.openai or not hosted_llm_credentials.openai.api_key:
raise ProviderTokenNotInitError(
f"No valid {self.get_provider_name().value} model provider credentials found. "
f"Please go to Settings -> Model Provider to complete your provider credentials."
)
return hosted_llm_credentials.openai.api_key
from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun, Callbacks from langchain.callbacks.manager import Callbacks
from langchain.schema import BaseMessage, ChatResult, LLMResult from langchain.schema import BaseMessage, LLMResult
from langchain.chat_models import AzureChatOpenAI from langchain.chat_models import AzureChatOpenAI
from typing import Optional, List, Dict, Any from typing import Optional, List, Dict, Any
from pydantic import root_validator from pydantic import root_validator
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class StreamableAzureChatOpenAI(AzureChatOpenAI): class StreamableAzureChatOpenAI(AzureChatOpenAI):
...@@ -46,30 +46,7 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI): ...@@ -46,30 +46,7 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI):
"organization": self.openai_organization if self.openai_organization else None, "organization": self.openai_organization if self.openai_organization else None,
} }
def get_messages_tokens(self, messages: List[BaseMessage]) -> int: @handle_openai_exceptions
"""Get the number of tokens in a list of messages.
Args:
messages: The messages to count the tokens of.
Returns:
The number of tokens in the messages.
"""
tokens_per_message = 5
tokens_per_request = 3
message_tokens = tokens_per_request
message_strs = ''
for message in messages:
message_strs += message.content
message_tokens += tokens_per_message
# calc once
message_tokens += self.get_num_tokens(message_strs)
return message_tokens
@handle_llm_exceptions
def generate( def generate(
self, self,
messages: List[List[BaseMessage]], messages: List[List[BaseMessage]],
...@@ -79,12 +56,18 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI): ...@@ -79,12 +56,18 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI):
) -> LLMResult: ) -> LLMResult:
return super().generate(messages, stop, callbacks, **kwargs) return super().generate(messages, stop, callbacks, **kwargs)
@handle_llm_exceptions_async @classmethod
async def agenerate( def get_kwargs_from_model_params(cls, params: dict):
self, model_kwargs = {
messages: List[List[BaseMessage]], 'top_p': params.get('top_p', 1),
stop: Optional[List[str]] = None, 'frequency_penalty': params.get('frequency_penalty', 0),
callbacks: Callbacks = None, 'presence_penalty': params.get('presence_penalty', 0),
**kwargs: Any, }
) -> LLMResult:
return await super().agenerate(messages, stop, callbacks, **kwargs) del params['top_p']
del params['frequency_penalty']
del params['presence_penalty']
params['model_kwargs'] = model_kwargs
return params
...@@ -5,7 +5,7 @@ from typing import Optional, List, Dict, Mapping, Any ...@@ -5,7 +5,7 @@ from typing import Optional, List, Dict, Mapping, Any
from pydantic import root_validator from pydantic import root_validator
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class StreamableAzureOpenAI(AzureOpenAI): class StreamableAzureOpenAI(AzureOpenAI):
...@@ -50,7 +50,7 @@ class StreamableAzureOpenAI(AzureOpenAI): ...@@ -50,7 +50,7 @@ class StreamableAzureOpenAI(AzureOpenAI):
"organization": self.openai_organization if self.openai_organization else None, "organization": self.openai_organization if self.openai_organization else None,
}} }}
@handle_llm_exceptions @handle_openai_exceptions
def generate( def generate(
self, self,
prompts: List[str], prompts: List[str],
...@@ -60,12 +60,6 @@ class StreamableAzureOpenAI(AzureOpenAI): ...@@ -60,12 +60,6 @@ class StreamableAzureOpenAI(AzureOpenAI):
) -> LLMResult: ) -> LLMResult:
return super().generate(prompts, stop, callbacks, **kwargs) return super().generate(prompts, stop, callbacks, **kwargs)
@handle_llm_exceptions_async @classmethod
async def agenerate( def get_kwargs_from_model_params(cls, params: dict):
self, return params
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
return await super().agenerate(prompts, stop, callbacks, **kwargs)
from typing import List, Optional, Any, Dict
from langchain.callbacks.manager import Callbacks
from langchain.chat_models import ChatAnthropic
from langchain.schema import BaseMessage, LLMResult
from core.llm.wrappers.anthropic_wrapper import handle_anthropic_exceptions
class StreamableChatAnthropic(ChatAnthropic):
"""
Wrapper around Anthropic's large language model.
"""
@handle_anthropic_exceptions
def generate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
return super().generate(messages, stop, callbacks, tags=tags, metadata=metadata, **kwargs)
@classmethod
def get_kwargs_from_model_params(cls, params: dict):
params['model'] = params.get('model_name')
del params['model_name']
params['max_tokens_to_sample'] = params.get('max_tokens')
del params['max_tokens']
del params['frequency_penalty']
del params['presence_penalty']
return params
...@@ -7,7 +7,7 @@ from typing import Optional, List, Dict, Any ...@@ -7,7 +7,7 @@ from typing import Optional, List, Dict, Any
from pydantic import root_validator from pydantic import root_validator
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class StreamableChatOpenAI(ChatOpenAI): class StreamableChatOpenAI(ChatOpenAI):
...@@ -48,30 +48,7 @@ class StreamableChatOpenAI(ChatOpenAI): ...@@ -48,30 +48,7 @@ class StreamableChatOpenAI(ChatOpenAI):
"organization": self.openai_organization if self.openai_organization else None, "organization": self.openai_organization if self.openai_organization else None,
} }
def get_messages_tokens(self, messages: List[BaseMessage]) -> int: @handle_openai_exceptions
"""Get the number of tokens in a list of messages.
Args:
messages: The messages to count the tokens of.
Returns:
The number of tokens in the messages.
"""
tokens_per_message = 5
tokens_per_request = 3
message_tokens = tokens_per_request
message_strs = ''
for message in messages:
message_strs += message.content
message_tokens += tokens_per_message
# calc once
message_tokens += self.get_num_tokens(message_strs)
return message_tokens
@handle_llm_exceptions
def generate( def generate(
self, self,
messages: List[List[BaseMessage]], messages: List[List[BaseMessage]],
...@@ -81,12 +58,18 @@ class StreamableChatOpenAI(ChatOpenAI): ...@@ -81,12 +58,18 @@ class StreamableChatOpenAI(ChatOpenAI):
) -> LLMResult: ) -> LLMResult:
return super().generate(messages, stop, callbacks, **kwargs) return super().generate(messages, stop, callbacks, **kwargs)
@handle_llm_exceptions_async @classmethod
async def agenerate( def get_kwargs_from_model_params(cls, params: dict):
self, model_kwargs = {
messages: List[List[BaseMessage]], 'top_p': params.get('top_p', 1),
stop: Optional[List[str]] = None, 'frequency_penalty': params.get('frequency_penalty', 0),
callbacks: Callbacks = None, 'presence_penalty': params.get('presence_penalty', 0),
**kwargs: Any, }
) -> LLMResult:
return await super().agenerate(messages, stop, callbacks, **kwargs) del params['top_p']
del params['frequency_penalty']
del params['presence_penalty']
params['model_kwargs'] = model_kwargs
return params
...@@ -6,7 +6,7 @@ from typing import Optional, List, Dict, Any, Mapping ...@@ -6,7 +6,7 @@ from typing import Optional, List, Dict, Any, Mapping
from langchain import OpenAI from langchain import OpenAI
from pydantic import root_validator from pydantic import root_validator
from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class StreamableOpenAI(OpenAI): class StreamableOpenAI(OpenAI):
...@@ -49,7 +49,7 @@ class StreamableOpenAI(OpenAI): ...@@ -49,7 +49,7 @@ class StreamableOpenAI(OpenAI):
"organization": self.openai_organization if self.openai_organization else None, "organization": self.openai_organization if self.openai_organization else None,
}} }}
@handle_llm_exceptions @handle_openai_exceptions
def generate( def generate(
self, self,
prompts: List[str], prompts: List[str],
...@@ -59,12 +59,6 @@ class StreamableOpenAI(OpenAI): ...@@ -59,12 +59,6 @@ class StreamableOpenAI(OpenAI):
) -> LLMResult: ) -> LLMResult:
return super().generate(prompts, stop, callbacks, **kwargs) return super().generate(prompts, stop, callbacks, **kwargs)
@handle_llm_exceptions_async @classmethod
async def agenerate( def get_kwargs_from_model_params(cls, params: dict):
self, return params
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
return await super().agenerate(prompts, stop, callbacks, **kwargs)
import openai import openai
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
from models.provider import ProviderName from models.provider import ProviderName
from core.llm.error_handle_wraps import handle_llm_exceptions
from core.llm.provider.base import BaseProvider from core.llm.provider.base import BaseProvider
...@@ -13,7 +14,7 @@ class Whisper: ...@@ -13,7 +14,7 @@ class Whisper:
self.client = openai.Audio self.client = openai.Audio
self.credentials = provider.get_credentials() self.credentials = provider.get_credentials()
@handle_llm_exceptions @handle_openai_exceptions
def transcribe(self, file): def transcribe(self, file):
return self.client.transcribe( return self.client.transcribe(
model='whisper-1', model='whisper-1',
......
import logging
from functools import wraps
import anthropic
from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, \
LLMBadRequestError
def handle_anthropic_exceptions(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except anthropic.APIConnectionError as e:
logging.exception("Failed to connect to Anthropic API.")
raise LLMAPIConnectionError(f"Anthropic: The server could not be reached, cause: {e.__cause__}")
except anthropic.RateLimitError:
raise LLMRateLimitError("Anthropic: A 429 status code was received; we should back off a bit.")
except anthropic.AuthenticationError as e:
raise LLMAuthorizationError(f"Anthropic: {e.message}")
except anthropic.BadRequestError as e:
raise LLMBadRequestError(f"Anthropic: {e.message}")
except anthropic.APIStatusError as e:
raise LLMAPIUnavailableError(f"Anthropic: code: {e.status_code}, cause: {e.message}")
return wrapper
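Note: the new wrapper module is the Anthropic counterpart of the OpenAI one — a decorator that translates SDK-specific exceptions into the application's own LLM error hierarchy at the call boundary. A generic, dependency-free sketch of that shape (the `anthropic` import in the real code is replaced by locally defined stand-in exceptions here):

```python
import logging
from functools import wraps


# Stand-ins for the SDK and application exception types.
class SDKConnectionError(Exception): ...
class SDKRateLimitError(Exception): ...

class LLMAPIConnectionError(Exception): ...
class LLMRateLimitError(Exception): ...


EXCEPTION_MAP = {
    SDKConnectionError: (LLMAPIConnectionError, "The server could not be reached"),
    SDKRateLimitError: (LLMRateLimitError, "A 429 status code was received; back off a bit"),
}


def handle_provider_exceptions(func):
    """Translate SDK exceptions into application-level LLM errors."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except tuple(EXCEPTION_MAP) as e:
            app_exc, message = EXCEPTION_MAP[type(e)]
            logging.exception("Provider call failed.")
            raise app_exc(f"{message}: {e}") from e
    return wrapper


@handle_provider_exceptions
def flaky_call():
    raise SDKRateLimitError("too many requests")


if __name__ == "__main__":
    try:
        flaky_call()
    except LLMRateLimitError as e:
        print(type(e).__name__, e)
```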
...@@ -7,7 +7,7 @@ from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRat ...@@ -7,7 +7,7 @@ from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRat
LLMBadRequestError LLMBadRequestError
def handle_llm_exceptions(func): def handle_openai_exceptions(func):
@wraps(func) @wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
try: try:
...@@ -29,27 +29,3 @@ def handle_llm_exceptions(func): ...@@ -29,27 +29,3 @@ def handle_llm_exceptions(func):
raise LLMBadRequestError(e.__class__.__name__ + ":" + str(e)) raise LLMBadRequestError(e.__class__.__name__ + ":" + str(e))
return wrapper return wrapper
def handle_llm_exceptions_async(func):
@wraps(func)
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except openai.error.InvalidRequestError as e:
logging.exception("Invalid request to OpenAI API.")
raise LLMBadRequestError(str(e))
except openai.error.APIConnectionError as e:
logging.exception("Failed to connect to OpenAI API.")
raise LLMAPIConnectionError(e.__class__.__name__ + ":" + str(e))
except (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout) as e:
logging.exception("OpenAI service unavailable.")
raise LLMAPIUnavailableError(e.__class__.__name__ + ":" + str(e))
except openai.error.RateLimitError as e:
raise LLMRateLimitError(str(e))
except openai.error.AuthenticationError as e:
raise LLMAuthorizationError(str(e))
except openai.error.OpenAIError as e:
raise LLMBadRequestError(e.__class__.__name__ + ":" + str(e))
return wrapper
from typing import Any, List, Dict, Union from typing import Any, List, Dict, Union
from langchain.memory.chat_memory import BaseChatMemory from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import get_buffer_string, BaseMessage, HumanMessage, AIMessage from langchain.schema import get_buffer_string, BaseMessage, HumanMessage, AIMessage, BaseLanguageModel
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI from core.llm.streamable_open_ai import StreamableOpenAI
...@@ -12,8 +12,8 @@ from models.model import Conversation, Message ...@@ -12,8 +12,8 @@ from models.model import Conversation, Message
class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory): class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory):
conversation: Conversation conversation: Conversation
human_prefix: str = "Human" human_prefix: str = "Human"
ai_prefix: str = "AI" ai_prefix: str = "Assistant"
llm: Union[StreamableChatOpenAI | StreamableOpenAI] llm: BaseLanguageModel
memory_key: str = "chat_history" memory_key: str = "chat_history"
max_token_limit: int = 2000 max_token_limit: int = 2000
message_limit: int = 10 message_limit: int = 10
...@@ -38,12 +38,12 @@ class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory): ...@@ -38,12 +38,12 @@ class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory):
return chat_messages return chat_messages
# prune the chat message if it exceeds the max token limit # prune the chat message if it exceeds the max token limit
curr_buffer_length = self.llm.get_messages_tokens(chat_messages) curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages)
if curr_buffer_length > self.max_token_limit: if curr_buffer_length > self.max_token_limit:
pruned_memory = [] pruned_memory = []
while curr_buffer_length > self.max_token_limit and chat_messages: while curr_buffer_length > self.max_token_limit and chat_messages:
pruned_memory.append(chat_messages.pop(0)) pruned_memory.append(chat_messages.pop(0))
curr_buffer_length = self.llm.get_messages_tokens(chat_messages) curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages)
return chat_messages return chat_messages
......
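Note: the memory class now accepts any `BaseLanguageModel` and uses the standard `get_num_tokens_from_messages` interface; the pruning loop itself is unchanged — drop the oldest messages until the buffer fits. A miniature version of that loop with a word-count token estimate:

```python
from dataclasses import dataclass
from typing import List


@dataclass
class ChatMessage:
    role: str
    content: str


def estimate_tokens(messages: List[ChatMessage]) -> int:
    """Crude stand-in for llm.get_num_tokens_from_messages."""
    return sum(len(m.content.split()) for m in messages)


def prune_to_limit(messages: List[ChatMessage], max_token_limit: int) -> List[ChatMessage]:
    """Drop the oldest messages until the remaining history fits the token budget."""
    while estimate_tokens(messages) > max_token_limit and messages:
        messages.pop(0)
    return messages


if __name__ == "__main__":
    history = [ChatMessage("Human", "word " * 50),
               ChatMessage("Assistant", "word " * 50),
               ChatMessage("Human", "latest question")]
    kept = prune_to_limit(history, max_token_limit=60)
    print(len(kept), kept[-1].content)  # older turns are dropped first
```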
from flask import current_app
from events.tenant_event import tenant_was_updated from events.tenant_event import tenant_was_updated
from models.provider import ProviderName
from services.provider_service import ProviderService from services.provider_service import ProviderService
...@@ -6,4 +9,16 @@ from services.provider_service import ProviderService ...@@ -6,4 +9,16 @@ from services.provider_service import ProviderService
def handle(sender, **kwargs): def handle(sender, **kwargs):
tenant = sender tenant = sender
if tenant.status == 'normal': if tenant.status == 'normal':
ProviderService.create_system_provider(tenant) ProviderService.create_system_provider(
tenant,
ProviderName.OPENAI.value,
current_app.config['OPENAI_HOSTED_QUOTA_LIMIT'],
True
)
ProviderService.create_system_provider(
tenant,
ProviderName.ANTHROPIC.value,
current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'],
True
)
from flask import current_app
from events.tenant_event import tenant_was_created from events.tenant_event import tenant_was_created
from models.provider import ProviderName
from services.provider_service import ProviderService from services.provider_service import ProviderService
...@@ -6,4 +9,16 @@ from services.provider_service import ProviderService ...@@ -6,4 +9,16 @@ from services.provider_service import ProviderService
def handle(sender, **kwargs): def handle(sender, **kwargs):
tenant = sender tenant = sender
if tenant.status == 'normal': if tenant.status == 'normal':
ProviderService.create_system_provider(tenant) ProviderService.create_system_provider(
tenant,
ProviderName.OPENAI.value,
current_app.config['OPENAI_HOSTED_QUOTA_LIMIT'],
True
)
ProviderService.create_system_provider(
tenant,
ProviderName.ANTHROPIC.value,
current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'],
True
)
from typing import Optional
import resend
from flask import Flask
class Mail:
def __init__(self):
self._client = None
self._default_send_from = None
def is_inited(self) -> bool:
return self._client is not None
def init_app(self, app: Flask):
if app.config.get('MAIL_TYPE'):
if app.config.get('MAIL_DEFAULT_SEND_FROM'):
self._default_send_from = app.config.get('MAIL_DEFAULT_SEND_FROM')
if app.config.get('MAIL_TYPE') == 'resend':
api_key = app.config.get('RESEND_API_KEY')
if not api_key:
raise ValueError('RESEND_API_KEY is not set')
resend.api_key = api_key
self._client = resend.Emails
else:
raise ValueError('Unsupported mail type {}'.format(app.config.get('MAIL_TYPE')))
def send(self, to: str, subject: str, html: str, from_: Optional[str] = None):
if not self._client:
raise ValueError('Mail client is not initialized')
if not from_ and self._default_send_from:
from_ = self._default_send_from
if not from_:
raise ValueError('mail from is not set')
if not to:
raise ValueError('mail to is not set')
if not subject:
raise ValueError('mail subject is not set')
if not html:
raise ValueError('mail html is not set')
self._client.send({
"from": from_,
"to": to,
"subject": subject,
"html": html
})
def init_app(app: Flask):
mail.init_app(app)
mail = Mail()
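The extension above follows the usual Flask extension pattern: a module-level `mail` instance that is wired up in `init_app` and stays a no-op sender when `MAIL_TYPE` is unset. A minimal usage sketch (config values are placeholders, not real credentials):

```python
from flask import Flask

from extensions.ext_mail import mail  # the extension defined above

app = Flask(__name__)
app.config.update(
    MAIL_TYPE='resend',                                   # only 'resend' is supported here
    MAIL_DEFAULT_SEND_FROM='no-reply <no-reply@example.com>',
    RESEND_API_KEY='re_xxx',                              # placeholder API key
)
mail.init_app(app)

if mail.is_inited():
    mail.send(
        to='someone@example.com',
        subject='Hello from Dify',
        html='<p>It works.</p>',
    )
```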
...@@ -38,6 +38,10 @@ class Account(UserMixin, db.Model): ...@@ -38,6 +38,10 @@ class Account(UserMixin, db.Model):
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
@property
def is_password_set(self):
return self.password is not None
@property @property
def current_tenant(self): def current_tenant(self):
return self._current_tenant return self._current_tenant
......
...@@ -57,7 +57,8 @@ class App(db.Model): ...@@ -57,7 +57,8 @@ class App(db.Model):
@property @property
def api_base_url(self): def api_base_url(self):
return (current_app.config['API_URL'] if current_app.config['API_URL'] else request.host_url.rstrip('/')) + '/v1' return (current_app.config['SERVICE_API_URL'] if current_app.config['SERVICE_API_URL']
else request.host_url.rstrip('/')) + '/v1'
@property @property
def tenant(self): def tenant(self):
...@@ -542,7 +543,7 @@ class Site(db.Model): ...@@ -542,7 +543,7 @@ class Site(db.Model):
@property @property
def app_base_url(self): def app_base_url(self):
return (current_app.config['APP_URL'] if current_app.config['APP_URL'] else request.host_url.rstrip('/')) return (current_app.config['APP_WEB_URL'] if current_app.config['APP_WEB_URL'] else request.host_url.rstrip('/'))
class ApiToken(db.Model): class ApiToken(db.Model):
......
...@@ -10,7 +10,7 @@ flask-session2==1.3.1 ...@@ -10,7 +10,7 @@ flask-session2==1.3.1
flask-cors==3.0.10 flask-cors==3.0.10
gunicorn~=20.1.0 gunicorn~=20.1.0
gevent~=22.10.2 gevent~=22.10.2
langchain==0.0.228 langchain==0.0.230
openai~=0.27.8 openai~=0.27.8
psycopg2-binary~=2.9.6 psycopg2-binary~=2.9.6
pycryptodome==3.17 pycryptodome==3.17
...@@ -21,7 +21,7 @@ Authlib==1.2.0 ...@@ -21,7 +21,7 @@ Authlib==1.2.0
boto3~=1.26.123 boto3~=1.26.123
tenacity==8.2.2 tenacity==8.2.2
cachetools~=5.3.0 cachetools~=5.3.0
weaviate-client~=3.16.2 weaviate-client~=3.21.0
qdrant_client~=1.1.6 qdrant_client~=1.1.6
mailchimp-transactional~=1.0.50 mailchimp-transactional~=1.0.50
scikit-learn==1.2.2 scikit-learn==1.2.2
...@@ -33,7 +33,9 @@ openpyxl==3.1.2 ...@@ -33,7 +33,9 @@ openpyxl==3.1.2
chardet~=5.1.0 chardet~=5.1.0
docx2txt==0.8 docx2txt==0.8
pypdfium2==4.16.0 pypdfium2==4.16.0
resend~=0.5.1
pyjwt~=2.6.0 pyjwt~=2.6.0
anthropic~=0.3.4
newspaper3k==0.2.8 newspaper3k==0.2.8
google-api-python-client==2.90.0 google-api-python-client==2.90.0
wikipedia==1.4.0 wikipedia==1.4.0
......
...@@ -2,13 +2,16 @@ ...@@ -2,13 +2,16 @@
import base64 import base64
import logging import logging
import secrets import secrets
import uuid
from datetime import datetime from datetime import datetime
from hashlib import sha256
from typing import Optional from typing import Optional
from flask import session from flask import session
from sqlalchemy import func from sqlalchemy import func
from events.tenant_event import tenant_was_created from events.tenant_event import tenant_was_created
from extensions.ext_redis import redis_client
from services.errors.account import AccountLoginError, CurrentPasswordIncorrectError, LinkAccountIntegrateError, \ from services.errors.account import AccountLoginError, CurrentPasswordIncorrectError, LinkAccountIntegrateError, \
TenantNotFound, AccountNotLinkTenantError, InvalidActionError, CannotOperateSelfError, MemberNotInTenantError, \ TenantNotFound, AccountNotLinkTenantError, InvalidActionError, CannotOperateSelfError, MemberNotInTenantError, \
RoleAlreadyAssignedError, NoPermissionError, AccountRegisterError, AccountAlreadyInTenantError RoleAlreadyAssignedError, NoPermissionError, AccountRegisterError, AccountAlreadyInTenantError
...@@ -16,6 +19,7 @@ from libs.helper import get_remote_ip ...@@ -16,6 +19,7 @@ from libs.helper import get_remote_ip
from libs.password import compare_password, hash_password from libs.password import compare_password, hash_password
from libs.rsa import generate_key_pair from libs.rsa import generate_key_pair
from models.account import * from models.account import *
from tasks.mail_invite_member_task import send_invite_member_mail_task
class AccountService: class AccountService:
...@@ -48,12 +52,18 @@ class AccountService: ...@@ -48,12 +52,18 @@ class AccountService:
@staticmethod @staticmethod
def update_account_password(account, password, new_password): def update_account_password(account, password, new_password):
"""update account password""" """update account password"""
# todo: split validation and update
if account.password and not compare_password(password, account.password, account.password_salt): if account.password and not compare_password(password, account.password, account.password_salt):
raise CurrentPasswordIncorrectError("Current password is incorrect.") raise CurrentPasswordIncorrectError("Current password is incorrect.")
password_hashed = hash_password(new_password, account.password_salt)
# generate password salt
salt = secrets.token_bytes(16)
base64_salt = base64.b64encode(salt).decode()
# encrypt password with salt
password_hashed = hash_password(new_password, salt)
base64_password_hashed = base64.b64encode(password_hashed).decode() base64_password_hashed = base64.b64encode(password_hashed).decode()
account.password = base64_password_hashed account.password = base64_password_hashed
account.password_salt = base64_salt
db.session.commit() db.session.commit()
return account return account
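The updated `update_account_password` now derives a fresh 16-byte salt on every change instead of reusing the stored one, and persists both salt and hash base64-encoded. A small sketch of the corresponding hash/verify round trip, assuming `hash_password(password, salt)` and `compare_password(password, stored_hash, stored_salt)` behave as the calls above imply (signatures inferred from `libs.password` usage):

```python
import base64
import secrets

from libs.password import compare_password, hash_password

new_password = 's3cretPassw0rd'

# hashing: a new random salt per password change
salt = secrets.token_bytes(16)
password_hashed = hash_password(new_password, salt)

stored_password = base64.b64encode(password_hashed).decode()
stored_salt = base64.b64encode(salt).decode()

# verification mirrors the check in update_account_password above:
# compare_password(candidate, stored_base64_hash, stored_base64_salt)
assert compare_password(new_password, stored_password, stored_salt)
```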
...@@ -283,8 +293,6 @@ class TenantService: ...@@ -283,8 +293,6 @@ class TenantService:
@staticmethod @staticmethod
def remove_member_from_tenant(tenant: Tenant, account: Account, operator: Account) -> None: def remove_member_from_tenant(tenant: Tenant, account: Account, operator: Account) -> None:
"""Remove member from tenant""" """Remove member from tenant"""
# todo: check permission
if operator.id == account.id and TenantService.check_member_permission(tenant, operator, account, 'remove'): if operator.id == account.id and TenantService.check_member_permission(tenant, operator, account, 'remove'):
raise CannotOperateSelfError("Cannot operate self.") raise CannotOperateSelfError("Cannot operate self.")
...@@ -293,6 +301,12 @@ class TenantService: ...@@ -293,6 +301,12 @@ class TenantService:
raise MemberNotInTenantError("Member not in tenant.") raise MemberNotInTenantError("Member not in tenant.")
db.session.delete(ta) db.session.delete(ta)
account.initialized_at = None
account.status = AccountStatus.PENDING.value
account.password = None
account.password_salt = None
db.session.commit() db.session.commit()
@staticmethod @staticmethod
...@@ -332,8 +346,8 @@ class TenantService: ...@@ -332,8 +346,8 @@ class TenantService:
class RegisterService: class RegisterService:
@staticmethod @classmethod
def register(email, name, password: str = None, open_id: str = None, provider: str = None) -> Account: def register(cls, email, name, password: str = None, open_id: str = None, provider: str = None) -> Account:
db.session.begin_nested() db.session.begin_nested()
"""Register account""" """Register account"""
try: try:
...@@ -359,9 +373,9 @@ class RegisterService: ...@@ -359,9 +373,9 @@ class RegisterService:
return account return account
@staticmethod @classmethod
def invite_new_member(tenant: Tenant, email: str, role: str = 'normal', def invite_new_member(cls, tenant: Tenant, email: str, role: str = 'normal',
inviter: Account = None) -> TenantAccountJoin: inviter: Account = None) -> str:
"""Invite new member""" """Invite new member"""
account = Account.query.filter_by(email=email).first() account = Account.query.filter_by(email=email).first()
...@@ -380,5 +394,71 @@ class RegisterService: ...@@ -380,5 +394,71 @@ class RegisterService:
if ta: if ta:
raise AccountAlreadyInTenantError("Account already in tenant.") raise AccountAlreadyInTenantError("Account already in tenant.")
ta = TenantService.create_tenant_member(tenant, account, role) TenantService.create_tenant_member(tenant, account, role)
return ta
token = cls.generate_invite_token(tenant, account)
# send email
send_invite_member_mail_task.delay(
to=email,
token=token,
inviter_name=inviter.name if inviter else 'Dify',
workspace_id=tenant.id,
workspace_name=tenant.name,
)
return token
@classmethod
def generate_invite_token(cls, tenant: Tenant, account: Account) -> str:
token = str(uuid.uuid4())
email_hash = sha256(account.email.encode()).hexdigest()
cache_key = 'member_invite_token:{}, {}:{}'.format(str(tenant.id), email_hash, token)
redis_client.setex(cache_key, 3600, str(account.id))
return token
@classmethod
def revoke_token(cls, workspace_id: str, email: str, token: str):
email_hash = sha256(email.encode()).hexdigest()
cache_key = 'member_invite_token:{}, {}:{}'.format(workspace_id, email_hash, token)
redis_client.delete(cache_key)
@classmethod
def get_account_if_token_valid(cls, workspace_id: str, email: str, token: str) -> Optional[Account]:
tenant = db.session.query(Tenant).filter(
Tenant.id == workspace_id,
Tenant.status == 'normal'
).first()
if not tenant:
return None
tenant_account = db.session.query(Account, TenantAccountJoin.role).join(
TenantAccountJoin, Account.id == TenantAccountJoin.account_id
).filter(Account.email == email, TenantAccountJoin.tenant_id == tenant.id).first()
if not tenant_account:
return None
account_id = cls._get_account_id_by_invite_token(workspace_id, email, token)
if not account_id:
return None
account = tenant_account[0]
if not account:
return None
if account_id != str(account.id):
return None
return account
@classmethod
def _get_account_id_by_invite_token(cls, workspace_id: str, email: str, token: str) -> Optional[str]:
email_hash = sha256(email.encode()).hexdigest()
cache_key = 'member_invite_token:{}, {}:{}'.format(workspace_id, email_hash, token)
account_id = redis_client.get(cache_key)
if not account_id:
return None
return account_id.decode('utf-8')
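Taken together, the new `RegisterService` helpers implement a one-hour, Redis-backed invite token: `generate_invite_token` stores the invitee's account id under a key derived from the workspace id, a SHA-256 of the email, and a random UUID token; `get_account_if_token_valid` re-derives that key and cross-checks the cached account id against tenant membership; `revoke_token` deletes the key. A condensed sketch of the same key scheme (same `redis_client` extension; error handling omitted):

```python
import uuid
from hashlib import sha256

from extensions.ext_redis import redis_client


def make_cache_key(workspace_id: str, email: str, token: str) -> str:
    # same format as RegisterService above, including the ', ' separator
    email_hash = sha256(email.encode()).hexdigest()
    return 'member_invite_token:{}, {}:{}'.format(workspace_id, email_hash, token)


def issue(workspace_id: str, email: str, account_id: str) -> str:
    token = str(uuid.uuid4())
    redis_client.setex(make_cache_key(workspace_id, email, token), 3600, account_id)
    return token


def lookup(workspace_id: str, email: str, token: str):
    value = redis_client.get(make_cache_key(workspace_id, email, token))
    return value.decode('utf-8') if value else None


def revoke(workspace_id: str, email: str, token: str) -> None:
    redis_client.delete(make_cache_key(workspace_id, email, token))
```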
...@@ -7,6 +7,30 @@ from models.account import Account ...@@ -7,6 +7,30 @@ from models.account import Account
from services.dataset_service import DatasetService from services.dataset_service import DatasetService
from core.llm.llm_builder import LLMBuilder from core.llm.llm_builder import LLMBuilder
MODEL_PROVIDERS = [
'openai',
'anthropic',
]
MODELS_BY_APP_MODE = {
'chat': [
'claude-instant-1',
'claude-2',
'gpt-4',
'gpt-4-32k',
'gpt-3.5-turbo',
'gpt-3.5-turbo-16k',
],
'completion': [
'claude-instant-1',
'claude-2',
'gpt-4',
'gpt-4-32k',
'gpt-3.5-turbo',
'gpt-3.5-turbo-16k',
'text-davinci-003',
]
}
SUPPORT_AGENT_MODELS = [ SUPPORT_AGENT_MODELS = [
"gpt-4", "gpt-4",
...@@ -137,7 +161,7 @@ class AppModelConfigService: ...@@ -137,7 +161,7 @@ class AppModelConfigService:
if not isinstance(config["speech_to_text"]["enabled"], bool): if not isinstance(config["speech_to_text"]["enabled"], bool):
raise ValueError("enabled in speech_to_text must be of boolean type") raise ValueError("enabled in speech_to_text must be of boolean type")
provider_name = LLMBuilder.get_default_provider(account.current_tenant_id) provider_name = LLMBuilder.get_default_provider(account.current_tenant_id, 'whisper-1')
if config["speech_to_text"]["enabled"] and provider_name != 'openai': if config["speech_to_text"]["enabled"] and provider_name != 'openai':
raise ValueError("provider not support speech to text") raise ValueError("provider not support speech to text")
...@@ -192,14 +216,14 @@ class AppModelConfigService: ...@@ -192,14 +216,14 @@ class AppModelConfigService:
raise ValueError("model must be of object type") raise ValueError("model must be of object type")
# model.provider # model.provider
if 'provider' not in config["model"] or config["model"]["provider"] != "openai": if 'provider' not in config["model"] or config["model"]["provider"] not in MODEL_PROVIDERS:
raise ValueError("model.provider must be 'openai'") raise ValueError(f"model.provider is required and must be in {str(MODEL_PROVIDERS)}")
# model.name # model.name
if 'name' not in config["model"]: if 'name' not in config["model"]:
raise ValueError("model.name is required") raise ValueError("model.name is required")
if config["model"]["name"] not in llm_constant.models_by_mode[mode]: if config["model"]["name"] not in MODELS_BY_APP_MODE[mode]:
raise ValueError("model.name must be in the specified model list") raise ValueError("model.name must be in the specified model list")
# model.completion_params # model.completion_params
......
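With the hard-coded whitelists above, a model block passes validation only if `model.provider` is one of `MODEL_PROVIDERS` and `model.name` is listed for the app mode. A hedged example of a fragment that satisfies those two checks (other required fields of the app model config are omitted):

```python
# Example model block accepted by AppModelConfigService for a 'chat' app.
# Only the provider/name checks shown above are illustrated here.
config = {
    "model": {
        "provider": "anthropic",    # must be in MODEL_PROVIDERS
        "name": "claude-2",         # must be in MODELS_BY_APP_MODE["chat"]
        "completion_params": {},    # validated separately (not shown)
    }
}
```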
...@@ -27,7 +27,7 @@ class AudioService: ...@@ -27,7 +27,7 @@ class AudioService:
message = f"Audio size larger than {FILE_SIZE} mb" message = f"Audio size larger than {FILE_SIZE} mb"
raise AudioTooLargeServiceError(message) raise AudioTooLargeServiceError(message)
provider_name = LLMBuilder.get_default_provider(tenant_id) provider_name = LLMBuilder.get_default_provider(tenant_id, 'whisper-1')
if provider_name != ProviderName.OPENAI.value: if provider_name != ProviderName.OPENAI.value:
raise ProviderNotSupportSpeechToTextServiceError() raise ProviderNotSupportSpeechToTextServiceError()
...@@ -37,8 +37,3 @@ class AudioService: ...@@ -37,8 +37,3 @@ class AudioService:
buffer.name = 'temp.mp3' buffer.name = 'temp.mp3'
return Whisper(provider_service.provider).transcribe(buffer) return Whisper(provider_service.provider).transcribe(buffer)
\ No newline at end of file
...@@ -4,6 +4,9 @@ import datetime ...@@ -4,6 +4,9 @@ import datetime
import time import time
import random import random
from typing import Optional, List from typing import Optional, List
from flask import current_app
from extensions.ext_redis import redis_client from extensions.ext_redis import redis_client
from flask_login import current_user from flask_login import current_user
...@@ -374,6 +377,12 @@ class DocumentService: ...@@ -374,6 +377,12 @@ class DocumentService:
def save_document_with_dataset_id(dataset: Dataset, document_data: dict, def save_document_with_dataset_id(dataset: Dataset, document_data: dict,
account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None, account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None,
created_from: str = 'web'): created_from: str = 'web'):
# check document limit
if current_app.config['EDITION'] == 'CLOUD':
documents_count = DocumentService.get_tenant_documents_count()
tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
if documents_count > tenant_document_count:
raise ValueError(f"over document limit {tenant_document_count}.")
# if dataset is empty, update dataset data_source_type # if dataset is empty, update dataset data_source_type
if not dataset.data_source_type: if not dataset.data_source_type:
dataset.data_source_type = document_data["data_source"]["type"] dataset.data_source_type = document_data["data_source"]["type"]
...@@ -521,6 +530,14 @@ class DocumentService: ...@@ -521,6 +530,14 @@ class DocumentService:
) )
return document return document
@staticmethod
def get_tenant_documents_count():
documents_count = Document.query.filter(Document.completed_at.isnot(None),
Document.enabled == True,
Document.archived == False,
Document.tenant_id == current_user.current_tenant_id).count()
return documents_count
@staticmethod @staticmethod
def update_document_with_dataset_id(dataset: Dataset, document_data: dict, def update_document_with_dataset_id(dataset: Dataset, document_data: dict,
account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None, account: Account, dataset_process_rule: Optional[DatasetProcessRule] = None,
...@@ -616,6 +633,12 @@ class DocumentService: ...@@ -616,6 +633,12 @@ class DocumentService:
@staticmethod @staticmethod
def save_document_without_dataset_id(tenant_id: str, document_data: dict, account: Account): def save_document_without_dataset_id(tenant_id: str, document_data: dict, account: Account):
# check document limit
if current_app.config['EDITION'] == 'CLOUD':
documents_count = DocumentService.get_tenant_documents_count()
tenant_document_count = int(current_app.config['TENANT_DOCUMENT_COUNT'])
if documents_count > tenant_document_count:
raise ValueError(f"over document limit {tenant_document_count}.")
# save dataset # save dataset
dataset = Dataset( dataset = Dataset(
tenant_id=tenant_id, tenant_id=tenant_id,
......
...@@ -31,7 +31,7 @@ class HitTestingService: ...@@ -31,7 +31,7 @@ class HitTestingService:
model_credentials = LLMBuilder.get_model_credentials( model_credentials = LLMBuilder.get_model_credentials(
tenant_id=dataset.tenant_id, tenant_id=dataset.tenant_id,
model_provider=LLMBuilder.get_default_provider(dataset.tenant_id), model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'),
model_name='text-embedding-ada-002' model_name='text-embedding-ada-002'
) )
......
...@@ -10,50 +10,40 @@ from models.provider import * ...@@ -10,50 +10,40 @@ from models.provider import *
class ProviderService: class ProviderService:
@staticmethod @staticmethod
def init_supported_provider(tenant, edition): def init_supported_provider(tenant):
"""Initialize the model provider, check whether the supported provider has a record""" """Initialize the model provider, check whether the supported provider has a record"""
providers = Provider.query.filter_by(tenant_id=tenant.id).all() need_init_provider_names = [ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value, ProviderName.ANTHROPIC.value]
openai_provider_exists = False providers = db.session.query(Provider).filter(
azure_openai_provider_exists = False Provider.tenant_id == tenant.id,
Provider.provider_type == ProviderType.CUSTOM.value,
# TODO: The cloud version needs to construct the data of the SYSTEM type Provider.provider_name.in_(need_init_provider_names)
).all()
exists_provider_names = []
for provider in providers: for provider in providers:
if provider.provider_name == ProviderName.OPENAI.value and provider.provider_type == ProviderType.CUSTOM.value: exists_provider_names.append(provider.provider_name)
openai_provider_exists = True
if provider.provider_name == ProviderName.AZURE_OPENAI.value and provider.provider_type == ProviderType.CUSTOM.value:
azure_openai_provider_exists = True
# Initialize the model provider, check whether the supported provider has a record not_exists_provider_names = list(set(need_init_provider_names) - set(exists_provider_names))
# Create default providers if they don't exist if not_exists_provider_names:
if not openai_provider_exists: # Initialize the model provider, check whether the supported provider has a record
openai_provider = Provider( for provider_name in not_exists_provider_names:
tenant_id=tenant.id, provider = Provider(
provider_name=ProviderName.OPENAI.value, tenant_id=tenant.id,
provider_type=ProviderType.CUSTOM.value, provider_name=provider_name,
is_valid=False provider_type=ProviderType.CUSTOM.value,
) is_valid=False
db.session.add(openai_provider) )
db.session.add(provider)
if not azure_openai_provider_exists:
azure_openai_provider = Provider(
tenant_id=tenant.id,
provider_name=ProviderName.AZURE_OPENAI.value,
provider_type=ProviderType.CUSTOM.value,
is_valid=False
)
db.session.add(azure_openai_provider)
if not openai_provider_exists or not azure_openai_provider_exists:
db.session.commit() db.session.commit()
@staticmethod @staticmethod
def get_obfuscated_api_key(tenant, provider_name: ProviderName): def get_obfuscated_api_key(tenant, provider_name: ProviderName, only_custom: bool = False):
llm_provider_service = LLMProviderService(tenant.id, provider_name.value) llm_provider_service = LLMProviderService(tenant.id, provider_name.value)
return llm_provider_service.get_provider_configs(obfuscated=True) return llm_provider_service.get_provider_configs(obfuscated=True, only_custom=only_custom)
@staticmethod @staticmethod
def get_token_type(tenant, provider_name: ProviderName): def get_token_type(tenant, provider_name: ProviderName):
...@@ -73,7 +63,7 @@ class ProviderService: ...@@ -73,7 +63,7 @@ class ProviderService:
return llm_provider_service.get_encrypted_token(configs) return llm_provider_service.get_encrypted_token(configs)
@staticmethod @staticmethod
def create_system_provider(tenant: Tenant, provider_name: str = ProviderName.OPENAI.value, def create_system_provider(tenant: Tenant, provider_name: str = ProviderName.OPENAI.value, quota_limit: int = 200,
is_valid: bool = True): is_valid: bool = True):
if current_app.config['EDITION'] != 'CLOUD': if current_app.config['EDITION'] != 'CLOUD':
return return
...@@ -90,7 +80,7 @@ class ProviderService: ...@@ -90,7 +80,7 @@ class ProviderService:
provider_name=provider_name, provider_name=provider_name,
provider_type=ProviderType.SYSTEM.value, provider_type=ProviderType.SYSTEM.value,
quota_type=ProviderQuotaType.TRIAL.value, quota_type=ProviderQuotaType.TRIAL.value,
quota_limit=200, quota_limit=quota_limit,
encrypted_config='', encrypted_config='',
is_valid=is_valid, is_valid=is_valid,
) )
......
from extensions.ext_database import db from extensions.ext_database import db
from models.account import Tenant from models.account import Tenant
from models.provider import Provider, ProviderType from models.provider import Provider, ProviderType, ProviderName
class WorkspaceService: class WorkspaceService:
...@@ -33,7 +33,7 @@ class WorkspaceService: ...@@ -33,7 +33,7 @@ class WorkspaceService:
if provider.is_valid and provider.encrypted_config: if provider.is_valid and provider.encrypted_config:
custom_provider = provider custom_provider = provider
elif provider.provider_type == ProviderType.SYSTEM.value: elif provider.provider_type == ProviderType.SYSTEM.value:
if provider.is_valid: if provider.provider_name == ProviderName.OPENAI.value and provider.is_valid:
system_provider = provider system_provider = provider
if system_provider and not custom_provider: if system_provider and not custom_provider:
......
import logging
import time
import click
from celery import shared_task
from flask import current_app
from extensions.ext_mail import mail
@shared_task
def send_invite_member_mail_task(to: str, token: str, inviter_name: str, workspace_id: str, workspace_name: str):
"""
Async Send invite member mail
:param to: recipient email address
:param token: invite token embedded in the activation link
:param inviter_name: display name of the member sending the invite
:param workspace_id: id of the workspace the invitee is joining
:param workspace_name: display name of that workspace
Usage: send_invite_member_mail_task.delay(to, token, inviter_name, workspace_id, workspace_name)
"""
if not mail.is_inited():
return
logging.info(click.style('Start send invite member mail to {} in workspace {}'.format(to, workspace_name),
fg='green'))
start_at = time.perf_counter()
try:
mail.send(
to=to,
subject="{} invited you to join {}".format(inviter_name, workspace_name),
html="""<p>Hi there,</p>
<p>{inviter_name} invited you to join {workspace_name}.</p>
<p>Click <a href="{url}">here</a> to join.</p>
<p>Thanks,</p>
<p>Dify Team</p>""".format(inviter_name=inviter_name, workspace_name=workspace_name,
url='{}/activate?workspace_id={}&email={}&token={}'.format(
current_app.config.get("CONSOLE_WEB_URL"),
workspace_id,
to,
token)
)
)
end_at = time.perf_counter()
logging.info(
click.style('Send invite member mail to {} succeeded: latency: {}'.format(to, end_at - start_at),
fg='green'))
except Exception:
logging.exception("Send invite member mail to {} failed".format(to))
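Because the task is declared with Celery's `@shared_task`, callers never run it inline; they enqueue it, as `RegisterService.invite_new_member` does. A minimal sketch of the call site (argument values are placeholders):

```python
from tasks.mail_invite_member_task import send_invite_member_mail_task

send_invite_member_mail_task.delay(
    to='invitee@example.com',
    token='generated-invite-token',
    inviter_name='Alice',
    workspace_id='workspace-uuid',
    workspace_name='Acme Workspace',
)
```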
...@@ -2,7 +2,7 @@ version: '3.1' ...@@ -2,7 +2,7 @@ version: '3.1'
services: services:
# API service # API service
api: api:
image: langgenius/dify-api:0.3.7 image: langgenius/dify-api:0.3.9
restart: always restart: always
environment: environment:
# Startup mode, 'api' starts the API server. # Startup mode, 'api' starts the API server.
...@@ -11,18 +11,26 @@ services: ...@@ -11,18 +11,26 @@ services:
LOG_LEVEL: INFO LOG_LEVEL: INFO
# A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`. # A secret key that is used for securely signing the session cookie and encrypting sensitive information on the database. You can generate a strong key using `openssl rand -base64 42`.
SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U SECRET_KEY: sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U
# The base URL of console application, refers to the Console base URL of WEB service if console domain is # The base URL of console application web frontend, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain. # different from api or web app domain.
# example: http://cloud.dify.ai # example: http://cloud.dify.ai
CONSOLE_URL: '' CONSOLE_WEB_URL: ''
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain.
# example: http://cloud.dify.ai
CONSOLE_API_URL: ''
# The URL for Service API endpoints, refers to the base URL of the current API service if api domain is # The URL for Service API endpoints, refers to the base URL of the current API service if api domain is
# different from console domain. # different from console domain.
# example: http://api.dify.ai # example: http://api.dify.ai
API_URL: '' SERVICE_API_URL: ''
# The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from # The URL for Web APP api server, refers to the Web App base URL of WEB service if web app domain is different from
# console or api domain.
# example: http://udify.app
APP_API_URL: ''
# The URL for Web APP frontend, refers to the Web App base URL of WEB service if web app domain is different from
# console or api domain. # console or api domain.
# example: http://udify.app # example: http://udify.app
APP_URL: '' APP_WEB_URL: ''
# When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed. # When enabled, migrations will be executed prior to application startup and the application will start after the migrations have completed.
MIGRATION_ENABLED: 'true' MIGRATION_ENABLED: 'true'
# The configurations of postgres database connection. # The configurations of postgres database connection.
...@@ -93,6 +101,12 @@ services: ...@@ -93,6 +101,12 @@ services:
QDRANT_URL: 'https://your-qdrant-cluster-url.qdrant.tech/' QDRANT_URL: 'https://your-qdrant-cluster-url.qdrant.tech/'
# The Qdrant API key. # The Qdrant API key.
QDRANT_API_KEY: 'ak-difyai' QDRANT_API_KEY: 'ak-difyai'
# Mail configuration, support: resend
MAIL_TYPE: ''
# Default send-from email address, used when the sender is not specified per message
MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply <no-reply@dify.ai>)'
# the api-key for resend (https://resend.com)
RESEND_API_KEY: ''
# The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled. # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
SENTRY_DSN: '' SENTRY_DSN: ''
# The sample rate for Sentry events. Default: `1.0` # The sample rate for Sentry events. Default: `1.0`
...@@ -110,7 +124,7 @@ services: ...@@ -110,7 +124,7 @@ services:
# worker service # worker service
# The Celery worker for processing the queue. # The Celery worker for processing the queue.
worker: worker:
image: langgenius/dify-api:0.3.7 image: langgenius/dify-api:0.3.9
restart: always restart: always
environment: environment:
# Startup mode, 'worker' starts the Celery worker for processing the queue. # Startup mode, 'worker' starts the Celery worker for processing the queue.
...@@ -146,6 +160,12 @@ services: ...@@ -146,6 +160,12 @@ services:
VECTOR_STORE: weaviate VECTOR_STORE: weaviate
WEAVIATE_ENDPOINT: http://weaviate:8080 WEAVIATE_ENDPOINT: http://weaviate:8080
WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih WEAVIATE_API_KEY: WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
# Mail configuration, support: resend
MAIL_TYPE: ''
# Default send-from email address, used when the sender is not specified per message
MAIL_DEFAULT_SEND_FROM: 'YOUR EMAIL FROM (eg: no-reply <no-reply@dify.ai>)'
# the api-key for resend (https://resend.com)
RESEND_API_KEY: ''
depends_on: depends_on:
- db - db
- redis - redis
...@@ -156,18 +176,18 @@ services: ...@@ -156,18 +176,18 @@ services:
# Frontend web application. # Frontend web application.
web: web:
image: langgenius/dify-web:0.3.7 image: langgenius/dify-web:0.3.9
restart: always restart: always
environment: environment:
EDITION: SELF_HOSTED EDITION: SELF_HOSTED
# The base URL of console application, refers to the Console base URL of WEB service if console domain is # The base URL of console application api server, refers to the Console base URL of WEB service if console domain is
# different from api or web app domain. # different from api or web app domain.
# example: http://cloud.dify.ai # example: http://cloud.dify.ai
CONSOLE_URL: '' CONSOLE_API_URL: ''
# The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from # The URL for Web APP api server, refers to the Web App base URL of WEB service if web app domain is different from
# console or api domain. # console or api domain.
# example: http://udify.app # example: http://udify.app
APP_URL: '' APP_API_URL: ''
# The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled. # The DSN for Sentry error reporting. If not set, Sentry error reporting will be disabled.
SENTRY_DSN: '' SENTRY_DSN: ''
......
...@@ -4,8 +4,8 @@ LABEL maintainer="takatost@gmail.com" ...@@ -4,8 +4,8 @@ LABEL maintainer="takatost@gmail.com"
ENV EDITION SELF_HOSTED ENV EDITION SELF_HOSTED
ENV DEPLOY_ENV PRODUCTION ENV DEPLOY_ENV PRODUCTION
ENV CONSOLE_URL http://127.0.0.1:5001 ENV CONSOLE_API_URL http://127.0.0.1:5001
ENV APP_URL http://127.0.0.1:5001 ENV APP_API_URL http://127.0.0.1:5001
EXPOSE 3000 EXPOSE 3000
......
'use client'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'
import useSWR from 'swr'
import { useSearchParams } from 'next/navigation'
import cn from 'classnames'
import Link from 'next/link'
import { CheckCircleIcon } from '@heroicons/react/24/solid'
import style from './style.module.css'
import Button from '@/app/components/base/button'
import { SimpleSelect } from '@/app/components/base/select'
import { timezones } from '@/utils/timezone'
import { languageMaps, languages } from '@/utils/language'
import { activateMember, invitationCheck } from '@/service/common'
import Toast from '@/app/components/base/toast'
import Loading from '@/app/components/base/loading'
const validPassword = /^(?=.*[a-zA-Z])(?=.*\d).{8,}$/
const ActivateForm = () => {
const { t } = useTranslation()
const searchParams = useSearchParams()
const workspaceID = searchParams.get('workspace_id')
const email = searchParams.get('email')
const token = searchParams.get('token')
const checkParams = {
url: '/activate/check',
params: {
workspace_id: workspaceID,
email,
token,
},
}
const { data: checkRes, mutate: recheck } = useSWR(checkParams, invitationCheck, {
revalidateOnFocus: false,
})
const [name, setName] = useState('')
const [password, setPassword] = useState('')
const [timezone, setTimezone] = useState('Asia/Shanghai')
const [language, setLanguage] = useState('en-US')
const [showSuccess, setShowSuccess] = useState(false)
const showErrorMessage = (message: string) => {
Toast.notify({
type: 'error',
message,
})
}
const valid = () => {
if (!name.trim()) {
showErrorMessage(t('login.error.nameEmpty'))
return false
}
if (!password.trim()) {
showErrorMessage(t('login.error.passwordEmpty'))
return false
}
if (!validPassword.test(password)) {
showErrorMessage(t('login.error.passwordInvalid'))
return false
}
return true
}
const handleActivate = async () => {
if (!valid())
return
try {
await activateMember({
url: '/activate',
body: {
workspace_id: workspaceID,
email,
token,
name,
password,
interface_language: language,
timezone,
},
})
setShowSuccess(true)
}
catch {
recheck()
}
}
return (
<div className={
cn(
'flex flex-col items-center w-full grow justify-center',
'px-6',
'md:px-[108px]',
)
}>
{!checkRes && <Loading/>}
{checkRes && !checkRes.is_valid && (
<div className="flex flex-col md:w-[400px]">
<div className="w-full mx-auto">
<div className="mb-3 flex justify-center items-center w-20 h-20 p-5 rounded-[20px] border border-gray-100 shadow-lg text-[40px] font-bold">🤷‍♂️</div>
<h2 className="text-[32px] font-bold text-gray-900">{t('login.invalid')}</h2>
</div>
<div className="w-full mx-auto mt-6">
<Button type='primary' className='w-full !font-medium !text-sm'>
<a href="https://dify.ai">{t('login.explore')}</a>
</Button>
</div>
</div>
)}
{checkRes && checkRes.is_valid && !showSuccess && (
<div className='flex flex-col md:w-[400px]'>
<div className="w-full mx-auto">
<div className={`mb-3 flex justify-center items-center w-20 h-20 p-5 rounded-[20px] border border-gray-100 shadow-lg text-[40px] font-bold ${style.logo}`}>
</div>
<h2 className="text-[32px] font-bold text-gray-900">
{`${t('login.join')} ${checkRes.workspace_name}`}
</h2>
<p className='mt-1 text-sm text-gray-600 '>
{`${t('login.joinTipStart')} ${checkRes.workspace_name} ${t('login.joinTipEnd')}`}
</p>
</div>
<div className="w-full mx-auto mt-6">
<div className="bg-white">
{/* username */}
<div className='mb-5'>
<label htmlFor="name" className="my-2 flex items-center justify-between text-sm font-medium text-gray-900">
{t('login.name')}
</label>
<div className="mt-1 relative rounded-md shadow-sm">
<input
id="name"
type="text"
value={name}
onChange={e => setName(e.target.value)}
placeholder={t('login.namePlaceholder') || ''}
className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm pr-10'}
/>
</div>
</div>
{/* password */}
<div className='mb-5'>
<label htmlFor="password" className="my-2 flex items-center justify-between text-sm font-medium text-gray-900">
{t('login.password')}
</label>
<div className="mt-1 relative rounded-md shadow-sm">
<input
id="password"
type='password'
value={password}
onChange={e => setPassword(e.target.value)}
placeholder={t('login.passwordPlaceholder') || ''}
className={'appearance-none block w-full rounded-lg pl-[14px] px-3 py-2 border border-gray-200 hover:border-gray-300 hover:shadow-sm focus:outline-none focus:ring-primary-500 focus:border-primary-500 placeholder-gray-400 caret-primary-600 sm:text-sm pr-10'}
/>
</div>
<div className='mt-1 text-xs text-gray-500'>{t('login.error.passwordInvalid')}</div>
</div>
{/* language */}
<div className='mb-5'>
<label htmlFor="name" className="my-2 flex items-center justify-between text-sm font-medium text-gray-900">
{t('login.interfaceLanguage')}
</label>
<div className="relative mt-1 rounded-md shadow-sm">
<SimpleSelect
defaultValue={languageMaps.en}
items={languages}
onSelect={(item) => {
setLanguage(item.value as string)
}}
/>
</div>
</div>
{/* timezone */}
<div className='mb-4'>
<label htmlFor="timezone" className="block text-sm font-medium text-gray-700">
{t('login.timezone')}
</label>
<div className="relative mt-1 rounded-md shadow-sm">
<SimpleSelect
defaultValue={timezone}
items={timezones}
onSelect={(item) => {
setTimezone(item.value as string)
}}
/>
</div>
</div>
<div>
<Button
type='primary'
className='w-full !font-medium !text-sm'
onClick={handleActivate}
>
{`${t('login.join')} ${checkRes.workspace_name}`}
</Button>
</div>
<div className="block w-hull mt-2 text-xs text-gray-600">
{t('login.license.tip')}
&nbsp;
<Link
className='text-primary-600'
target={'_blank'}
href='https://docs.dify.ai/community/open-source'
>{t('login.license.link')}</Link>
</div>
</div>
</div>
</div>
)}
{checkRes && checkRes.is_valid && showSuccess && (
<div className="flex flex-col md:w-[400px]">
<div className="w-full mx-auto">
<div className="mb-3 flex justify-center items-center w-20 h-20 p-5 rounded-[20px] border border-gray-100 shadow-lg text-[40px] font-bold">
<CheckCircleIcon className='w-10 h-10 text-[#039855]' />
</div>
<h2 className="text-[32px] font-bold text-gray-900">
{`${t('login.activatedTipStart')} ${checkRes.workspace_name} ${t('login.activatedTipEnd')}`}
</h2>
</div>
<div className="w-full mx-auto mt-6">
<Button type='primary' className='w-full !font-medium !text-sm'>
<a href="/signin">{t('login.activated')}</a>
</Button>
</div>
</div>
)}
</div>
)
}
export default ActivateForm
import React from 'react'
import cn from 'classnames'
import Header from '../signin/_header'
import style from '../signin/page.module.css'
import ActivateForm from './activateForm'
const Activate = () => {
return (
<div className={cn(
style.background,
'flex w-full min-h-screen',
'sm:p-4 lg:p-8',
'gap-x-20',
'justify-center lg:justify-start',
)}>
<div className={
cn(
'flex w-full flex-col bg-white shadow rounded-2xl shrink-0',
'space-between',
)
}>
<Header />
<ActivateForm />
<div className='px-8 py-6 text-sm font-normal text-gray-500'>
© {new Date().getFullYear()} Dify, Inc. All rights reserved.
</div>
</div>
</div>
)
}
export default Activate
.logo {
background: #fff center no-repeat url(./team-28x28.png);
background-size: 56px;
}
\ No newline at end of file
...@@ -65,6 +65,7 @@ export type IChatProps = { ...@@ -65,6 +65,7 @@ export type IChatProps = {
isShowSuggestion?: boolean isShowSuggestion?: boolean
suggestionList?: string[] suggestionList?: string[]
isShowSpeechToText?: boolean isShowSpeechToText?: boolean
answerIconClassName?: string
} }
export type MessageMore = { export type MessageMore = {
...@@ -174,10 +175,11 @@ type IAnswerProps = { ...@@ -174,10 +175,11 @@ type IAnswerProps = {
onSubmitAnnotation?: SubmitAnnotationFunc onSubmitAnnotation?: SubmitAnnotationFunc
displayScene: DisplayScene displayScene: DisplayScene
isResponsing?: boolean isResponsing?: boolean
answerIconClassName?: string
} }
// The component needs to maintain its own state to control whether to display input component // The component needs to maintain its own state to control whether to display input component
const Answer: FC<IAnswerProps> = ({ item, feedbackDisabled = false, isHideFeedbackEdit = false, onFeedback, onSubmitAnnotation, displayScene = 'web', isResponsing }) => { const Answer: FC<IAnswerProps> = ({ item, feedbackDisabled = false, isHideFeedbackEdit = false, onFeedback, onSubmitAnnotation, displayScene = 'web', isResponsing, answerIconClassName }) => {
const { id, content, more, feedback, adminFeedback, annotation: initAnnotation } = item const { id, content, more, feedback, adminFeedback, annotation: initAnnotation } = item
const [showEdit, setShowEdit] = useState(false) const [showEdit, setShowEdit] = useState(false)
const [loading, setLoading] = useState(false) const [loading, setLoading] = useState(false)
...@@ -292,7 +294,7 @@ const Answer: FC<IAnswerProps> = ({ item, feedbackDisabled = false, isHideFeedba ...@@ -292,7 +294,7 @@ const Answer: FC<IAnswerProps> = ({ item, feedbackDisabled = false, isHideFeedba
return ( return (
<div key={id}> <div key={id}>
<div className='flex items-start'> <div className='flex items-start'>
<div className={`${s.answerIcon} w-10 h-10 shrink-0`}> <div className={`${s.answerIcon} ${answerIconClassName} w-10 h-10 shrink-0`}>
{isResponsing {isResponsing
&& <div className={s.typeingIcon}> && <div className={s.typeingIcon}>
<LoadingAnim type='avatar' /> <LoadingAnim type='avatar' />
...@@ -428,6 +430,7 @@ const Chat: FC<IChatProps> = ({ ...@@ -428,6 +430,7 @@ const Chat: FC<IChatProps> = ({
isShowSuggestion, isShowSuggestion,
suggestionList, suggestionList,
isShowSpeechToText, isShowSpeechToText,
answerIconClassName,
}) => { }) => {
const { t } = useTranslation() const { t } = useTranslation()
const { notify } = useContext(ToastContext) const { notify } = useContext(ToastContext)
...@@ -520,6 +523,7 @@ const Chat: FC<IChatProps> = ({ ...@@ -520,6 +523,7 @@ const Chat: FC<IChatProps> = ({
onSubmitAnnotation={onSubmitAnnotation} onSubmitAnnotation={onSubmitAnnotation}
displayScene={displayScene ?? 'web'} displayScene={displayScene ?? 'web'}
isResponsing={isResponsing && isLast} isResponsing={isResponsing && isLast}
answerIconClassName={answerIconClassName}
/> />
} }
return <Question key={item.id} id={item.id} content={item.content} more={item.more} useCurrentUserAvatar={useCurrentUserAvatar} /> return <Question key={item.id} id={item.id} content={item.content} more={item.more} useCurrentUserAvatar={useCurrentUserAvatar} />
......
...@@ -372,7 +372,7 @@ const Debug: FC<IDebug> = ({ ...@@ -372,7 +372,7 @@ const Debug: FC<IDebug> = ({
{/* Chat */} {/* Chat */}
{mode === AppType.chat && ( {mode === AppType.chat && (
<div className="mt-[34px] h-full flex flex-col"> <div className="mt-[34px] h-full flex flex-col">
<div className={cn(doShowSuggestion ? 'pb-[140px]' : (isResponsing ? 'pb-[113px]' : 'pb-[66px]'), 'relative mt-1.5 grow h-[200px] overflow-hidden')}> <div className={cn(doShowSuggestion ? 'pb-[140px]' : (isResponsing ? 'pb-[113px]' : 'pb-[76px]'), 'relative mt-1.5 grow h-[200px] overflow-hidden')}>
<div className="h-full overflow-y-auto overflow-x-hidden" ref={chatListDomRef}> <div className="h-full overflow-y-auto overflow-x-hidden" ref={chatListDomRef}>
<Chat <Chat
chatList={chatList} chatList={chatList}
......
...@@ -16,6 +16,7 @@ import ConfigModel from '@/app/components/app/configuration/config-model' ...@@ -16,6 +16,7 @@ import ConfigModel from '@/app/components/app/configuration/config-model'
import Config from '@/app/components/app/configuration/config' import Config from '@/app/components/app/configuration/config'
import Debug from '@/app/components/app/configuration/debug' import Debug from '@/app/components/app/configuration/debug'
import Confirm from '@/app/components/base/confirm' import Confirm from '@/app/components/base/confirm'
import { ProviderType } from '@/types/app'
import type { AppDetailResponse } from '@/models/app' import type { AppDetailResponse } from '@/models/app'
import { ToastContext } from '@/app/components/base/toast' import { ToastContext } from '@/app/components/base/toast'
import { fetchTenantInfo } from '@/service/common' import { fetchTenantInfo } from '@/service/common'
...@@ -67,7 +68,7 @@ const Configuration: FC = () => { ...@@ -67,7 +68,7 @@ const Configuration: FC = () => {
frequency_penalty: 1, // -2-2 frequency_penalty: 1, // -2-2
}) })
const [modelConfig, doSetModelConfig] = useState<ModelConfig>({ const [modelConfig, doSetModelConfig] = useState<ModelConfig>({
provider: 'openai', provider: ProviderType.openai,
model_id: 'gpt-3.5-turbo', model_id: 'gpt-3.5-turbo',
configs: { configs: {
prompt_template: '', prompt_template: '',
...@@ -84,8 +85,9 @@ const Configuration: FC = () => { ...@@ -84,8 +85,9 @@ const Configuration: FC = () => {
doSetModelConfig(newModelConfig) doSetModelConfig(newModelConfig)
} }
const setModelId = (modelId: string) => { const setModelId = (modelId: string, provider: ProviderType) => {
const newModelConfig = produce(modelConfig, (draft: any) => { const newModelConfig = produce(modelConfig, (draft: any) => {
draft.provider = provider
draft.model_id = modelId draft.model_id = modelId
}) })
setModelConfig(newModelConfig) setModelConfig(newModelConfig)
......
...@@ -184,7 +184,11 @@ const GenerationItem: FC<IGenerationItemProps> = ({ ...@@ -184,7 +184,11 @@ const GenerationItem: FC<IGenerationItemProps> = ({
{taskId} {taskId}
</div>) </div>)
} }
<Markdown content={content} /> <div className='flex'>
<div className='grow w-0'>
<Markdown content={content} />
</div>
</div>
{messageId && ( {messageId && (
<div className='flex items-center justify-between mt-3'> <div className='flex items-center justify-between mt-3'>
<div className='flex items-center'> <div className='flex items-center'>
......
...@@ -19,6 +19,7 @@ const AutoHeightTextarea = forwardRef( ...@@ -19,6 +19,7 @@ const AutoHeightTextarea = forwardRef(
{ value, onChange, placeholder, className, minHeight = 36, maxHeight = 96, autoFocus, controlFocus, onKeyDown, onKeyUp }: IProps, { value, onChange, placeholder, className, minHeight = 36, maxHeight = 96, autoFocus, controlFocus, onKeyDown, onKeyUp }: IProps,
outerRef: any, outerRef: any,
) => { ) => {
// eslint-disable-next-line react-hooks/rules-of-hooks
const ref = outerRef || useRef<HTMLTextAreaElement>(null) const ref = outerRef || useRef<HTMLTextAreaElement>(null)
const doFocus = () => { const doFocus = () => {
...@@ -54,13 +55,20 @@ const AutoHeightTextarea = forwardRef( ...@@ -54,13 +55,20 @@ const AutoHeightTextarea = forwardRef(
return ( return (
<div className='relative'> <div className='relative'>
<div className={cn(className, 'invisible whitespace-pre-wrap break-all overflow-y-auto')} style={{ minHeight, maxHeight }}> <div className={cn(className, 'invisible whitespace-pre-wrap break-all overflow-y-auto')} style={{
minHeight,
maxHeight,
paddingRight: (value && value.trim().length > 10000) ? 140 : 130,
}}>
{!value ? placeholder : value.replace(/\n$/, '\n ')} {!value ? placeholder : value.replace(/\n$/, '\n ')}
</div> </div>
<textarea <textarea
ref={ref} ref={ref}
autoFocus={autoFocus} autoFocus={autoFocus}
className={cn(className, 'absolute inset-0 resize-none overflow-hidden')} className={cn(className, 'absolute inset-0 resize-none overflow-auto')}
style={{
paddingRight: (value && value.trim().length > 10000) ? 140 : 130,
}}
placeholder={placeholder} placeholder={placeholder}
onChange={onChange} onChange={onChange}
onKeyDown={onKeyDown} onKeyDown={onKeyDown}
......
...@@ -11,7 +11,7 @@ export const RFC_LOCALES = [ ...@@ -11,7 +11,7 @@ export const RFC_LOCALES = [
{ value: 'en-US', name: 'EN' }, { value: 'en-US', name: 'EN' },
{ value: 'zh-Hans', name: '简体中文' }, { value: 'zh-Hans', name: '简体中文' },
] ]
interface ISelectProps { type ISelectProps = {
items: Array<{ value: string; name: string }> items: Array<{ value: string; name: string }>
value?: string value?: string
className?: string className?: string
...@@ -21,7 +21,7 @@ interface ISelectProps { ...@@ -21,7 +21,7 @@ interface ISelectProps {
export default function Select({ export default function Select({
items, items,
value, value,
onChange onChange,
}: ISelectProps) { }: ISelectProps) {
const item = items.filter(item => item.value === value)[0] const item = items.filter(item => item.value === value)[0]
...@@ -29,11 +29,12 @@ export default function Select({ ...@@ -29,11 +29,12 @@ export default function Select({
<div className="w-56 text-right"> <div className="w-56 text-right">
<Menu as="div" className="relative inline-block text-left"> <Menu as="div" className="relative inline-block text-left">
<div> <div>
<Menu.Button className="inline-flex w-full justify-center items-center <Menu.Button className="inline-flex w-full h-[44px]justify-center items-center
rounded-lg px-2 py-1 rounded-lg px-[10px] py-[6px]
text-gray-600 text-xs font-medium text-gray-900 text-[13px] font-medium
border border-gray-200"> border border-gray-200
<GlobeAltIcon className="w-5 h-5 mr-2 " aria-hidden="true" /> hover:bg-gray-100">
<GlobeAltIcon className="w-5 h-5 mr-1" aria-hidden="true" />
{item?.name} {item?.name}
</Menu.Button> </Menu.Button>
</div> </div>
...@@ -46,14 +47,14 @@ export default function Select({ ...@@ -46,14 +47,14 @@ export default function Select({
leaveFrom="transform opacity-100 scale-100" leaveFrom="transform opacity-100 scale-100"
leaveTo="transform opacity-0 scale-95" leaveTo="transform opacity-0 scale-95"
> >
<Menu.Items className="absolute right-0 mt-2 w-28 origin-top-right divide-y divide-gray-100 rounded-md bg-white shadow-lg ring-1 ring-black ring-opacity-5 focus:outline-none"> <Menu.Items className="absolute right-0 mt-2 w-[120px] origin-top-right divide-y divide-gray-100 rounded-md bg-white shadow-lg ring-1 ring-black ring-opacity-5 focus:outline-none">
<div className="px-1 py-1 "> <div className="px-1 py-1 ">
{items.map((item) => { {items.map((item) => {
return <Menu.Item key={item.value}> return <Menu.Item key={item.value}>
{({ active }) => ( {({ active }) => (
<button <button
className={`${active ? 'bg-gray-100' : '' className={`${active ? 'bg-gray-100' : ''
} group flex w-full items-center rounded-md px-2 py-2 text-sm`} } group flex w-full items-center rounded-lg px-3 py-2 text-sm text-gray-700`}
onClick={(evt) => { onClick={(evt) => {
evt.preventDefault() evt.preventDefault()
onChange && onChange(item.value) onChange && onChange(item.value)
...@@ -77,7 +78,7 @@ export default function Select({ ...@@ -77,7 +78,7 @@ export default function Select({
export function InputSelect({ export function InputSelect({
items, items,
value, value,
onChange onChange,
}: ISelectProps) { }: ISelectProps) {
const item = items.filter(item => item.value === value)[0] const item = items.filter(item => item.value === value)[0]
return ( return (
...@@ -104,7 +105,7 @@ export function InputSelect({ ...@@ -104,7 +105,7 @@ export function InputSelect({
{({ active }) => ( {({ active }) => (
<button <button
className={`${active ? 'bg-gray-100' : '' className={`${active ? 'bg-gray-100' : ''
} group flex w-full items-center rounded-md px-2 py-2 text-sm`} } group flex w-full items-center rounded-md px-2 py-2 text-sm`}
onClick={() => { onClick={() => {
onChange && onChange(item.value) onChange && onChange(item.value)
}} }}
...@@ -122,4 +123,4 @@ export function InputSelect({ ...@@ -122,4 +123,4 @@ export function InputSelect({
</Menu> </Menu>
</div> </div>
) )
} }
\ No newline at end of file
...@@ -334,6 +334,53 @@ For versatile conversational apps using a Q&A format, call the chat-messages API ...@@ -334,6 +334,53 @@ For versatile conversational apps using a Q&A format, call the chat-messages API
</Row> </Row>
---
<Heading
url='/conversations/{conversation_id}'
method='DELETE'
title='Conversation deletion'
name='#delete'
/>
<Row>
<Col>
Deletes the specified conversation.
### Request Body
<Properties>
<Property name='user' type='string' key='user'>
The user identifier, defined by the developer, must ensure uniqueness within the app.
</Property>
</Properties>
</Col>
<Col sticky>
<CodeGroup title="Request" tag="DELETE" label="/conversations/{converation_id}" targetCode={`curl --location --request DELETE '${props.appDetail.api_base_url}/conversations/{conversation_id}' \\\n--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{ \n "user": "abc-123"\n}'`}>
```bash {{ title: 'cURL' }}
curl --location --request DELETE 'https://cloud.langgenius.dev/api/conversations/{conversation_id}' \
--header 'Content-Type: application/json' \
--header 'Accept: application/json' \
--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \
--data '{
"user": "abc-123"
}'
```
</CodeGroup>
<CodeGroup title="Response">
```json {{ title: 'Response' }}
{
"result": "success"
}
```
</CodeGroup>
</Col>
</Row>
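For reference, the same request issued from Python with the `requests` library (a sketch mirroring the cURL example above; the base URL and API key are placeholders):

```python
import requests

API_BASE = 'https://cloud.langgenius.dev/api'   # or your self-hosted api_base_url
API_KEY = 'ENTER-YOUR-SECRET-KEY'
conversation_id = 'your-conversation-id'

resp = requests.delete(
    f'{API_BASE}/conversations/{conversation_id}',
    headers={
        'Authorization': f'Bearer {API_KEY}',
        'Content-Type': 'application/json',
    },
    json={'user': 'abc-123'},
)
print(resp.json())  # {"result": "success"} on success
```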
--- ---
<Heading <Heading
......
...@@ -333,6 +333,52 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' ...@@ -333,6 +333,52 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx'
</Col> </Col>
</Row> </Row>
---
<Heading
url='/conversations/{conversation_id}'
method='DELETE'
title='Conversation deletion'
name='#delete'
/>
<Row>
<Col>
Deletes the specified conversation.
### Request Body
<Properties>
<Property name='user' type='string' key='user'>
The user identifier, defined by the developer, must ensure uniqueness within the app.
</Property>
</Properties>
</Col>
<Col sticky>
<CodeGroup title="Request" tag="DELETE" label="/conversations/{converation_id}" targetCode={`curl --location --request DELETE '${props.appDetail.api_base_url}/conversations/{conversation_id}' \\\n--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{ \n "user": "abc-123"\n}'`}>
```bash {{ title: 'cURL' }}
curl --location --request DELETE 'https://cloud.langgenius.dev/api/conversations/{conversation_id}' \
--header 'Content-Type: application/json' \
--header 'Accept: application/json' \
--header 'Authorization: Bearer ENTER-YOUR-SECRET-KEY' \
--data '{
"user": "abc-123"
}'
```
</CodeGroup>
<CodeGroup title="Response">
```json {{ title: 'Response' }}
{
"result": "success"
}
```
</CodeGroup>
</Col>
</Row>
--- ---
......
.logo-icon { .logo-icon {
background: url(../assets/logo-icon.png) center center no-repeat; background: url(../assets/logo-icon.png) center center no-repeat;
background-size: contain; background-size: 32px;
box-shadow: 0px 4px 6px -1px rgba(0, 0, 0, 0.05), 0px 2px 4px -2px rgba(0, 0, 0, 0.05); box-shadow: 0px 4px 6px -1px rgba(0, 0, 0, 0.05), 0px 2px 4px -2px rgba(0, 0, 0, 0.05);
} }
......
...@@ -34,7 +34,7 @@ export default function AccountAbout({ ...@@ -34,7 +34,7 @@ export default function AccountAbout({
<div> <div>
<div className={classNames( <div className={classNames(
s['logo-icon'], s['logo-icon'],
'mx-auto mb-3 w-12 h-12 bg-white rounded border border-gray-200', 'mx-auto mb-3 w-12 h-12 bg-white rounded-xl border border-gray-200',
)} /> )} />
<div className={classNames( <div className={classNames(
s['logo-text'], s['logo-text'],
......
...@@ -25,13 +25,19 @@ const inputClassName = ` ...@@ -25,13 +25,19 @@ const inputClassName = `
text-sm font-normal text-gray-800 text-sm font-normal text-gray-800
` `
const validPassword = /^(?=.*[a-zA-Z])(?=.*\d).{8,}$/
export default function AccountPage() { export default function AccountPage() {
const { t } = useTranslation()
const { mutateUserProfile, userProfile, apps } = useAppContext() const { mutateUserProfile, userProfile, apps } = useAppContext()
const { notify } = useContext(ToastContext) const { notify } = useContext(ToastContext)
const [editNameModalVisible, setEditNameModalVisible] = useState(false) const [editNameModalVisible, setEditNameModalVisible] = useState(false)
const [editName, setEditName] = useState('') const [editName, setEditName] = useState('')
const [editing, setEditing] = useState(false) const [editing, setEditing] = useState(false)
const { t } = useTranslation() const [editPasswordModalVisible, setEditPasswordModalVisible] = useState(false)
const [currentPassword, setCurrentPassword] = useState('')
const [password, setPassword] = useState('')
const [confirmPassword, setConfirmPassword] = useState('')
const handleEditName = () => { const handleEditName = () => {
setEditNameModalVisible(true) setEditNameModalVisible(true)
...@@ -52,6 +58,56 @@ export default function AccountPage() { ...@@ -52,6 +58,56 @@ export default function AccountPage() {
setEditing(false) setEditing(false)
} }
} }
const showErrorMessage = (message: string) => {
notify({
type: 'error',
message,
})
}
const valid = () => {
if (!password.trim()) {
showErrorMessage(t('login.error.passwordEmpty'))
return false
}
if (!validPassword.test(password)) {
showErrorMessage(t('login.error.passwordInvalid'))
return false
}
if (password !== confirmPassword) {
showErrorMessage(t('common.account.notEqual'))
return false
}
return true
}
const resetPasswordForm = () => {
setCurrentPassword('')
setPassword('')
setConfirmPassword('')
}
const handleSavePassowrd = async () => {
if (!valid())
return
try {
setEditing(true)
await updateUserProfile({
url: 'account/password',
body: {
password: currentPassword,
new_password: password,
repeat_new_password: confirmPassword,
},
})
notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') })
mutateUserProfile()
setEditPasswordModalVisible(false)
resetPasswordForm()
setEditing(false)
}
catch (e) {
notify({ type: 'error', message: (e as Error).message })
setEditPasswordModalVisible(false)
setEditing(false)
}
}
const renderAppItem = (item: IItem) => { const renderAppItem = (item: IItem) => {
return ( return (
<div className='flex px-3 py-1'> <div className='flex px-3 py-1'>
...@@ -80,51 +136,105 @@ export default function AccountPage() { ...@@ -80,51 +136,105 @@ export default function AccountPage() {
<div className={titleClassName}>{t('common.account.email')}</div> <div className={titleClassName}>{t('common.account.email')}</div>
<div className={classNames(inputClassName, 'cursor-pointer')}>{userProfile.email}</div> <div className={classNames(inputClassName, 'cursor-pointer')}>{userProfile.email}</div>
</div> </div>
{ <div className='mb-8'>
!!apps.length && ( <div className='mb-1 text-sm font-medium text-gray-900'>{t('common.account.password')}</div>
<> <div className='mb-2 text-xs text-gray-500'>{t('common.account.passwordTip')}</div>
<div className='mb-6 border-[0.5px] border-gray-100' /> <Button className='font-medium !text-gray-700 !px-3 !py-[7px] !text-[13px]' onClick={() => setEditPasswordModalVisible(true)}>{userProfile.is_password_set ? t('common.account.resetPassword') : t('common.account.setPassword')}</Button>
<div className='mb-8'> </div>
<div className={titleClassName}>{t('common.account.langGeniusAccount')}</div> {!!apps.length && (
<div className={descriptionClassName}>{t('common.account.langGeniusAccountTip')}</div> <>
<Collapse <div className='mb-6 border-[0.5px] border-gray-100' />
title={`${t('common.account.showAppLength', { length: apps.length })}`} <div className='mb-8'>
items={apps.map(app => ({ key: app.id, name: app.name }))} <div className={titleClassName}>{t('common.account.langGeniusAccount')}</div>
renderItem={renderAppItem} <div className={descriptionClassName}>{t('common.account.langGeniusAccountTip')}</div>
wrapperClassName='mt-2' <Collapse
/> title={`${t('common.account.showAppLength', { length: apps.length })}`}
</div> items={apps.map(app => ({ key: app.id, name: app.name }))}
</> renderItem={renderAppItem}
) wrapperClassName='mt-2'
}
{
editNameModalVisible && (
<Modal
isShow
onClose={() => setEditNameModalVisible(false)}
className={s.modal}
>
<div className='mb-6 text-lg font-medium text-gray-900'>{t('common.account.editName')}</div>
<div className={titleClassName}>{t('common.account.name')}</div>
<input
className={inputClassName}
value={editName}
onChange={e => setEditName(e.target.value)}
/> />
<div className='flex justify-end mt-10'> </div>
<Button className='mr-2 text-sm font-medium' onClick={() => setEditNameModalVisible(false)}>{t('common.operation.cancel')}</Button> </>
<Button )}
disabled={editing || !editName} {editNameModalVisible && (
type='primary' <Modal
className='text-sm font-medium' isShow
onClick={handleSaveName} onClose={() => setEditNameModalVisible(false)}
> className={s.modal}
{t('common.operation.save')} >
</Button> <div className='mb-6 text-lg font-medium text-gray-900'>{t('common.account.editName')}</div>
</div> <div className={titleClassName}>{t('common.account.name')}</div>
</Modal> <input
) className={inputClassName}
} value={editName}
onChange={e => setEditName(e.target.value)}
/>
<div className='flex justify-end mt-10'>
<Button className='mr-2 text-sm font-medium' onClick={() => setEditNameModalVisible(false)}>{t('common.operation.cancel')}</Button>
<Button
disabled={editing || !editName}
type='primary'
className='text-sm font-medium'
onClick={handleSaveName}
>
{t('common.operation.save')}
</Button>
</div>
</Modal>
)}
{editPasswordModalVisible && (
<Modal
isShow
onClose={() => {
setEditPasswordModalVisible(false)
resetPasswordForm()
}}
className={s.modal}
>
<div className='mb-6 text-lg font-medium text-gray-900'>{userProfile.is_password_set ? t('common.account.resetPassword') : t('common.account.setPassword')}</div>
{userProfile.is_password_set && (
<>
<div className={titleClassName}>{t('common.account.currentPassword')}</div>
<input
type="password"
className={inputClassName}
value={currentPassword}
onChange={e => setCurrentPassword(e.target.value)}
/>
</>
)}
<div className='mt-8 text-sm font-medium text-gray-900'>
{userProfile.is_password_set ? t('common.account.newPassword') : t('common.account.password')}
</div>
<input
type="password"
className={inputClassName}
value={password}
onChange={e => setPassword(e.target.value)}
/>
<div className='mt-8 text-sm font-medium text-gray-900'>{t('common.account.confirmPassword')}</div>
<input
type="password"
className={inputClassName}
value={confirmPassword}
onChange={e => setConfirmPassword(e.target.value)}
/>
<div className='flex justify-end mt-10'>
<Button className='mr-2 text-sm font-medium' onClick={() => {
setEditPasswordModalVisible(false)
resetPasswordForm()
}}>{t('common.operation.cancel')}</Button>
<Button
disabled={editing}
type='primary'
className='text-sm font-medium'
onClick={handleSavePassword}
>
{userProfile.is_password_set ? t('common.operation.reset') : t('common.operation.save')}
</Button>
</div>
</Modal>
)}
</> </>
) )
} }
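A note on the password flow added above: the account page now posts the current and new password to the account/password endpoint, after checking the new password locally against the validPassword regex (at least 8 characters, containing at least one letter and one digit) and against the confirmation field. A minimal sketch of those client-side checks as a standalone helper (the names validateNewPassword and PasswordCheck are illustrative, not part of the codebase):

const validPassword = /^(?=.*[a-zA-Z])(?=.*\d).{8,}$/

type PasswordCheck =
  | { ok: true }
  | { ok: false; reason: 'empty' | 'invalid' | 'mismatch' }

// Mirrors the checks in valid(): empty input, regex failure, confirmation mismatch.
function validateNewPassword(password: string, confirmPassword: string): PasswordCheck {
  if (!password.trim())
    return { ok: false, reason: 'empty' } // surfaced as login.error.passwordEmpty
  if (!validPassword.test(password))
    return { ok: false, reason: 'invalid' } // surfaced as login.error.passwordInvalid
  if (password !== confirmPassword)
    return { ok: false, reason: 'mismatch' } // surfaced as common.account.notEqual
  return { ok: true }
}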
'use client' 'use client'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import { useState } from 'react' import { useEffect, useRef, useState } from 'react'
import cn from 'classnames'
import { AtSymbolIcon, CubeTransparentIcon, GlobeAltIcon, UserIcon, UsersIcon, XMarkIcon } from '@heroicons/react/24/outline' import { AtSymbolIcon, CubeTransparentIcon, GlobeAltIcon, UserIcon, UsersIcon, XMarkIcon } from '@heroicons/react/24/outline'
import { GlobeAltIcon as GlobalAltIconSolid, UserIcon as UserIconSolid, UsersIcon as UsersIconSolid } from '@heroicons/react/24/solid' import { GlobeAltIcon as GlobalAltIconSolid, UserIcon as UserIconSolid, UsersIcon as UsersIconSolid } from '@heroicons/react/24/solid'
import AccountPage from './account-page' import AccountPage from './account-page'
...@@ -18,6 +19,10 @@ const iconClassName = ` ...@@ -18,6 +19,10 @@ const iconClassName = `
w-4 h-4 ml-3 mr-2 w-4 h-4 ml-3 mr-2
` `
const scrolledClassName = `
border-b shadow-xs bg-white/[.98]
`
type IAccountSettingProps = { type IAccountSettingProps = {
onCancel: () => void onCancel: () => void
activeTab?: string activeTab?: string
...@@ -78,6 +83,22 @@ export default function AccountSetting({ ...@@ -78,6 +83,22 @@ export default function AccountSetting({
], ],
}, },
] ]
const scrollRef = useRef<HTMLDivElement>(null)
const [scrolled, setScrolled] = useState(false)
const scrollHandle = (e: any) => {
if (e.target.scrollTop > 0)
setScrolled(true)
else
setScrolled(false)
}
useEffect(() => {
const targetElement = scrollRef.current
targetElement?.addEventListener('scroll', scrollHandle)
return () => {
targetElement?.removeEventListener('scroll', scrollHandle)
}
}, [])
return ( return (
<Modal <Modal
...@@ -115,29 +136,19 @@ export default function AccountSetting({ ...@@ -115,29 +136,19 @@ export default function AccountSetting({
} }
</div> </div>
</div> </div>
<div className='w-[520px] h-[580px] px-6 py-4 overflow-y-auto'> <div ref={scrollRef} className='relative w-[520px] h-[580px] pb-4 overflow-y-auto'>
<div className='flex items-center justify-between h-6 mb-8 text-base font-medium text-gray-900 '> <div className={cn('sticky top-0 px-6 py-4 flex items-center justify-between h-14 mb-4 bg-white text-base font-medium text-gray-900', scrolled && scrolledClassName)}>
{[...menuItems[0].items, ...menuItems[1].items].find(item => item.key === activeMenu)?.name} {[...menuItems[0].items, ...menuItems[1].items].find(item => item.key === activeMenu)?.name}
<XMarkIcon className='w-4 h-4 cursor-pointer' onClick={onCancel} /> <XMarkIcon className='w-4 h-4 cursor-pointer' onClick={onCancel} />
</div> </div>
{ <div className='px-6'>
activeMenu === 'account' && <AccountPage /> {activeMenu === 'account' && <AccountPage />}
} {activeMenu === 'members' && <MembersPage />}
{ {activeMenu === 'integrations' && <IntegrationsPage />}
activeMenu === 'members' && <MembersPage /> {activeMenu === 'language' && <LanguagePage />}
} {activeMenu === 'provider' && <ProviderPage />}
{ {activeMenu === 'data-source' && <DataSourcePage />}
activeMenu === 'integrations' && <IntegrationsPage /> </div>
}
{
activeMenu === 'language' && <LanguagePage />
}
{
activeMenu === 'provider' && <ProviderPage />
}
{
activeMenu === 'data-source' && <DataSourcePage />
}
</div> </div>
</div> </div>
</Modal> </Modal>
......
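The settings panel diff above attaches a scroll listener to the right-hand pane through a ref, so the sticky header picks up a border and shadow once the content has scrolled away from the top. A minimal sketch of the same pattern extracted into a reusable hook (the hook name useScrolled is illustrative, not part of the codebase):

import { useEffect, useRef, useState } from 'react'

// Returns a ref to attach to the scrollable element and a flag that becomes
// true once the element has been scrolled away from the top.
export function useScrolled<T extends HTMLElement>() {
  const ref = useRef<T>(null)
  const [scrolled, setScrolled] = useState(false)

  useEffect(() => {
    const el = ref.current
    if (!el)
      return
    const onScroll = () => setScrolled(el.scrollTop > 0)
    el.addEventListener('scroll', onScroll)
    return () => el.removeEventListener('scroll', onScroll)
  }, [])

  return { ref, scrolled }
}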
...@@ -30,6 +30,7 @@ const MembersPage = () => { ...@@ -30,6 +30,7 @@ const MembersPage = () => {
const { userProfile } = useAppContext() const { userProfile } = useAppContext()
const { data, mutate } = useSWR({ url: '/workspaces/current/members' }, fetchMembers) const { data, mutate } = useSWR({ url: '/workspaces/current/members' }, fetchMembers)
const [inviteModalVisible, setInviteModalVisible] = useState(false) const [inviteModalVisible, setInviteModalVisible] = useState(false)
const [invitationLink, setInvitationLink] = useState('')
const [invitedModalVisible, setInvitedModalVisible] = useState(false) const [invitedModalVisible, setInvitedModalVisible] = useState(false)
const accounts = data?.accounts || [] const accounts = data?.accounts || []
const owner = accounts.filter(account => account.role === 'owner')?.[0]?.email === userProfile.email const owner = accounts.filter(account => account.role === 'owner')?.[0]?.email === userProfile.email
...@@ -93,8 +94,9 @@ const MembersPage = () => { ...@@ -93,8 +94,9 @@ const MembersPage = () => {
inviteModalVisible && ( inviteModalVisible && (
<InviteModal <InviteModal
onCancel={() => setInviteModalVisible(false)} onCancel={() => setInviteModalVisible(false)}
onSend={() => { onSend={(url) => {
setInvitedModalVisible(true) setInvitedModalVisible(true)
setInvitationLink(url)
mutate() mutate()
}} }}
/> />
...@@ -103,6 +105,7 @@ const MembersPage = () => { ...@@ -103,6 +105,7 @@ const MembersPage = () => {
{ {
invitedModalVisible && ( invitedModalVisible && (
<InvitedModal <InvitedModal
invitationLink={invitationLink}
onCancel={() => setInvitedModalVisible(false)} onCancel={() => setInvitedModalVisible(false)}
/> />
) )
......
...@@ -3,16 +3,16 @@ import { useState } from 'react' ...@@ -3,16 +3,16 @@ import { useState } from 'react'
import { useContext } from 'use-context-selector' import { useContext } from 'use-context-selector'
import { XMarkIcon } from '@heroicons/react/24/outline' import { XMarkIcon } from '@heroicons/react/24/outline'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import s from './index.module.css'
import Modal from '@/app/components/base/modal' import Modal from '@/app/components/base/modal'
import Button from '@/app/components/base/button' import Button from '@/app/components/base/button'
import s from './index.module.css'
import { inviteMember } from '@/service/common' import { inviteMember } from '@/service/common'
import { emailRegex } from '@/config' import { emailRegex } from '@/config'
import { ToastContext } from '@/app/components/base/toast' import { ToastContext } from '@/app/components/base/toast'
interface IInviteModalProps { type IInviteModalProps = {
onCancel: () => void, onCancel: () => void
onSend: () => void, onSend: (url: string) => void
} }
const InviteModal = ({ const InviteModal = ({
onCancel, onCancel,
...@@ -25,16 +25,16 @@ const InviteModal = ({ ...@@ -25,16 +25,16 @@ const InviteModal = ({
const handleSend = async () => { const handleSend = async () => {
if (emailRegex.test(email)) { if (emailRegex.test(email)) {
try { try {
const res = await inviteMember({ url: '/workspaces/current/members/invite-email', body: { email, role: 'admin'} }) const res = await inviteMember({ url: '/workspaces/current/members/invite-email', body: { email, role: 'admin' } })
if (res.result === 'success') { if (res.result === 'success') {
onCancel() onCancel()
onSend() onSend(res.invite_url)
} }
} catch (e) {
} }
} else { catch (e) {}
}
else {
notify({ type: 'error', message: t('common.members.emailInvalid') }) notify({ type: 'error', message: t('common.members.emailInvalid') })
} }
} }
...@@ -51,15 +51,15 @@ const InviteModal = ({ ...@@ -51,15 +51,15 @@ const InviteModal = ({
<div className='mb-2 text-sm font-medium text-gray-900'>{t('common.members.email')}</div> <div className='mb-2 text-sm font-medium text-gray-900'>{t('common.members.email')}</div>
<input <input
className=' className='
block w-full py-2 mb-9 px-3 bg-gray-50 outline-none border-none block w-full py-2 mb-9 px-3 bg-gray-50 outline-none border-none
appearance-none text-sm text-gray-900 rounded-lg appearance-none text-sm text-gray-900 rounded-lg
' '
value={email} value={email}
onChange={e => setEmail(e.target.value)} onChange={e => setEmail(e.target.value)}
placeholder={t('common.members.emailPlaceholder') || ''} placeholder={t('common.members.emailPlaceholder') || ''}
/> />
<Button <Button
className='w-full text-sm font-medium' className='w-full text-sm font-medium'
onClick={handleSend} onClick={handleSend}
type='primary' type='primary'
> >
......
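The invite flow above now surfaces the invitation link: the invite-email endpoint responds with an invite_url alongside result, InviteModal passes it through onSend, and the members page stores it for InvitedModal. A minimal sketch of the assumed response shape and the callback contract (the type names are illustrative; only result and invite_url appear in the diff):

// Assumed shape of the /workspaces/current/members/invite-email response.
type InviteMemberResponse = {
  result: string // 'success' when the invitation was created
  invite_url: string // shareable invitation link rendered in InvitedModal
}

// Callback contract between InviteModal and the members page.
type OnInviteSent = (url: string) => void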
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.6665 2.66683C11.2865 2.66683 11.5965 2.66683 11.8508 2.73498C12.541 2.91991 13.0801 3.45901 13.265 4.14919C13.3332 4.40352 13.3332 4.71352 13.3332 5.3335V11.4668C13.3332 12.5869 13.3332 13.147 13.1152 13.5748C12.9234 13.9511 12.6175 14.2571 12.2412 14.4488C11.8133 14.6668 11.2533 14.6668 10.1332 14.6668H5.8665C4.7464 14.6668 4.18635 14.6668 3.75852 14.4488C3.3822 14.2571 3.07624 13.9511 2.88449 13.5748C2.6665 13.147 2.6665 12.5869 2.6665 11.4668V5.3335C2.6665 4.71352 2.6665 4.40352 2.73465 4.14919C2.91959 3.45901 3.45868 2.91991 4.14887 2.73498C4.4032 2.66683 4.71319 2.66683 5.33317 2.66683M5.99984 10.0002L7.33317 11.3335L10.3332 8.3335M6.39984 4.00016H9.59984C9.9732 4.00016 10.1599 4.00016 10.3025 3.9275C10.4279 3.86359 10.5299 3.7616 10.5938 3.63616C10.6665 3.49355 10.6665 3.30686 10.6665 2.9335V2.40016C10.6665 2.02679 10.6665 1.84011 10.5938 1.6975C10.5299 1.57206 10.4279 1.47007 10.3025 1.40616C10.1599 1.3335 9.97321 1.3335 9.59984 1.3335H6.39984C6.02647 1.3335 5.83978 1.3335 5.69718 1.40616C5.57174 1.47007 5.46975 1.57206 5.40583 1.6975C5.33317 1.84011 5.33317 2.02679 5.33317 2.40016V2.9335C5.33317 3.30686 5.33317 3.49355 5.40583 3.63616C5.46975 3.7616 5.57174 3.86359 5.69718 3.9275C5.83978 4.00016 6.02647 4.00016 6.39984 4.00016Z" stroke="#1D2939" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.6665 2.66634H11.9998C12.3535 2.66634 12.6926 2.80682 12.9426 3.05687C13.1927 3.30691 13.3332 3.64605 13.3332 3.99967V13.333C13.3332 13.6866 13.1927 14.0258 12.9426 14.2758C12.6926 14.5259 12.3535 14.6663 11.9998 14.6663H3.99984C3.64622 14.6663 3.30708 14.5259 3.05703 14.2758C2.80698 14.0258 2.6665 13.6866 2.6665 13.333V3.99967C2.6665 3.64605 2.80698 3.30691 3.05703 3.05687C3.30708 2.80682 3.64622 2.66634 3.99984 2.66634H5.33317M5.99984 1.33301H9.99984C10.368 1.33301 10.6665 1.63148 10.6665 1.99967V3.33301C10.6665 3.7012 10.368 3.99967 9.99984 3.99967H5.99984C5.63165 3.99967 5.33317 3.7012 5.33317 3.33301V1.99967C5.33317 1.63148 5.63165 1.33301 5.99984 1.33301Z" stroke="#1D2939" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.6665 2.66634H11.9998C12.3535 2.66634 12.6926 2.80682 12.9426 3.05687C13.1927 3.30691 13.3332 3.64605 13.3332 3.99967V13.333C13.3332 13.6866 13.1927 14.0258 12.9426 14.2758C12.6926 14.5259 12.3535 14.6663 11.9998 14.6663H3.99984C3.64622 14.6663 3.30708 14.5259 3.05703 14.2758C2.80698 14.0258 2.6665 13.6866 2.6665 13.333V3.99967C2.6665 3.64605 2.80698 3.30691 3.05703 3.05687C3.30708 2.80682 3.64622 2.66634 3.99984 2.66634H5.33317M5.99984 1.33301H9.99984C10.368 1.33301 10.6665 1.63148 10.6665 1.99967V3.33301C10.6665 3.7012 10.368 3.99967 9.99984 3.99967H5.99984C5.63165 3.99967 5.33317 3.7012 5.33317 3.33301V1.99967C5.33317 1.63148 5.63165 1.33301 5.99984 1.33301Z" stroke="#667085" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
...@@ -2,4 +2,20 @@ ...@@ -2,4 +2,20 @@
padding: 32px !important; padding: 32px !important;
width: 480px !important; width: 480px !important;
background: linear-gradient(180deg, rgba(3, 152, 85, 0.05) 0%, rgba(3, 152, 85, 0) 22.44%), #F9FAFB !important; background: linear-gradient(180deg, rgba(3, 152, 85, 0.05) 0%, rgba(3, 152, 85, 0) 22.44%), #F9FAFB !important;
}
.copyIcon {
background-image: url(./assets/copy.svg);
background-position: center;
background-repeat: no-repeat;
}
.copyIcon:hover {
background-image: url(./assets/copy-hover.svg);
background-position: center;
background-repeat: no-repeat;
}
.copyIcon.copied {
background-image: url(./assets/copied.svg);
} }
\ No newline at end of file
import { CheckCircleIcon } from '@heroicons/react/24/solid' import { CheckCircleIcon } from '@heroicons/react/24/solid'
import { XMarkIcon } from '@heroicons/react/24/outline' import { XMarkIcon } from '@heroicons/react/24/outline'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import InvitationLink from './invitation-link'
import s from './index.module.css'
import Modal from '@/app/components/base/modal' import Modal from '@/app/components/base/modal'
import Button from '@/app/components/base/button' import Button from '@/app/components/base/button'
import s from './index.module.css' type IInvitedModalProps = {
invitationLink: string
interface IInvitedModalProps { onCancel: () => void
onCancel: () => void,
} }
const InvitedModal = ({ const InvitedModal = ({
onCancel invitationLink,
onCancel,
}: IInvitedModalProps) => { }: IInvitedModalProps) => {
const { t } = useTranslation() const { t } = useTranslation()
...@@ -27,10 +29,14 @@ const InvitedModal = ({ ...@@ -27,10 +29,14 @@ const InvitedModal = ({
<XMarkIcon className='w-4 h-4 cursor-pointer' onClick={onCancel} /> <XMarkIcon className='w-4 h-4 cursor-pointer' onClick={onCancel} />
</div> </div>
<div className='mb-1 text-xl font-semibold text-gray-900'>{t('common.members.invitationSent')}</div> <div className='mb-1 text-xl font-semibold text-gray-900'>{t('common.members.invitationSent')}</div>
<div className='mb-10 text-sm text-gray-500'>{t('common.members.invitationSentTip')}</div> <div className='mb-5 text-sm text-gray-500'>{t('common.members.invitationSentTip')}</div>
<div className='mb-9'>
<div className='py-2 text-sm font-medium text-gray-900'>{t('common.members.invitationLink')}</div>
<InvitationLink value={invitationLink} />
</div>
<div className='flex justify-end'> <div className='flex justify-end'>
<Button <Button
className='w-[96px] text-sm font-medium' className='w-[96px] text-sm font-medium'
onClick={onCancel} onClick={onCancel}
type='primary' type='primary'
> >
...@@ -42,4 +48,4 @@ const InvitedModal = ({ ...@@ -42,4 +48,4 @@ const InvitedModal = ({
) )
} }
export default InvitedModal export default InvitedModal
\ No newline at end of file
'use client'
import React, { useCallback, useEffect, useState } from 'react'
import { t } from 'i18next'
import s from './index.module.css'
import Tooltip from '@/app/components/base/tooltip'
import useCopyToClipboard from '@/hooks/use-copy-to-clipboard'
type IInvitationLinkProps = {
value?: string
}
const InvitationLink = ({
value = '',
}: IInvitationLinkProps) => {
const [isCopied, setIsCopied] = useState(false)
const [_, copy] = useCopyToClipboard()
const copyHandle = useCallback(() => {
copy(value)
setIsCopied(true)
}, [value, copy])
useEffect(() => {
if (isCopied) {
const timeout = setTimeout(() => {
setIsCopied(false)
}, 1000)
return () => {
clearTimeout(timeout)
}
}
}, [isCopied])
return (
<div className='flex rounded-lg bg-gray-100 hover:bg-gray-100 border border-gray-200 py-2 items-center'>
<div className="flex items-center flex-grow h-5">
<div className='flex-grow bg-gray-100 text-[13px] relative h-full'>
<Tooltip
selector="top-uniq"
content={isCopied ? `${t('appApi.copied')}` : `${t('appApi.copy')}`}
className='z-10'
>
<div className='absolute top-0 left-0 w-full pl-2 pr-2 truncate cursor-pointer r-0' onClick={copyHandle}>{value}</div>
</Tooltip>
</div>
<div className="flex-shrink-0 h-4 bg-gray-200 border" />
<Tooltip
selector="top-uniq"
content={isCopied ? `${t('appApi.copied')}` : `${t('appApi.copy')}`}
className='z-10'
>
<div className="px-0.5 flex-shrink-0">
<div className={`box-border w-[30px] h-[30px] flex items-center justify-center rounded-lg hover:bg-gray-100 cursor-pointer ${s.copyIcon} ${isCopied ? s.copied : ''}`} onClick={copyHandle}>
</div>
</div>
</Tooltip>
</div>
</div>
)
}
export default InvitationLink
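InvitationLink above copies the link and flips an isCopied flag that resets itself after a second, which drives both the tooltip text and the icon state. A minimal sketch of that pattern as a standalone hook, assuming the browser Clipboard API rather than the project's useCopyToClipboard hook (the name useTransientCopy is illustrative):

import { useCallback, useEffect, useState } from 'react'

export function useTransientCopy(resetAfterMs = 1000) {
  const [isCopied, setIsCopied] = useState(false)

  const copy = useCallback(async (text: string) => {
    await navigator.clipboard.writeText(text) // Clipboard API, requires a secure context
    setIsCopied(true)
  }, [])

  // Clear the copied state after the timeout so the UI falls back to "copy".
  useEffect(() => {
    if (!isCopied)
      return
    const timeout = setTimeout(() => setIsCopied(false), resetAfterMs)
    return () => clearTimeout(timeout)
  }, [isCopied, resetAfterMs])

  return { isCopied, copy }
}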
.icon {
width: 24px;
height: 24px;
margin-right: 12px;
background: url(../../../assets/anthropic.svg) center center no-repeat;
background-size: contain;
}
.bar {
background: linear-gradient(90deg, rgba(41, 112, 255, 0.9) 0%, rgba(21, 94, 239, 0.9) 100%);
}
.bar-error {
background: linear-gradient(90deg, rgba(240, 68, 56, 0.72) 0%, rgba(217, 45, 32, 0.9) 100%);
}
.bar-item {
width: 10%;
border-right: 1px solid rgba(255, 255, 255, 0.5);
}
.bar-item:last-of-type {
border-right: 0;
}
\ No newline at end of file
This diff is collapsed.