Commit 46dddc1b authored by StyleZhang

Merge branch 'main' into feat/embedding

parents 44ef36d4 da5782df
......@@ -109,6 +109,7 @@ venv/
ENV/
env.bak/
venv.bak/
.conda/
# Spyder project settings
.spyderproject
......@@ -147,3 +148,5 @@ docker/volumes/weaviate/*
sdks/python-client/build
sdks/python-client/dist
sdks/python-client/dify_client.egg-info
.vscode/
\ No newline at end of file
......@@ -155,7 +155,7 @@ def register_blueprints(app):
resources={
r"/*": {"origins": app.config['WEB_API_CORS_ALLOW_ORIGINS']}},
supports_credentials=True,
allow_headers=['Content-Type', 'Authorization'],
allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],
methods=['GET', 'PUT', 'POST', 'DELETE', 'OPTIONS', 'PATCH'],
expose_headers=['X-Version', 'X-Env']
)
......
......@@ -79,7 +79,7 @@ class Config:
self.CONSOLE_URL = get_env('CONSOLE_URL')
self.API_URL = get_env('API_URL')
self.APP_URL = get_env('APP_URL')
self.CURRENT_VERSION = "0.3.6"
self.CURRENT_VERSION = "0.3.7"
self.COMMIT_SHA = get_env('COMMIT_SHA')
self.EDITION = "SELF_HOSTED"
self.DEPLOY_ENV = get_env('DEPLOY_ENV')
......
......@@ -9,7 +9,7 @@ api = ExternalApi(bp)
from . import setup, version, apikey, admin
# Import app controllers
from .app import app, site, completion, model_config, statistic, conversation, message, generator
from .app import app, site, completion, model_config, statistic, conversation, message, generator, audio
# Import auth controllers
from .auth import login, oauth, data_source_oauth
......@@ -21,4 +21,4 @@ from .datasets import datasets, datasets_document, datasets_segments, file, hit_
from .workspace import workspace, members, providers, account
# Import explore controllers
from .explore import installed_app, recommended_app, completion, conversation, message, parameter, saved_message
from .explore import installed_app, recommended_app, completion, conversation, message, parameter, saved_message, audio
......@@ -22,6 +22,7 @@ model_config_fields = {
'opening_statement': fields.String,
'suggested_questions': fields.Raw(attribute='suggested_questions_list'),
'suggested_questions_after_answer': fields.Raw(attribute='suggested_questions_after_answer_dict'),
'speech_to_text': fields.Raw(attribute='speech_to_text_dict'),
'more_like_this': fields.Raw(attribute='more_like_this_dict'),
'model': fields.Raw(attribute='model_dict'),
'user_input_form': fields.Raw(attribute='user_input_form_list'),
......@@ -144,6 +145,7 @@ class AppListApi(Resource):
opening_statement=model_configuration['opening_statement'],
suggested_questions=json.dumps(model_configuration['suggested_questions']),
suggested_questions_after_answer=json.dumps(model_configuration['suggested_questions_after_answer']),
speech_to_text=json.dumps(model_configuration['speech_to_text']),
more_like_this=json.dumps(model_configuration['more_like_this']),
model=json.dumps(model_configuration['model']),
user_input_form=json.dumps(model_configuration['user_input_form']),
......@@ -434,6 +436,7 @@ class AppCopy(Resource):
opening_statement=app_config.opening_statement,
suggested_questions=app_config.suggested_questions,
suggested_questions_after_answer=app_config.suggested_questions_after_answer,
speech_to_text=app_config.speech_to_text,
more_like_this=app_config.more_like_this,
model=app_config.model,
user_input_form=app_config.user_input_form,
......
# -*- coding:utf-8 -*-
import logging
from flask import request
from flask_login import login_required
from werkzeug.exceptions import InternalServerError, NotFound
import services
from controllers.console import api
from controllers.console.app import _get_app
from controllers.console.app.error import AppUnavailableError, \
ProviderNotInitializeError, CompletionRequestError, ProviderQuotaExceededError, \
ProviderModelCurrentlyNotSupportError, NoAudioUploadedError, AudioTooLargeError, \
UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from flask_restful import Resource
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
class ChatMessageAudioApi(Resource):
@setup_required
@login_required
@account_initialization_required
def post(self, app_id):
app_id = str(app_id)
app_model = _get_app(app_id, 'chat')
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(ChatMessageAudioApi, '/apps/<uuid:app_id>/audio-to-text')
\ No newline at end of file
......@@ -49,3 +49,27 @@ class AppMoreLikeThisDisabledError(BaseHTTPException):
error_code = 'app_more_like_this_disabled'
description = "The 'More like this' feature is disabled. Please refresh your page."
code = 403
class NoAudioUploadedError(BaseHTTPException):
error_code = 'no_audio_uploaded'
description = "Please upload your audio."
code = 400
class AudioTooLargeError(BaseHTTPException):
error_code = 'audio_too_large'
description = "Audio size exceeded. {message}"
code = 413
class UnsupportedAudioTypeError(BaseHTTPException):
error_code = 'unsupported_audio_type'
description = "Audio type not allowed."
code = 415
class ProviderNotSupportSpeechToTextError(BaseHTTPException):
error_code = 'provider_not_support_speech_to_text'
description = "Provider not support speech to text."
code = 400
\ No newline at end of file
......@@ -41,6 +41,7 @@ class ModelConfigResource(Resource):
opening_statement=model_configuration['opening_statement'],
suggested_questions=json.dumps(model_configuration['suggested_questions']),
suggested_questions_after_answer=json.dumps(model_configuration['suggested_questions_after_answer']),
speech_to_text=json.dumps(model_configuration['speech_to_text']),
more_like_this=json.dumps(model_configuration['more_like_this']),
model=json.dumps(model_configuration['model']),
user_input_form=json.dumps(model_configuration['user_input_form']),
......
# -*- coding:utf-8 -*-
import logging
from flask import request
from werkzeug.exceptions import InternalServerError
import services
from controllers.console import api
from controllers.console.app.error import AppUnavailableError, ProviderNotInitializeError, \
ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError, \
NoAudioUploadedError, AudioTooLargeError, \
UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
from controllers.console.explore.wraps import InstalledAppResource
from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
from models.model import AppModelConfig
class ChatAudioApi(InstalledAppResource):
def post(self, installed_app):
app_model = installed_app.app
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(ChatAudioApi, '/installed-apps/<uuid:installed_app_id>/audio-to-text', endpoint='installed_app_audio')
\ No newline at end of file
......@@ -21,6 +21,7 @@ class AppParameterApi(InstalledAppResource):
'opening_statement': fields.String,
'suggested_questions': fields.Raw,
'suggested_questions_after_answer': fields.Raw,
'speech_to_text': fields.Raw,
'more_like_this': fields.Raw,
'user_input_form': fields.Raw,
}
......@@ -35,6 +36,7 @@ class AppParameterApi(InstalledAppResource):
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list
}
......
......@@ -7,6 +7,6 @@ bp = Blueprint('service_api', __name__, url_prefix='/v1')
api = ExternalApi(bp)
from .app import completion, app, conversation, message
from .app import completion, app, conversation, message, audio
from .dataset import document
......@@ -22,6 +22,7 @@ class AppParameterApi(AppApiResource):
'opening_statement': fields.String,
'suggested_questions': fields.Raw,
'suggested_questions_after_answer': fields.Raw,
'speech_to_text': fields.Raw,
'more_like_this': fields.Raw,
'user_input_form': fields.Raw,
}
......@@ -35,6 +36,7 @@ class AppParameterApi(AppApiResource):
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list
}
......
import logging
from flask import request
from werkzeug.exceptions import InternalServerError
import services
from controllers.service_api import api
from controllers.service_api.app.error import AppUnavailableError, ProviderNotInitializeError, CompletionRequestError, ProviderQuotaExceededError, \
ProviderModelCurrentlyNotSupportError, NoAudioUploadedError, AudioTooLargeError, UnsupportedAudioTypeError, \
ProviderNotSupportSpeechToTextError
from controllers.service_api.wraps import AppApiResource
from core.llm.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from models.model import App, AppModelConfig
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
class AudioApi(AppApiResource):
def post(self, app_model: App, end_user):
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(AudioApi, '/audio-to-text')
\ No newline at end of file
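For reference, a minimal sketch of calling the new Service API route; the host, port, and app API key are placeholders, and the Bearer-token auth follows the existing service_api conventions rather than anything introduced in this commit:

import requests

# POST an audio file (multipart field must be named 'file', audio/* mimetype, <= 1 MB)
with open('sample.wav', 'rb') as f:
    resp = requests.post(
        'http://localhost:5001/v1/audio-to-text',           # '/v1' comes from the service_api blueprint url_prefix
        headers={'Authorization': 'Bearer app-xxxxxxxx'},   # placeholder app API key
        files={'file': ('sample.wav', f, 'audio/wav')},
    )
print(resp.json())  # expected to contain the transcription, e.g. {'text': '...'}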
......@@ -51,3 +51,27 @@ class CompletionRequestError(BaseHTTPException):
description = "Completion request failed."
code = 400
class NoAudioUploadedError(BaseHTTPException):
error_code = 'no_audio_uploaded'
description = "Please upload your audio."
code = 400
class AudioTooLargeError(BaseHTTPException):
error_code = 'audio_too_large'
description = "Audio size exceeded. {message}"
code = 413
class UnsupportedAudioTypeError(BaseHTTPException):
error_code = 'unsupported_audio_type'
description = "Audio type not allowed."
code = 415
class ProviderNotSupportSpeechToTextError(BaseHTTPException):
error_code = 'provider_not_support_speech_to_text'
description = "Provider not support speech to text."
code = 400
......@@ -7,4 +7,4 @@ bp = Blueprint('web', __name__, url_prefix='/api')
api = ExternalApi(bp)
from . import completion, app, conversation, message, site, saved_message
from . import completion, app, conversation, message, site, saved_message, audio, passport
......@@ -21,6 +21,7 @@ class AppParameterApi(WebApiResource):
'opening_statement': fields.String,
'suggested_questions': fields.Raw,
'suggested_questions_after_answer': fields.Raw,
'speech_to_text': fields.Raw,
'more_like_this': fields.Raw,
'user_input_form': fields.Raw,
}
......@@ -34,6 +35,7 @@ class AppParameterApi(WebApiResource):
'opening_statement': app_model_config.opening_statement,
'suggested_questions': app_model_config.suggested_questions_list,
'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
'speech_to_text': app_model_config.speech_to_text_dict,
'more_like_this': app_model_config.more_like_this_dict,
'user_input_form': app_model_config.user_input_form_list
}
......
# -*- coding:utf-8 -*-
import logging
from flask import request
from werkzeug.exceptions import InternalServerError
import services
from controllers.web import api
from controllers.web.error import AppUnavailableError, ProviderNotInitializeError, CompletionRequestError, \
ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, NoAudioUploadedError, AudioTooLargeError, \
UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
from controllers.web.wraps import WebApiResource
from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
from services.audio_service import AudioService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
from models.model import App, AppModelConfig
class AudioApi(WebApiResource):
def post(self, app_model: App, end_user):
app_model_config: AppModelConfig = app_model.app_model_config
if not app_model_config.speech_to_text_dict['enabled']:
raise AppUnavailableError()
file = request.files['file']
try:
response = AudioService.transcript(
tenant_id=app_model.tenant_id,
file=file,
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
logging.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
except AudioTooLargeServiceError as e:
raise AudioTooLargeError(str(e))
except UnsupportedAudioTypeServiceError:
raise UnsupportedAudioTypeError()
except ProviderNotSupportSpeechToTextServiceError:
raise ProviderNotSupportSpeechToTextError()
except ProviderTokenNotInitError:
raise ProviderNotInitializeError()
except QuotaExceededError:
raise ProviderQuotaExceededError()
except ModelCurrentlyNotSupportError:
raise ProviderModelCurrentlyNotSupportError()
except (LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError,
LLMRateLimitError, LLMAuthorizationError) as e:
raise CompletionRequestError(str(e))
except ValueError as e:
raise e
except Exception as e:
logging.exception("internal server error.")
raise InternalServerError()
api.add_resource(AudioApi, '/audio-to-text')
\ No newline at end of file
......@@ -62,3 +62,27 @@ class AppSuggestedQuestionsAfterAnswerDisabledError(BaseHTTPException):
error_code = 'app_suggested_questions_after_answer_disabled'
description = "The 'Suggested Questions After Answer' feature is disabled. Please refresh your page."
code = 403
class NoAudioUploadedError(BaseHTTPException):
error_code = 'no_audio_uploaded'
description = "Please upload your audio."
code = 400
class AudioTooLargeError(BaseHTTPException):
error_code = 'audio_too_large'
description = "Audio size exceeded. {message}"
code = 413
class UnsupportedAudioTypeError(BaseHTTPException):
error_code = 'unsupported_audio_type'
description = "Audio type not allowed."
code = 415
class ProviderNotSupportSpeechToTextError(BaseHTTPException):
error_code = 'provider_not_support_speech_to_text'
description = "Provider not support speech to text."
code = 400
\ No newline at end of file
# -*- coding:utf-8 -*-
import uuid
from controllers.web import api
from flask_restful import Resource
from flask import request
from werkzeug.exceptions import Unauthorized, NotFound
from models.model import Site, EndUser, App
from extensions.ext_database import db
from libs.passport import PassportService
class PassportResource(Resource):
"""Base resource for passport."""
def get(self):
app_code = request.headers.get('X-App-Code')
if app_code is None:
raise Unauthorized('X-App-Code header is missing.')
# get site from db and check if it is normal
site = db.session.query(Site).filter(
Site.code == app_code,
Site.status == 'normal'
).first()
if not site:
raise NotFound()
# get app from db and check if it is normal and enable_site
app_model = db.session.query(App).filter(App.id == site.app_id).first()
if not app_model or app_model.status != 'normal' or not app_model.enable_site:
raise NotFound()
end_user = EndUser(
tenant_id=app_model.tenant_id,
app_id=app_model.id,
type='browser',
is_anonymous=True,
session_id=generate_session_id(),
)
db.session.add(end_user)
db.session.commit()
payload = {
"iss": site.app_id,
'sub': 'Web API Passport',
'app_id': site.app_id,
'end_user_id': end_user.id,
}
tk = PassportService().issue(payload)
return {
'access_token': tk,
}
api.add_resource(PassportResource, '/passport')
def generate_session_id():
"""
Generate a unique session ID.
"""
while True:
session_id = str(uuid.uuid4())
existing_count = db.session.query(EndUser) \
.filter(EndUser.session_id == session_id).count()
if existing_count == 0:
return session_id
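A minimal sketch of the new passport flow end to end; the base URL and the 'abc123' site code below are placeholders, and the Bearer usage assumes the JWT-based web wraps introduced in this commit:

import requests

base = 'http://localhost:5001/api'  # '/api' comes from the web blueprint url_prefix

# 1. Exchange the site code (sent as X-App-Code) for a signed JWT.
passport = requests.get(f'{base}/passport', headers={'X-App-Code': 'abc123'}).json()
token = passport['access_token']

# 2. Use the JWT as a Bearer token on web API routes, e.g. the new audio-to-text endpoint.
with open('sample.wav', 'rb') as f:
    resp = requests.post(
        f'{base}/audio-to-text',
        headers={'Authorization': f'Bearer {token}'},
        files={'file': ('sample.wav', f, 'audio/wav')},  # multipart field must be named 'file'
    )
print(resp.json())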
# -*- coding:utf-8 -*-
import uuid
from functools import wraps
from flask import request, session
from flask import request
from flask_restful import Resource
from werkzeug.exceptions import NotFound, Unauthorized
from extensions.ext_database import db
from models.model import App, Site, EndUser
from models.model import App, EndUser
from libs.passport import PassportService
def validate_token(view=None):
def validate_jwt_token(view=None):
def decorator(view):
@wraps(view)
def decorated(*args, **kwargs):
site = validate_and_get_site()
app_model = db.session.query(App).filter(App.id == site.app_id).first()
if not app_model:
raise NotFound()
if app_model.status != 'normal':
raise NotFound()
if not app_model.enable_site:
raise NotFound()
end_user = create_or_update_end_user_for_session(app_model)
app_model, end_user = decode_jwt_token()
return view(app_model, end_user, *args, **kwargs)
return decorated
if view:
return decorator(view)
return decorator
def validate_and_get_site():
"""
Validate and get API token.
"""
def decode_jwt_token():
auth_header = request.headers.get('Authorization')
if auth_header is None:
raise Unauthorized('Authorization header is missing.')
......@@ -47,64 +29,20 @@ def validate_and_get_site():
if ' ' not in auth_header:
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
auth_scheme, auth_token = auth_header.split(None, 1)
auth_scheme, tk = auth_header.split(None, 1)
auth_scheme = auth_scheme.lower()
if auth_scheme != 'bearer':
raise Unauthorized('Invalid Authorization header format. Expected \'Bearer <api-key>\' format.')
site = db.session.query(Site).filter(
Site.code == auth_token,
Site.status == 'normal'
).first()
if not site:
decoded = PassportService().verify(tk)
app_model = db.session.query(App).filter(App.id == decoded['app_id']).first()
if not app_model:
raise NotFound()
end_user = db.session.query(EndUser).filter(EndUser.id == decoded['end_user_id']).first()
if not end_user:
raise NotFound()
return site
def create_or_update_end_user_for_session(app_model):
"""
Create or update session terminal based on session ID.
"""
if 'session_id' not in session:
session['session_id'] = generate_session_id()
session_id = session.get('session_id')
end_user = db.session.query(EndUser) \
.filter(
EndUser.session_id == session_id,
EndUser.type == 'browser'
).first()
if end_user is None:
end_user = EndUser(
tenant_id=app_model.tenant_id,
app_id=app_model.id,
type='browser',
is_anonymous=True,
session_id=session_id
)
db.session.add(end_user)
db.session.commit()
return end_user
def generate_session_id():
"""
Generate a unique session ID.
"""
count = 1
session_id = ''
while count != 0:
session_id = str(uuid.uuid4())
count = db.session.query(EndUser) \
.filter(EndUser.session_id == session_id).count()
return session_id
return app_model, end_user
class WebApiResource(Resource):
method_decorators = [validate_token]
method_decorators = [validate_jwt_token]
......@@ -44,6 +44,7 @@ class AzureProvider(BaseProvider):
config['openai_api_type'] = 'azure'
if model_id == 'text-embedding-ada-002':
config['deployment'] = model_id.replace('.', '') if model_id else None
config['chunk_size'] = 1
else:
config['deployment_name'] = model_id.replace('.', '') if model_id else None
return config
......
import openai
from models.provider import ProviderName
from core.llm.error_handle_wraps import handle_llm_exceptions
from core.llm.provider.base import BaseProvider
class Whisper:
def __init__(self, provider: BaseProvider):
self.provider = provider
if self.provider.get_provider_name() == ProviderName.OPENAI:
self.client = openai.Audio
self.credentials = provider.get_credentials()
@handle_llm_exceptions
def transcribe(self, file):
return self.client.transcribe(
model='whisper-1',
file=file,
api_key=self.credentials.get('openai_api_key'),
api_base=self.credentials.get('openai_api_base'),
api_type=self.credentials.get('openai_api_type'),
api_version=self.credentials.get('openai_api_version'),
)
# -*- coding:utf-8 -*-
import jwt
from werkzeug.exceptions import Unauthorized
from flask import current_app
class PassportService:
def __init__(self):
self.sk = current_app.config.get('SECRET_KEY')
def issue(self, payload):
return jwt.encode(payload, self.sk, algorithm='HS256')
def verify(self, token):
try:
return jwt.decode(token, self.sk, algorithms=['HS256'])
except jwt.exceptions.InvalidSignatureError:
raise Unauthorized('Invalid token signature.')
except jwt.exceptions.DecodeError:
raise Unauthorized('Invalid token.')
except jwt.exceptions.ExpiredSignatureError:
raise Unauthorized('Token has expired.')
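A minimal issue/verify round trip for PassportService; it assumes a Flask app context, since the signing key is read from current_app.config['SECRET_KEY']:

from flask import Flask
from libs.passport import PassportService

app = Flask(__name__)
app.config['SECRET_KEY'] = 'dev-only-secret'  # illustrative key, not a real deployment value

with app.app_context():
    token = PassportService().issue({'app_id': 'example-app', 'end_user_id': 'example-user'})
    decoded = PassportService().verify(token)
    assert decoded['end_user_id'] == 'example-user'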
"""app config add speech_to_text
Revision ID: a5b56fb053ef
Revises: d3d503a3471c
Create Date: 2023-07-06 17:55:20.894149
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a5b56fb053ef'
down_revision = 'd3d503a3471c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
batch_op.add_column(sa.Column('speech_to_text', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('app_model_configs', schema=None) as batch_op:
batch_op.drop_column('speech_to_text')
# ### end Alembic commands ###
......@@ -81,6 +81,7 @@ class AppModelConfig(db.Model):
opening_statement = db.Column(db.Text)
suggested_questions = db.Column(db.Text)
suggested_questions_after_answer = db.Column(db.Text)
speech_to_text = db.Column(db.Text)
more_like_this = db.Column(db.Text)
model = db.Column(db.Text)
user_input_form = db.Column(db.Text)
......@@ -105,6 +106,11 @@ class AppModelConfig(db.Model):
return json.loads(self.suggested_questions_after_answer) if self.suggested_questions_after_answer \
else {"enabled": False}
@property
def speech_to_text_dict(self) -> dict:
return json.loads(self.speech_to_text) if self.speech_to_text \
else {"enabled": False}
@property
def more_like_this_dict(self) -> dict:
return json.loads(self.more_like_this) if self.more_like_this else {"enabled": False}
......@@ -223,6 +229,9 @@ class Conversation(db.Model):
model_config['suggested_questions_after_answer'] = override_model_configs[
'suggested_questions_after_answer'] \
if 'suggested_questions_after_answer' in override_model_configs else {"enabled": False}
model_config['speech_to_text'] = override_model_configs[
'speech_to_text'] \
if 'speech_to_text' in override_model_configs else {"enabled": False}
model_config['more_like_this'] = override_model_configs['more_like_this'] \
if 'more_like_this' in override_model_configs else {"enabled": False}
model_config['user_input_form'] = override_model_configs['user_input_form']
......@@ -239,6 +248,7 @@ class Conversation(db.Model):
model_config['opening_statement'] = app_model_config.opening_statement
model_config['suggested_questions'] = app_model_config.suggested_questions_list
model_config['suggested_questions_after_answer'] = app_model_config.suggested_questions_after_answer_dict
model_config['speech_to_text'] = app_model_config.speech_to_text_dict
model_config['more_like_this'] = app_model_config.more_like_this_dict
model_config['user_input_form'] = app_model_config.user_input_form_list
......
......@@ -33,3 +33,4 @@ openpyxl==3.1.2
chardet~=5.1.0
docx2txt==0.8
pypdfium2==4.16.0
pyjwt~=2.6.0
\ No newline at end of file
......@@ -4,6 +4,7 @@ import uuid
from core.constant import llm_constant
from models.account import Account
from services.dataset_service import DatasetService
from core.llm.llm_builder import LLMBuilder
class AppModelConfigService:
......@@ -109,6 +110,26 @@ class AppModelConfigService:
if not isinstance(config["suggested_questions_after_answer"]["enabled"], bool):
raise ValueError("enabled in suggested_questions_after_answer must be of boolean type")
# speech_to_text
if 'speech_to_text' not in config or not config["speech_to_text"]:
config["speech_to_text"] = {
"enabled": False
}
if not isinstance(config["speech_to_text"], dict):
raise ValueError("speech_to_text must be of dict type")
if "enabled" not in config["speech_to_text"] or not config["speech_to_text"]["enabled"]:
config["speech_to_text"]["enabled"] = False
if not isinstance(config["speech_to_text"]["enabled"], bool):
raise ValueError("enabled in speech_to_text must be of boolean type")
provider_name = LLMBuilder.get_default_provider(account.current_tenant_id)
if config["speech_to_text"]["enabled"] and provider_name != 'openai':
raise ValueError("provider not support speech to text")
# more_like_this
if 'more_like_this' not in config or not config["more_like_this"]:
config["more_like_this"] = {
......@@ -277,6 +298,7 @@ class AppModelConfigService:
"opening_statement": config["opening_statement"],
"suggested_questions": config["suggested_questions"],
"suggested_questions_after_answer": config["suggested_questions_after_answer"],
"speech_to_text": config["speech_to_text"],
"more_like_this": config["more_like_this"],
"model": {
"provider": config["model"]["provider"],
......
import io
from werkzeug.datastructures import FileStorage
from core.llm.llm_builder import LLMBuilder
from core.llm.provider.llm_provider_service import LLMProviderService
from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError
from core.llm.whisper import Whisper
from models.provider import ProviderName
FILE_SIZE_LIMIT = 1 * 1024 * 1024
ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm']
class AudioService:
@classmethod
def transcript(cls, tenant_id: str, file: FileStorage):
if file is None:
raise NoAudioUploadedServiceError()
mimetype = file.mimetype
if mimetype not in [f'audio/{ext}' for ext in ALLOWED_EXTENSIONS]:
raise UnsupportedAudioTypeServiceError()
file_content = file.read()
file_size = len(file_content)
if file_size > FILE_SIZE_LIMIT:
message = f"({file_size} > {FILE_SIZE_LIMIT})"
raise AudioTooLargeServiceError(message)
provider_name = LLMBuilder.get_default_provider(tenant_id)
if provider_name != ProviderName.OPENAI.value:
raise ProviderNotSupportSpeechToTextServiceError("only the 'openai' provider is supported")
provider_service = LLMProviderService(tenant_id, provider_name)
buffer = io.BytesIO(file_content)
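# give the in-memory buffer a file name so it can be sent as a named file upload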
buffer.name = 'temp.wav'
return Whisper(provider_service.provider).transcribe(buffer)
\ No newline at end of file
# -*- coding:utf-8 -*-
__all__ = [
'base', 'conversation', 'message', 'index', 'app_model_config', 'account', 'document', 'dataset',
'app', 'completion'
'app', 'completion', 'audio'
]
from . import *
from services.errors.base import BaseServiceError
class NoAudioUploadedServiceError(BaseServiceError):
error_code = 'no_audio_uploaded'
description = "Please upload your audio."
code = 400
class AudioTooLargeServiceError(BaseServiceError):
error_code = 'audio_too_large'
description = "Audio size exceeded. {message}"
code = 413
class UnsupportedAudioTypeServiceError(BaseServiceError):
error_code = 'unsupported_audio_type'
description = "Audio type not allowed."
code = 415
class ProviderNotSupportSpeechToTextServiceError(BaseServiceError):
error_code = 'provider_not_support_speech_to_text'
description = "Provider not support speech to text. {message}"
code = 400
\ No newline at end of file
......@@ -7,7 +7,7 @@ from celery import shared_task
from core.index.index import IndexBuilder
from extensions.ext_database import db
from models.dataset import DocumentSegment, Dataset, DatasetKeywordTable, DatasetQuery, DatasetProcessRule, \
AppDatasetJoin
AppDatasetJoin, Document
@shared_task
......@@ -32,7 +32,7 @@ def clean_dataset_task(dataset_id: str, tenant_id: str, indexing_technique: str,
index_struct=index_struct
)
documents = db.session.query(DocumentSegment).filter(DocumentSegment.dataset_id == dataset_id).all()
documents = db.session.query(Document).filter(Document.dataset_id == dataset_id).all()
segments = db.session.query(DocumentSegment).filter(DocumentSegment.dataset_id == dataset_id).all()
vector_index = IndexBuilder.get_index(dataset, 'high_quality')
......
......@@ -2,7 +2,7 @@ version: '3.1'
services:
# API service
api:
image: langgenius/dify-api:0.3.6
image: langgenius/dify-api:0.3.7
restart: always
environment:
# Startup mode, 'api' starts the API server.
......@@ -110,7 +110,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.3.6
image: langgenius/dify-api:0.3.7
restart: always
environment:
# Startup mode, 'worker' starts the Celery worker for processing the queue.
......@@ -156,7 +156,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.3.6
image: langgenius/dify-web:0.3.7
restart: always
environment:
EDITION: SELF_HOSTED
......
......@@ -3,6 +3,7 @@ import type { FC } from 'react'
import React, { useEffect, useLayoutEffect, useRef, useState } from 'react'
import { useContext } from 'use-context-selector'
import cn from 'classnames'
import Recorder from 'js-audio-recorder'
import { HandThumbDownIcon, HandThumbUpIcon } from '@heroicons/react/24/outline'
import { UserCircleIcon } from '@heroicons/react/24/solid'
import { useTranslation } from 'react-i18next'
......@@ -19,6 +20,10 @@ import AppContext from '@/context/app-context'
import { Markdown } from '@/app/components/base/markdown'
import { formatNumber } from '@/utils/format'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
import VoiceInput from '@/app/components/base/voice-input'
import { Microphone01 } from '@/app/components/base/icons/src/vender/line/mediaAndDevices'
import { Microphone01 as Microphone01Solid } from '@/app/components/base/icons/src/vender/solid/mediaAndDevices'
import { XCircle } from '@/app/components/base/icons/src/vender/solid/general'
const stopIcon = (
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
......@@ -59,6 +64,7 @@ export type IChatProps = {
controlFocus?: number
isShowSuggestion?: boolean
suggestionList?: string[]
isShowSpeechToText?: boolean
}
export type MessageMore = {
......@@ -421,6 +427,7 @@ const Chat: FC<IChatProps> = ({
controlFocus,
isShowSuggestion,
suggestionList,
isShowSpeechToText,
}) => {
const { t } = useTranslation()
const { notify } = useContext(ToastContext)
......@@ -488,6 +495,15 @@ const Chat: FC<IChatProps> = ({
}
}, [suggestionList])
const [voiceInputShow, setVoiceInputShow] = useState(false)
const handleVoiceInputShow = () => {
(Recorder as any).getPermission().then(() => {
setVoiceInputShow(true)
}, () => {
logError(t('common.voiceInput.notAllow'))
})
}
return (
<div className={cn('px-3.5', 'h-full')}>
{/* Chat List */}
......@@ -565,6 +581,26 @@ const Chat: FC<IChatProps> = ({
/>
<div className="absolute top-0 right-2 flex items-center h-[48px]">
<div className={`${s.count} mr-4 h-5 leading-5 text-sm bg-gray-50 text-gray-500`}>{query.trim().length}</div>
{
query
? (
<div className='flex justify-center items-center w-8 h-8 cursor-pointer hover:bg-gray-100 rounded-lg' onClick={() => setQuery('')}>
<XCircle className='w-4 h-4 text-[#98A2B3]' />
</div>
)
: isShowSpeechToText
? (
<div
className='group flex justify-center items-center w-8 h-8 hover:bg-primary-50 rounded-lg cursor-pointer'
onClick={handleVoiceInputShow}
>
<Microphone01 className='block w-4 h-4 text-gray-500 group-hover:hidden' />
<Microphone01Solid className='hidden w-4 h-4 text-primary-600 group-hover:block' />
</div>
)
: null
}
<div className='mx-2 w-[1px] h-4 bg-black opacity-5' />
{isMobile
? sendBtn
: (
......@@ -581,6 +617,14 @@ const Chat: FC<IChatProps> = ({
</Tooltip>
)}
</div>
{
voiceInputShow && (
<VoiceInput
onCancel={() => setVoiceInputShow(false)}
onConverted={text => setQuery(text)}
/>
)
}
</div>
</div>
)
......
......@@ -79,7 +79,7 @@
.textArea {
padding-top: 13px;
padding-bottom: 13px;
padding-right: 90px;
padding-right: 130px;
border-radius: 12px;
line-height: 20px;
background-color: #fff;
......
'use client'
import type { FC } from 'react'
import React, { useEffect } from 'react'
import React, { useEffect, useState } from 'react'
import cn from 'classnames'
import { useTranslation } from 'react-i18next'
import { useBoolean, useClickAway } from 'ahooks'
......@@ -12,6 +12,7 @@ import type { CompletionParams } from '@/models/debug'
import { AppType } from '@/types/app'
import { TONE_LIST } from '@/config'
import Toast from '@/app/components/base/toast'
import { AlertTriangle } from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback'
export type IConifgModelProps = {
mode: string
......@@ -55,6 +56,7 @@ const ConifgModel: FC<IConifgModelProps> = ({
const isChatApp = mode === AppType.chat
const availableModels = options.filter(item => item.type === mode)
const [isShowConfig, { setFalse: hideConfig, toggle: toogleShowConfig }] = useBoolean(false)
const [maxTokenSettingTipVisible, setMaxTokenSettingTipVisible] = useState(false)
const configContentRef = React.useRef(null)
useClickAway(() => {
hideConfig()
......@@ -177,6 +179,14 @@ const ConifgModel: FC<IConifgModelProps> = ({
const ableStyle = 'bg-indigo-25 border-[#2A87F5] cursor-pointer'
const diabledStyle = 'bg-[#FFFCF5] border-[#F79009]'
useEffect(() => {
const max = params[4].max
if (completionParams.max_tokens > max * 2 / 3)
setMaxTokenSettingTipVisible(true)
else
setMaxTokenSettingTipVisible(false)
}, [params, completionParams.max_tokens, setMaxTokenSettingTipVisible])
return (
<div className='relative' ref={configContentRef}>
<div
......@@ -247,6 +257,14 @@ const ConifgModel: FC<IConifgModelProps> = ({
{params.map(({ key, ...param }) => (<ParamItem key={key} {...param} value={(completionParams as any)[key] as any} onChange={handleParamChange} />))}
</div>
</div>
{
maxTokenSettingTipVisible && (
<div className='flex py-2 pr-4 pl-5 bg-[#FFFAEB] border-t border-[#FEF0C7]'>
<AlertTriangle className='shrink-0 mr-2 mt-[3px] w-3 h-3 text-[#F79009]' />
<div className='mr-2 text-xs font-medium text-gray-700'>{t('common.model.params.maxTokenSettingTip')}</div>
</div>
)
}
</Panel>
)}
</div>
......
......@@ -23,3 +23,7 @@
.moreLikeThisPreview {
background-image: url(./preview-imgs/more-like-this.svg);
}
.speechToTextPreview {
background-image: url(./preview-imgs/speech-to-text.svg);
}
\ No newline at end of file
......@@ -7,10 +7,12 @@ import MoreLikeThisIcon from '../../../base/icons/more-like-this-icon'
import FeatureItem from './feature-item'
import Modal from '@/app/components/base/modal'
import SuggestedQuestionsAfterAnswerIcon from '@/app/components/app/configuration/base/icons/suggested-questions-after-answer-icon'
import { Microphone01 } from '@/app/components/base/icons/src/vender/solid/mediaAndDevices'
type IConfig = {
openingStatement: boolean
moreLikeThis: boolean
suggestedQuestionsAfterAnswer: boolean
speechToText: boolean
}
export type IChooseFeatureProps = {
......@@ -19,6 +21,7 @@ export type IChooseFeatureProps = {
config: IConfig
isChatApp: boolean
onChange: (key: string, value: boolean) => void
showSpeechToTextItem?: boolean
}
const OpeningStatementIcon = (
......@@ -33,6 +36,7 @@ const ChooseFeature: FC<IChooseFeatureProps> = ({
isChatApp,
config,
onChange,
showSpeechToTextItem,
}) => {
const { t } = useTranslation()
......@@ -69,6 +73,18 @@ const ChooseFeature: FC<IChooseFeatureProps> = ({
value={config.suggestedQuestionsAfterAnswer}
onChange={value => onChange('suggestedQuestionsAfterAnswer', value)}
/>
{
showSpeechToTextItem && (
<FeatureItem
icon={<Microphone01 className='w-4 h-4 text-[#7839EE]' />}
previewImgClassName='speechToTextPreview'
title={t('appDebug.feature.speechToText.title')}
description={t('appDebug.feature.speechToText.description')}
value={config.speechToText}
onChange={value => onChange('speechToText', value)}
/>
)
}
</>
</FeatureGroup>
)}
......
......@@ -7,6 +7,8 @@ function useFeature({
setMoreLikeThis,
suggestedQuestionsAfterAnswer,
setSuggestedQuestionsAfterAnswer,
speechToText,
setSpeechToText,
}: {
introduction: string
setIntroduction: (introduction: string) => void
......@@ -14,13 +16,14 @@ function useFeature({
setMoreLikeThis: (moreLikeThis: boolean) => void
suggestedQuestionsAfterAnswer: boolean
setSuggestedQuestionsAfterAnswer: (suggestedQuestionsAfterAnswer: boolean) => void
speechToText: boolean
setSpeechToText: (speechToText: boolean) => void
}) {
const [tempshowOpeningStatement, setTempShowOpeningStatement] = React.useState(!!introduction)
useEffect(() => {
// wait for the api data to come back
if (!!introduction) {
if (introduction)
setTempShowOpeningStatement(true)
}
}, [introduction])
// const [tempMoreLikeThis, setTempMoreLikeThis] = React.useState(moreLikeThis)
......@@ -30,15 +33,16 @@ function useFeature({
const featureConfig = {
openingStatement: tempshowOpeningStatement,
moreLikeThis: moreLikeThis,
suggestedQuestionsAfterAnswer: suggestedQuestionsAfterAnswer
moreLikeThis,
suggestedQuestionsAfterAnswer,
speechToText,
}
const handleFeatureChange = (key: string, value: boolean) => {
switch (key) {
case 'openingStatement':
if (!value) {
if (!value)
setIntroduction('')
}
setTempShowOpeningStatement(value)
break
case 'moreLikeThis':
......@@ -47,11 +51,13 @@ function useFeature({
case 'suggestedQuestionsAfterAnswer':
setSuggestedQuestionsAfterAnswer(value)
break
case 'speechToText':
setSpeechToText(value)
}
}
return {
featureConfig,
handleFeatureChange
handleFeatureChange,
}
}
......
......@@ -4,6 +4,7 @@ import React from 'react'
import { useContext } from 'use-context-selector'
import produce from 'immer'
import { useBoolean } from 'ahooks'
import useSWR from 'swr'
import DatasetConfig from '../dataset-config'
import ChatGroup from '../features/chat-group'
import ExperienceEnchanceGroup from '../features/experience-enchance-group'
......@@ -19,6 +20,7 @@ import ConfigPrompt from '@/app/components/app/configuration/config-prompt'
import ConfigVar from '@/app/components/app/configuration/config-var'
import type { PromptVariable } from '@/models/debug'
import { AppType } from '@/types/app'
import { fetchTenantInfo } from '@/service/common'
const Config: FC = () => {
const {
......@@ -33,8 +35,12 @@ const Config: FC = () => {
setMoreLikeThisConfig,
suggestedQuestionsAfterAnswerConfig,
setSuggestedQuestionsAfterAnswerConfig,
speechToTextConfig,
setSpeechToTextConfig,
} = useContext(ConfigContext)
const isChatApp = mode === AppType.chat
const { data: userInfo } = useSWR({ url: '/info' }, fetchTenantInfo)
const targetProvider = userInfo?.providers?.find(({ token_is_set, is_valid }) => token_is_set && is_valid)
const promptTemplate = modelConfig.configs.prompt_template
const promptVariables = modelConfig.configs.prompt_variables
......@@ -78,9 +84,15 @@ const Config: FC = () => {
draft.enabled = value
}))
},
speechToText: speechToTextConfig.enabled,
setSpeechToText: (value) => {
setSpeechToTextConfig(produce(speechToTextConfig, (draft) => {
draft.enabled = value
}))
},
})
const hasChatConfig = isChatApp && (featureConfig.openingStatement || featureConfig.suggestedQuestionsAfterAnswer)
const hasChatConfig = isChatApp && (featureConfig.openingStatement || featureConfig.suggestedQuestionsAfterAnswer || (featureConfig.speechToText && targetProvider?.provider_name === 'openai'))
const hasToolbox = false
const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
......@@ -110,6 +122,7 @@ const Config: FC = () => {
isChatApp={isChatApp}
config={featureConfig}
onChange={handleFeatureChange}
showSpeechToTextItem={targetProvider?.provider_name === 'openai'}
/>
)}
{showAutomatic && (
......@@ -149,6 +162,7 @@ const Config: FC = () => {
}
}
isShowSuggestedQuestionsAfterAnswer={featureConfig.suggestedQuestionsAfterAnswer}
isShowSpeechText={featureConfig.speechToText}
/>
)
}
......
......@@ -38,6 +38,7 @@ const Debug: FC<IDebug> = ({
mode,
introduction,
suggestedQuestionsAfterAnswerConfig,
speechToTextConfig,
moreLikeThisConfig,
inputs,
// setInputs,
......@@ -159,6 +160,7 @@ const Debug: FC<IDebug> = ({
enabled: false,
},
suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig,
speech_to_text: speechToTextConfig,
agent_mode: {
enabled: true,
tools: [...postDatasets],
......@@ -308,6 +310,7 @@ const Debug: FC<IDebug> = ({
user_input_form: promptVariablesToUserInputsForm(modelConfig.configs.prompt_variables),
opening_statement: introduction,
suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig,
speech_to_text: speechToTextConfig,
more_like_this: moreLikeThisConfig,
agent_mode: {
enabled: true,
......@@ -386,6 +389,7 @@ const Debug: FC<IDebug> = ({
}}
isShowSuggestion={doShowSuggestion}
suggestionList={suggestQuestions}
isShowSpeechToText={speechToTextConfig.enabled}
/>
</div>
</div>
......
'use client'
import React, { FC } from 'react'
import type { FC } from 'react'
import React from 'react'
import { useTranslation } from 'react-i18next'
import GroupName from '../../base/group-name'
import OpeningStatement, { IOpeningStatementProps } from './opening-statement'
import type { IOpeningStatementProps } from './opening-statement'
import OpeningStatement from './opening-statement'
import SuggestedQuestionsAfterAnswer from './suggested-questions-after-answer'
import { useTranslation } from 'react-i18next'
import SpeechToText from './speech-to-text'
/*
* Include
......@@ -11,15 +14,17 @@ import { useTranslation } from 'react-i18next'
* 2. Opening Suggestion
* 3. Next question suggestion
*/
interface ChatGroupProps {
type ChatGroupProps = {
isShowOpeningStatement: boolean
openingStatementConfig: IOpeningStatementProps
isShowSuggestedQuestionsAfterAnswer: boolean
isShowSpeechText: boolean
}
const ChatGroup: FC<ChatGroupProps> = ({
isShowOpeningStatement,
openingStatementConfig,
isShowSuggestedQuestionsAfterAnswer
isShowSuggestedQuestionsAfterAnswer,
isShowSpeechText,
}) => {
const { t } = useTranslation()
......@@ -33,6 +38,11 @@ const ChatGroup: FC<ChatGroupProps> = ({
{isShowSuggestedQuestionsAfterAnswer && (
<SuggestedQuestionsAfterAnswer />
)}
{
isShowSpeechText && (
<SpeechToText />
)
}
</div>
</div>
)
......
'use client'
import React, { type FC } from 'react'
import { useTranslation } from 'react-i18next'
import Panel from '@/app/components/app/configuration/base/feature-panel'
import { Microphone01 } from '@/app/components/base/icons/src/vender/solid/mediaAndDevices'
const SpeechToText: FC = () => {
const { t } = useTranslation()
return (
<Panel
title={
<div className='flex items-center gap-2'>
<div>{t('appDebug.feature.speechToText.title')}</div>
</div>
}
headerIcon={<Microphone01 className='w-4 h-4 text-[#7839EE]' />}
headerRight={
<div className='text-xs text-gray-500'>{t('appDebug.feature.speechToText.resDes')}</div>
}
noBodySpacing
/>
)
}
export default React.memo(SpeechToText)
......@@ -53,6 +53,9 @@ const Configuration: FC = () => {
const [suggestedQuestionsAfterAnswerConfig, setSuggestedQuestionsAfterAnswerConfig] = useState<MoreLikeThisConfig>({
enabled: false,
})
const [speechToTextConfig, setSpeechToTextConfig] = useState<MoreLikeThisConfig>({
enabled: false,
})
const [formattingChanged, setFormattingChanged] = useState(false)
const [inputs, setInputs] = useState<Inputs>({})
const [query, setQuery] = useState('')
......@@ -73,6 +76,7 @@ const Configuration: FC = () => {
opening_statement: '',
more_like_this: null,
suggested_questions_after_answer: null,
speech_to_text: null,
dataSets: [],
})
......@@ -102,6 +106,9 @@ const Configuration: FC = () => {
setSuggestedQuestionsAfterAnswerConfig(modelConfig.suggested_questions_after_answer || {
enabled: false,
})
setSpeechToTextConfig(modelConfig.speech_to_text || {
enabled: false,
})
}
const [hasSetCustomAPIKEY, setHasSetCustomerAPIKEY] = useState(true)
......@@ -146,6 +153,9 @@ const Configuration: FC = () => {
if (modelConfig.suggested_questions_after_answer)
setSuggestedQuestionsAfterAnswerConfig(modelConfig.suggested_questions_after_answer)
if (modelConfig.speech_to_text)
setSpeechToTextConfig(modelConfig.speech_to_text)
const config = {
modelConfig: {
provider: model.provider,
......@@ -157,6 +167,7 @@ const Configuration: FC = () => {
opening_statement: modelConfig.opening_statement,
more_like_this: modelConfig.more_like_this,
suggested_questions_after_answer: modelConfig.suggested_questions_after_answer,
speech_to_text: modelConfig.speech_to_text,
dataSets: datasets || [],
},
completionParams: model.completion_params,
......@@ -187,6 +198,7 @@ const Configuration: FC = () => {
opening_statement: introduction || '',
more_like_this: moreLikeThisConfig,
suggested_questions_after_answer: suggestedQuestionsAfterAnswerConfig,
speech_to_text: speechToTextConfig,
agent_mode: {
enabled: true,
tools: [...postDatasets],
......@@ -203,6 +215,7 @@ const Configuration: FC = () => {
draft.opening_statement = introduction
draft.more_like_this = moreLikeThisConfig
draft.suggested_questions_after_answer = suggestedQuestionsAfterAnswerConfig
draft.speech_to_text = speechToTextConfig
draft.dataSets = dataSets
})
setPublishedConfig({
......@@ -245,6 +258,8 @@ const Configuration: FC = () => {
setMoreLikeThisConfig,
suggestedQuestionsAfterAnswerConfig,
setSuggestedQuestionsAfterAnswerConfig,
speechToTextConfig,
setSpeechToTextConfig,
formattingChanged,
setFormattingChanged,
inputs,
......
'use client'
import type { FC } from 'react'
import React, { useState } from 'react'
import React, { useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import cn from 'classnames'
import copy from 'copy-to-clipboard'
import { HandThumbDownIcon, HandThumbUpIcon } from '@heroicons/react/24/outline'
import { useBoolean } from 'ahooks'
import { HashtagIcon } from '@heroicons/react/24/solid'
import { Markdown } from '@/app/components/base/markdown'
import Loading from '@/app/components/base/loading'
import Toast from '@/app/components/base/toast'
......@@ -27,6 +28,8 @@ export type IGenerationItemProps = {
isMobile?: boolean
isInstalledApp: boolean
installedAppId?: string
taskId?: string
controlClearMoreLikeThis?: number
}
export const SimpleBtn = ({ className, onClick, children }: {
......@@ -81,6 +84,8 @@ const GenerationItem: FC<IGenerationItemProps> = ({
isMobile,
isInstalledApp,
installedAppId,
taskId,
controlClearMoreLikeThis,
}) => {
const { t } = useTranslation()
const isTop = depth === 1
......@@ -112,6 +117,7 @@ const GenerationItem: FC<IGenerationItemProps> = ({
isMobile,
isInstalledApp,
installedAppId,
controlClearMoreLikeThis,
}
const handleMoreLikeThis = async () => {
......@@ -138,6 +144,14 @@ const GenerationItem: FC<IGenerationItemProps> = ({
return res
})()
useEffect(() => {
if (controlClearMoreLikeThis) {
setChildMessageId(null)
setCompletionRes('')
}
}, [controlClearMoreLikeThis])
return (
<div className={cn(className, isTop ? 'rounded-xl border border-gray-200 bg-white' : 'rounded-br-xl !mt-0')}
style={isTop
......@@ -155,6 +169,12 @@ const GenerationItem: FC<IGenerationItemProps> = ({
className={cn(!isTop && 'rounded-br-xl border-l-2 border-primary-400', 'p-4')}
style={mainStyle}
>
{(isTop && taskId) && (
<div className='mb-2 text-gray-500 border border-gray-200 box-border flex items-center rounded-md italic text-[11px] pl-1 pr-1.5 font-medium w-fit group-hover:opacity-100'>
<HashtagIcon className='w-3 h-3 text-gray-400 fill-current mr-1 stroke-current stroke-1' />
{taskId}
</div>)
}
<Markdown content={content} />
{messageId && (
<div className='flex items-center justify-between mt-3'>
......
<svg width="32" height="34" viewBox="0 0 32 34" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="File Icons/csv">
<g id="sharp" filter="url(#filter0_d_6816_769)">
<path d="M4 7.73398C4 5.49377 4 4.37367 4.43597 3.51802C4.81947 2.76537 5.43139 2.15345 6.18404 1.76996C7.03969 1.33398 8.15979 1.33398 10.4 1.33398H18.6667L28 10.6673V24.2673C28 26.5075 28 27.6276 27.564 28.4833C27.1805 29.2359 26.5686 29.8478 25.816 30.2313C24.9603 30.6673 23.8402 30.6673 21.6 30.6673H10.4C8.15979 30.6673 7.03969 30.6673 6.18404 30.2313C5.43139 29.8478 4.81947 29.2359 4.43597 28.4833C4 27.6276 4 26.5075 4 24.2673V7.73398Z" fill="#169951"/>
</g>
<g id="CSV" opacity="0.96">
<path d="M13.0846 21.8908C12.8419 23.3562 11.8246 24.0562 10.5646 24.0562C9.78992 24.0562 9.20192 23.7948 8.71659 23.3095C8.01659 22.6095 8.04459 21.6762 8.04459 20.6775C8.04459 19.6788 8.01659 18.7455 8.71659 18.0455C9.20192 17.5602 9.78992 17.2988 10.5646 17.2988C11.8246 17.2988 12.8419 17.9988 13.0846 19.4642H11.4233C11.3206 19.0908 11.1153 18.7548 10.5739 18.7548C10.2753 18.7548 10.0513 18.8762 9.92992 19.0348C9.78059 19.2308 9.67792 19.4642 9.67792 20.6775C9.67792 21.8908 9.78059 22.1242 9.92992 22.3202C10.0513 22.4788 10.2753 22.6002 10.5739 22.6002C11.1153 22.6002 11.3206 22.2642 11.4233 21.8908H13.0846Z" fill="white"/>
<path d="M18.4081 21.9655C18.4081 23.3188 17.2414 24.0562 15.8414 24.0562C14.8241 24.0562 13.9934 23.8695 13.3214 23.1788L14.3668 22.1335C14.7121 22.4788 15.3188 22.6002 15.8508 22.6002C16.4948 22.6002 16.8028 22.3855 16.8028 22.0028C16.8028 21.8442 16.7654 21.7135 16.6721 21.6108C16.5881 21.5268 16.4481 21.4615 16.2334 21.4335L15.4308 21.3215C14.8428 21.2375 14.3948 21.0415 14.0961 20.7335C13.7881 20.4162 13.6388 19.9682 13.6388 19.3988C13.6388 18.1855 14.5534 17.2988 16.0654 17.2988C17.0174 17.2988 17.7361 17.5228 18.3054 18.0922L17.2788 19.1188C16.8588 18.6988 16.3081 18.7268 16.0188 18.7268C15.4494 18.7268 15.2161 19.0535 15.2161 19.3428C15.2161 19.4268 15.2441 19.5482 15.3468 19.6508C15.4308 19.7348 15.5708 19.8188 15.8041 19.8468L16.6068 19.9588C17.2041 20.0428 17.6334 20.2295 17.9134 20.5095C18.2681 20.8548 18.4081 21.3495 18.4081 21.9655Z" fill="white"/>
<path d="M24.4166 17.3548L22.214 24.0002H21.0006L18.8073 17.3548H20.4966L21.6166 21.0695L22.718 17.3548H24.4166Z" fill="white"/>
</g>
<path id="bevel" opacity="0.5" d="M18.6667 1.33398L28.0001 10.6673H21.3334C19.8607 10.6673 18.6667 9.47341 18.6667 8.00065V1.33398Z" fill="white"/>
</g>
<defs>
<filter id="filter0_d_6816_769" x="2" y="0.333984" width="28" height="33.334" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
<feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0" result="hardAlpha"/>
<feOffset dy="1"/>
<feGaussianBlur stdDeviation="1"/>
<feColorMatrix type="matrix" values="0 0 0 0 0.0627451 0 0 0 0 0.0941176 0 0 0 0 0.156863 0 0 0 0.05 0"/>
<feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow_6816_769"/>
<feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow_6816_769" result="shape"/>
</filter>
</defs>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_6037_51601)">
<path d="M7.99992 1.33398V4.00065M7.99992 12.0007V14.6673M3.99992 8.00065H1.33325M14.6666 8.00065H11.9999M12.7189 12.7196L10.8333 10.834M12.7189 3.33395L10.8333 5.21956M3.28097 12.7196L5.16659 10.834M3.28097 3.33395L5.16659 5.21956" stroke="#667085" stroke-width="1.25" stroke-linecap="round" stroke-linejoin="round"/>
</g>
<defs>
<clipPath id="clip0_6037_51601">
<rect width="16" height="16" fill="white"/>
</clipPath>
</defs>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="x-close">
<path id="Icon" d="M12 4L4 12M4 4L12 12" stroke="#667085" stroke-width="1.25" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="x">
<path id="Icon" d="M11.3334 4.66663L4.66675 11.3333M4.66675 4.66663L11.3334 11.3333" stroke="#667085" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="microphone-01">
<path id="Icon" d="M12.6666 6.66732V8.00065C12.6666 10.578 10.5772 12.6673 7.99992 12.6673M3.33325 6.66732V8.00065C3.33325 10.578 5.42259 12.6673 7.99992 12.6673M7.99992 12.6673V14.6673M5.33325 14.6673H10.6666M7.99992 10.0007C6.89535 10.0007 5.99992 9.10522 5.99992 8.00065V3.33398C5.99992 2.22941 6.89535 1.33398 7.99992 1.33398C9.10449 1.33398 9.99992 2.22941 9.99992 3.33398V8.00065C9.99992 9.10522 9.10449 10.0007 7.99992 10.0007Z" stroke="#667085" stroke-width="1.25" stroke-linecap="round" stroke-linejoin="round"/>
</g>
</svg>
<svg width="12" height="12" viewBox="0 0 12 12" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="alert-triangle">
<path id="Solid" fill-rule="evenodd" clip-rule="evenodd" d="M6.40616 0.834185C6.14751 0.719172 5.85222 0.719172 5.59356 0.834185C5.3938 0.923011 5.26403 1.07947 5.17373 1.20696C5.08495 1.3323 4.9899 1.49651 4.88536 1.67711L0.751783 8.81693C0.646828 8.99818 0.551451 9.16289 0.486781 9.30268C0.421056 9.44475 0.349754 9.63572 0.372478 9.85369C0.401884 10.1357 0.549654 10.392 0.779012 10.5588C0.956259 10.6877 1.15726 10.7217 1.31314 10.736C1.46651 10.75 1.65684 10.75 1.86628 10.75H10.1334C10.3429 10.75 10.5332 10.75 10.6866 10.736C10.8425 10.7217 11.0435 10.6877 11.2207 10.5588C11.4501 10.392 11.5978 10.1357 11.6272 9.85369C11.65 9.63572 11.5787 9.44475 11.5129 9.30268C11.4483 9.1629 11.3529 8.9982 11.248 8.81697L7.11436 1.67709C7.00983 1.49651 6.91477 1.3323 6.82599 1.20696C6.73569 1.07947 6.60593 0.923011 6.40616 0.834185ZM6.49988 4.5C6.49988 4.22386 6.27602 4 5.99988 4C5.72374 4 5.49988 4.22386 5.49988 4.5V6.5C5.49988 6.77614 5.72374 7 5.99988 7C6.27602 7 6.49988 6.77614 6.49988 6.5V4.5ZM5.99988 8C5.72374 8 5.49988 8.22386 5.49988 8.5C5.49988 8.77614 5.72374 9 5.99988 9H6.00488C6.28102 9 6.50488 8.77614 6.50488 8.5C6.50488 8.22386 6.28102 8 6.00488 8H5.99988Z" fill="#F79009"/>
</g>
</svg>
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M21 21H3M18 11L12 17M12 17L6 11M12 17V3" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path id="Solid" fill-rule="evenodd" clip-rule="evenodd" d="M8.00008 0.666016C3.94999 0.666016 0.666748 3.94926 0.666748 7.99935C0.666748 12.0494 3.94999 15.3327 8.00008 15.3327C12.0502 15.3327 15.3334 12.0494 15.3334 7.99935C15.3334 3.94926 12.0502 0.666016 8.00008 0.666016ZM10.4715 5.52794C10.7318 5.78829 10.7318 6.2104 10.4715 6.47075L8.94289 7.99935L10.4715 9.52794C10.7318 9.78829 10.7318 10.2104 10.4715 10.4708C10.2111 10.7311 9.78903 10.7311 9.52868 10.4708L8.00008 8.94216L6.47149 10.4708C6.21114 10.7311 5.78903 10.7311 5.52868 10.4708C5.26833 10.2104 5.26833 9.78829 5.52868 9.52794L7.05727 7.99935L5.52868 6.47075C5.26833 6.2104 5.26833 5.78829 5.52868 5.52794C5.78903 5.26759 6.21114 5.26759 6.47149 5.52794L8.00008 7.05654L9.52868 5.52794C9.78903 5.26759 10.2111 5.26759 10.4715 5.52794Z" fill="#98A2B3"/>
</svg>
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="microphone-01">
<g id="Solid">
<path fill-rule="evenodd" clip-rule="evenodd" d="M8.00008 0.666016C6.52732 0.666016 5.33341 1.85992 5.33341 3.33268V7.99935C5.33341 9.47211 6.52732 10.666 8.00008 10.666C9.47284 10.666 10.6667 9.47211 10.6667 7.99935V3.33268C10.6667 1.85992 9.47284 0.666016 8.00008 0.666016Z" fill="#155EEF"/>
<path d="M4.00008 6.66602C4.00008 6.29783 3.7016 5.99935 3.33341 5.99935C2.96522 5.99935 2.66675 6.29783 2.66675 6.66602V7.99935C2.66675 10.7195 4.70319 12.9641 7.33466 13.2916C7.33384 13.3052 7.33341 13.3189 7.33341 13.3327V13.9993H5.33341C4.96522 13.9993 4.66675 14.2978 4.66675 14.666C4.66675 15.0342 4.96522 15.3327 5.33341 15.3327H10.6667C11.0349 15.3327 11.3334 15.0342 11.3334 14.666C11.3334 14.2978 11.0349 13.9993 10.6667 13.9993H8.66675V13.3327C8.66675 13.3189 8.66633 13.3052 8.6655 13.2916C11.297 12.9641 13.3334 10.7195 13.3334 7.99935V6.66602C13.3334 6.29783 13.0349 5.99935 12.6667 5.99935C12.2986 5.99935 12.0001 6.29783 12.0001 6.66602V7.99935C12.0001 10.2085 10.2092 11.9993 8.00008 11.9993C5.79094 11.9993 4.00008 10.2085 4.00008 7.99935V6.66602Z" fill="#155EEF"/>
</g>
</g>
</svg>
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<g id="stop-circle">
<path id="Solid" fill-rule="evenodd" clip-rule="evenodd" d="M9.99992 0.833984C4.93731 0.833984 0.833252 4.93804 0.833252 10.0007C0.833252 15.0633 4.93731 19.1673 9.99992 19.1673C15.0625 19.1673 19.1666 15.0633 19.1666 10.0007C19.1666 4.93804 15.0625 0.833984 9.99992 0.833984ZM6.75741 7.12232C6.66658 7.30058 6.66658 7.53394 6.66658 8.00065V12.0006C6.66658 12.4674 6.66658 12.7007 6.75741 12.879C6.83731 13.0358 6.96479 13.1633 7.12159 13.2432C7.29985 13.334 7.53321 13.334 7.99992 13.334H11.9999C12.4666 13.334 12.7 13.334 12.8782 13.2432C13.035 13.1633 13.1625 13.0358 13.2424 12.879C13.3333 12.7007 13.3333 12.4674 13.3333 12.0006V8.00065C13.3333 7.53394 13.3333 7.30058 13.2424 7.12232C13.1625 6.96552 13.035 6.83804 12.8782 6.75814C12.7 6.66732 12.4666 6.66732 11.9999 6.66732H7.99992C7.53321 6.66732 7.29985 6.66732 7.12159 6.75814C6.96479 6.83804 6.83731 6.96552 6.75741 7.12232Z" fill="#155EEF"/>
</g>
</svg>
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "32",
"height": "34",
"viewBox": "0 0 32 34",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "File Icons/csv"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "sharp",
"filter": "url(#filter0_d_6816_769)"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"d": "M4 7.73398C4 5.49377 4 4.37367 4.43597 3.51802C4.81947 2.76537 5.43139 2.15345 6.18404 1.76996C7.03969 1.33398 8.15979 1.33398 10.4 1.33398H18.6667L28 10.6673V24.2673C28 26.5075 28 27.6276 27.564 28.4833C27.1805 29.2359 26.5686 29.8478 25.816 30.2313C24.9603 30.6673 23.8402 30.6673 21.6 30.6673H10.4C8.15979 30.6673 7.03969 30.6673 6.18404 30.2313C5.43139 29.8478 4.81947 29.2359 4.43597 28.4833C4 27.6276 4 26.5075 4 24.2673V7.73398Z",
"fill": "#169951"
},
"children": []
}
]
},
{
"type": "element",
"name": "g",
"attributes": {
"id": "CSV",
"opacity": "0.96"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"d": "M13.0846 21.8908C12.8419 23.3562 11.8246 24.0562 10.5646 24.0562C9.78992 24.0562 9.20192 23.7948 8.71659 23.3095C8.01659 22.6095 8.04459 21.6762 8.04459 20.6775C8.04459 19.6788 8.01659 18.7455 8.71659 18.0455C9.20192 17.5602 9.78992 17.2988 10.5646 17.2988C11.8246 17.2988 12.8419 17.9988 13.0846 19.4642H11.4233C11.3206 19.0908 11.1153 18.7548 10.5739 18.7548C10.2753 18.7548 10.0513 18.8762 9.92992 19.0348C9.78059 19.2308 9.67792 19.4642 9.67792 20.6775C9.67792 21.8908 9.78059 22.1242 9.92992 22.3202C10.0513 22.4788 10.2753 22.6002 10.5739 22.6002C11.1153 22.6002 11.3206 22.2642 11.4233 21.8908H13.0846Z",
"fill": "white"
},
"children": []
},
{
"type": "element",
"name": "path",
"attributes": {
"d": "M18.4081 21.9655C18.4081 23.3188 17.2414 24.0562 15.8414 24.0562C14.8241 24.0562 13.9934 23.8695 13.3214 23.1788L14.3668 22.1335C14.7121 22.4788 15.3188 22.6002 15.8508 22.6002C16.4948 22.6002 16.8028 22.3855 16.8028 22.0028C16.8028 21.8442 16.7654 21.7135 16.6721 21.6108C16.5881 21.5268 16.4481 21.4615 16.2334 21.4335L15.4308 21.3215C14.8428 21.2375 14.3948 21.0415 14.0961 20.7335C13.7881 20.4162 13.6388 19.9682 13.6388 19.3988C13.6388 18.1855 14.5534 17.2988 16.0654 17.2988C17.0174 17.2988 17.7361 17.5228 18.3054 18.0922L17.2788 19.1188C16.8588 18.6988 16.3081 18.7268 16.0188 18.7268C15.4494 18.7268 15.2161 19.0535 15.2161 19.3428C15.2161 19.4268 15.2441 19.5482 15.3468 19.6508C15.4308 19.7348 15.5708 19.8188 15.8041 19.8468L16.6068 19.9588C17.2041 20.0428 17.6334 20.2295 17.9134 20.5095C18.2681 20.8548 18.4081 21.3495 18.4081 21.9655Z",
"fill": "white"
},
"children": []
},
{
"type": "element",
"name": "path",
"attributes": {
"d": "M24.4166 17.3548L22.214 24.0002H21.0006L18.8073 17.3548H20.4966L21.6166 21.0695L22.718 17.3548H24.4166Z",
"fill": "white"
},
"children": []
}
]
},
{
"type": "element",
"name": "path",
"attributes": {
"id": "bevel",
"opacity": "0.5",
"d": "M18.6667 1.33398L28.0001 10.6673H21.3334C19.8607 10.6673 18.6667 9.47341 18.6667 8.00065V1.33398Z",
"fill": "white"
},
"children": []
}
]
},
{
"type": "element",
"name": "defs",
"attributes": {},
"children": [
{
"type": "element",
"name": "filter",
"attributes": {
"id": "filter0_d_6816_769",
"x": "2",
"y": "0.333984",
"width": "28",
"height": "33.334",
"filterUnits": "userSpaceOnUse",
"color-interpolation-filters": "sRGB"
},
"children": [
{
"type": "element",
"name": "feFlood",
"attributes": {
"flood-opacity": "0",
"result": "BackgroundImageFix"
},
"children": []
},
{
"type": "element",
"name": "feColorMatrix",
"attributes": {
"in": "SourceAlpha",
"type": "matrix",
"values": "0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0",
"result": "hardAlpha"
},
"children": []
},
{
"type": "element",
"name": "feOffset",
"attributes": {
"dy": "1"
},
"children": []
},
{
"type": "element",
"name": "feGaussianBlur",
"attributes": {
"stdDeviation": "1"
},
"children": []
},
{
"type": "element",
"name": "feColorMatrix",
"attributes": {
"type": "matrix",
"values": "0 0 0 0 0.0627451 0 0 0 0 0.0941176 0 0 0 0 0.156863 0 0 0 0.05 0"
},
"children": []
},
{
"type": "element",
"name": "feBlend",
"attributes": {
"mode": "normal",
"in2": "BackgroundImageFix",
"result": "effect1_dropShadow_6816_769"
},
"children": []
},
{
"type": "element",
"name": "feBlend",
"attributes": {
"mode": "normal",
"in": "SourceGraphic",
"in2": "effect1_dropShadow_6816_769",
"result": "shape"
},
"children": []
}
]
}
]
}
]
},
"name": "Csv"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './Csv.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
export { default as Csv } from './Csv'
export { default as Md } from './Md'
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "16",
"height": "16",
"viewBox": "0 0 16 16",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"clip-path": "url(#clip0_6037_51601)"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"d": "M7.99992 1.33398V4.00065M7.99992 12.0007V14.6673M3.99992 8.00065H1.33325M14.6666 8.00065H11.9999M12.7189 12.7196L10.8333 10.834M12.7189 3.33395L10.8333 5.21956M3.28097 12.7196L5.16659 10.834M3.28097 3.33395L5.16659 5.21956",
"stroke": "currentColor",
"stroke-width": "1.25",
"stroke-linecap": "round",
"stroke-linejoin": "round"
},
"children": []
}
]
},
{
"type": "element",
"name": "defs",
"attributes": {},
"children": [
{
"type": "element",
"name": "clipPath",
"attributes": {
"id": "clip0_6037_51601"
},
"children": [
{
"type": "element",
"name": "rect",
"attributes": {
"width": "16",
"height": "16",
"fill": "white"
},
"children": []
}
]
}
]
}
]
},
"name": "Loading02"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './Loading02.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "16",
"height": "16",
"viewBox": "0 0 16 16",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "x"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Icon",
"d": "M11.3334 4.66663L4.66675 11.3333M4.66675 4.66663L11.3334 11.3333",
"stroke": "currentColor",
"stroke-width": "1.5",
"stroke-linecap": "round",
"stroke-linejoin": "round"
},
"children": []
}
]
}
]
},
"name": "X"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './X.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "16",
"height": "16",
"viewBox": "0 0 16 16",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "x-close"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Icon",
"d": "M12 4L4 12M4 4L12 12",
"stroke": "currentColor",
"stroke-width": "1.25",
"stroke-linecap": "round",
"stroke-linejoin": "round"
},
"children": []
}
]
}
]
},
"name": "XClose"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './XClose.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
export { default as Loading02 } from './Loading02'
export { default as Trash03 } from './Trash03'
export { default as XClose } from './XClose'
export { default as X } from './X'
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "16",
"height": "16",
"viewBox": "0 0 16 16",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "microphone-01"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Icon",
"d": "M12.6666 6.66732V8.00065C12.6666 10.578 10.5772 12.6673 7.99992 12.6673M3.33325 6.66732V8.00065C3.33325 10.578 5.42259 12.6673 7.99992 12.6673M7.99992 12.6673V14.6673M5.33325 14.6673H10.6666M7.99992 10.0007C6.89535 10.0007 5.99992 9.10522 5.99992 8.00065V3.33398C5.99992 2.22941 6.89535 1.33398 7.99992 1.33398C9.10449 1.33398 9.99992 2.22941 9.99992 3.33398V8.00065C9.99992 9.10522 9.10449 10.0007 7.99992 10.0007Z",
"stroke": "currentColor",
"stroke-width": "1.25",
"stroke-linecap": "round",
"stroke-linejoin": "round"
},
"children": []
}
]
}
]
},
"name": "Microphone01"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './Microphone01.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
export { default as Microphone01 } from './Microphone01'
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "12",
"height": "12",
"viewBox": "0 0 12 12",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "alert-triangle"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Solid",
"fill-rule": "evenodd",
"clip-rule": "evenodd",
"d": "M6.40616 0.834185C6.14751 0.719172 5.85222 0.719172 5.59356 0.834185C5.3938 0.923011 5.26403 1.07947 5.17373 1.20696C5.08495 1.3323 4.9899 1.49651 4.88536 1.67711L0.751783 8.81693C0.646828 8.99818 0.551451 9.16289 0.486781 9.30268C0.421056 9.44475 0.349754 9.63572 0.372478 9.85369C0.401884 10.1357 0.549654 10.392 0.779012 10.5588C0.956259 10.6877 1.15726 10.7217 1.31314 10.736C1.46651 10.75 1.65684 10.75 1.86628 10.75H10.1334C10.3429 10.75 10.5332 10.75 10.6866 10.736C10.8425 10.7217 11.0435 10.6877 11.2207 10.5588C11.4501 10.392 11.5978 10.1357 11.6272 9.85369C11.65 9.63572 11.5787 9.44475 11.5129 9.30268C11.4483 9.1629 11.3529 8.9982 11.248 8.81697L7.11436 1.67709C7.00983 1.49651 6.91477 1.3323 6.82599 1.20696C6.73569 1.07947 6.60593 0.923011 6.40616 0.834185ZM6.49988 4.5C6.49988 4.22386 6.27602 4 5.99988 4C5.72374 4 5.49988 4.22386 5.49988 4.5V6.5C5.49988 6.77614 5.72374 7 5.99988 7C6.27602 7 6.49988 6.77614 6.49988 6.5V4.5ZM5.99988 8C5.72374 8 5.49988 8.22386 5.49988 8.5C5.49988 8.77614 5.72374 9 5.99988 9H6.00488C6.28102 9 6.50488 8.77614 6.50488 8.5C6.50488 8.22386 6.28102 8 6.00488 8H5.99988Z",
"fill": "currentColor"
},
"children": []
}
]
}
]
},
"name": "AlertTriangle"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './AlertTriangle.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
export { default as AlertTriangle } from './AlertTriangle'
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "24",
"height": "24",
"viewBox": "0 0 24 24",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"d": "M21 21H3M18 11L12 17M12 17L6 11M12 17V3",
"stroke": "currentColor",
"stroke-width": "2",
"stroke-linecap": "round",
"stroke-linejoin": "round"
},
"children": []
}
]
},
"name": "Download02"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './Download02.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "16",
"height": "16",
"viewBox": "0 0 16 16",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Solid",
"fill-rule": "evenodd",
"clip-rule": "evenodd",
"d": "M8.00008 0.666016C3.94999 0.666016 0.666748 3.94926 0.666748 7.99935C0.666748 12.0494 3.94999 15.3327 8.00008 15.3327C12.0502 15.3327 15.3334 12.0494 15.3334 7.99935C15.3334 3.94926 12.0502 0.666016 8.00008 0.666016ZM10.4715 5.52794C10.7318 5.78829 10.7318 6.2104 10.4715 6.47075L8.94289 7.99935L10.4715 9.52794C10.7318 9.78829 10.7318 10.2104 10.4715 10.4708C10.2111 10.7311 9.78903 10.7311 9.52868 10.4708L8.00008 8.94216L6.47149 10.4708C6.21114 10.7311 5.78903 10.7311 5.52868 10.4708C5.26833 10.2104 5.26833 9.78829 5.52868 9.52794L7.05727 7.99935L5.52868 6.47075C5.26833 6.2104 5.26833 5.78829 5.52868 5.52794C5.78903 5.26759 6.21114 5.26759 6.47149 5.52794L8.00008 7.05654L9.52868 5.52794C9.78903 5.26759 10.2111 5.26759 10.4715 5.52794Z",
"fill": "currentColor"
},
"children": []
}
]
},
"name": "XCircle"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './XCircle.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
export { default as Download02 } from './Download02'
export { default as XCircle } from './XCircle'
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "16",
"height": "16",
"viewBox": "0 0 16 16",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "microphone-01"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "Solid"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"fill-rule": "evenodd",
"clip-rule": "evenodd",
"d": "M8.00008 0.666016C6.52732 0.666016 5.33341 1.85992 5.33341 3.33268V7.99935C5.33341 9.47211 6.52732 10.666 8.00008 10.666C9.47284 10.666 10.6667 9.47211 10.6667 7.99935V3.33268C10.6667 1.85992 9.47284 0.666016 8.00008 0.666016Z",
"fill": "currentColor"
},
"children": []
},
{
"type": "element",
"name": "path",
"attributes": {
"d": "M4.00008 6.66602C4.00008 6.29783 3.7016 5.99935 3.33341 5.99935C2.96522 5.99935 2.66675 6.29783 2.66675 6.66602V7.99935C2.66675 10.7195 4.70319 12.9641 7.33466 13.2916C7.33384 13.3052 7.33341 13.3189 7.33341 13.3327V13.9993H5.33341C4.96522 13.9993 4.66675 14.2978 4.66675 14.666C4.66675 15.0342 4.96522 15.3327 5.33341 15.3327H10.6667C11.0349 15.3327 11.3334 15.0342 11.3334 14.666C11.3334 14.2978 11.0349 13.9993 10.6667 13.9993H8.66675V13.3327C8.66675 13.3189 8.66633 13.3052 8.6655 13.2916C11.297 12.9641 13.3334 10.7195 13.3334 7.99935V6.66602C13.3334 6.29783 13.0349 5.99935 12.6667 5.99935C12.2986 5.99935 12.0001 6.29783 12.0001 6.66602V7.99935C12.0001 10.2085 10.2092 11.9993 8.00008 11.9993C5.79094 11.9993 4.00008 10.2085 4.00008 7.99935V6.66602Z",
"fill": "currentColor"
},
"children": []
}
]
}
]
}
]
},
"name": "Microphone01"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './Microphone01.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "20",
"height": "20",
"viewBox": "0 0 20 20",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "g",
"attributes": {
"id": "stop-circle"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"id": "Solid",
"fill-rule": "evenodd",
"clip-rule": "evenodd",
"d": "M9.99992 0.833984C4.93731 0.833984 0.833252 4.93804 0.833252 10.0007C0.833252 15.0633 4.93731 19.1673 9.99992 19.1673C15.0625 19.1673 19.1666 15.0633 19.1666 10.0007C19.1666 4.93804 15.0625 0.833984 9.99992 0.833984ZM6.75741 7.12232C6.66658 7.30058 6.66658 7.53394 6.66658 8.00065V12.0006C6.66658 12.4674 6.66658 12.7007 6.75741 12.879C6.83731 13.0358 6.96479 13.1633 7.12159 13.2432C7.29985 13.334 7.53321 13.334 7.99992 13.334H11.9999C12.4666 13.334 12.7 13.334 12.8782 13.2432C13.035 13.1633 13.1625 13.0358 13.2424 12.879C13.3333 12.7007 13.3333 12.4674 13.3333 12.0006V8.00065C13.3333 7.53394 13.3333 7.30058 13.2424 7.12232C13.1625 6.96552 13.035 6.83804 12.8782 6.75814C12.7 6.66732 12.4666 6.66732 11.9999 6.66732H7.99992C7.53321 6.66732 7.29985 6.66732 7.12159 6.75814C6.96479 6.83804 6.83731 6.96552 6.75741 7.12232Z",
"fill": "currentColor"
},
"children": []
}
]
}
]
},
"name": "StopCircle"
}
\ No newline at end of file
// GENERATED BY script
// DO NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './StopCircle.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
props,
ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)
export default Icon
export { default as Microphone01 } from './Microphone01'
export { default as StopCircle } from './StopCircle'
'use client'
import React, { FC } from 'react'
import type { FC } from 'react'
import React from 'react'
import cn from 'classnames'
import s from './style.module.css'
export interface ITabHeaderProps {
items: {
type Item = {
id: string
name: string
isRight?: boolean
extra?: React.ReactNode
}[]
}
export type ITabHeaderProps = {
items: Item[]
value: string
onChange: (value: string) => void
}
......@@ -17,20 +21,26 @@ export interface ITabHeaderProps {
const TabHeader: FC<ITabHeaderProps> = ({
items,
value,
onChange
onChange,
}) => {
return (
<div className='flex space-x-4 border-b border-gray-200 '>
{items.map(({ id, name, extra }) => (
const renderItem = ({ id, name, extra }: Item) => (
<div
key={id}
className={cn(id === value ? `${s.itemActive} text-gray-900` : 'text-gray-500', 'relative flex items-center pb-1.5 leading-6 cursor-pointer')}
onClick={() => onChange(id)}
>
<div className='text-base font-semibold'>{name}</div>
{extra ? extra : ''}
{extra || ''}
</div>
)
return (
<div className='flex justify-between border-b border-gray-200 '>
<div className='flex space-x-4'>
{items.filter(item => !item.isRight).map(renderItem)}
</div>
<div className='flex space-x-4'>
{items.filter(item => item.isRight).map(renderItem)}
</div>
))}
</div>
)
}
......
.wrapper {
background: linear-gradient(131deg, #2250F2 0%, #0EBCF3 100%);
box-shadow: 0px 4px 6px -2px rgba(16, 24, 40, 0.03), 0px 12px 16px -4px rgba(16, 24, 40, 0.08);
}
.convert {
background: linear-gradient(91.92deg, #104AE1 -1.74%, #0098EE 75.74%);
background-clip: text;
color: transparent;
}
\ No newline at end of file
import { useCallback, useEffect, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useParams, usePathname } from 'next/navigation'
import cn from 'classnames'
import Recorder from 'js-audio-recorder'
import { useRafInterval } from 'ahooks'
import s from './index.module.css'
import { StopCircle } from '@/app/components/base/icons/src/vender/solid/mediaAndDevices'
import { Loading02, XClose } from '@/app/components/base/icons/src/vender/line/general'
import { audioToText } from '@/service/share'
type VoiceInputTypes = {
onConverted: (text: string) => void
onCancel: () => void
}
const VoiceInput = ({
onCancel,
onConverted,
}: VoiceInputTypes) => {
const { t } = useTranslation()
const recorder = useRef(new Recorder())
const canvasRef = useRef<HTMLCanvasElement | null>(null)
const ctxRef = useRef<CanvasRenderingContext2D | null>(null)
const drawRecordId = useRef<number | null>(null)
const [originDuration, setOriginDuration] = useState(0)
const [startRecord, setStartRecord] = useState(false)
const [startConvert, setStartConvert] = useState(false)
const pathname = usePathname()
const params = useParams()
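  // Tick the elapsed-seconds counter once per second; useRafInterval hands back its cleanup
  // function, captured here as `clearInterval` (shadowing the global of the same name).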
const clearInterval = useRafInterval(() => {
setOriginDuration(originDuration + 1)
}, 1000)
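  // Live waveform: on each animation frame, read the recorder's analyser data, average it into
  // fixed-width buckets and draw one rounded bar per bucket along the bottom of the canvas.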
const drawRecord = useCallback(() => {
drawRecordId.current = requestAnimationFrame(drawRecord)
const canvas = canvasRef.current!
const ctx = ctxRef.current!
const dataUnit8Array = recorder.current.getRecordAnalyseData()
const dataArray = [].slice.call(dataUnit8Array)
const lineLength = parseInt(`${canvas.width / 3}`)
const gap = parseInt(`${1024 / lineLength}`)
ctx.clearRect(0, 0, canvas.width, canvas.height)
ctx.beginPath()
let x = 0
for (let i = 0; i < lineLength; i++) {
let v = dataArray.slice(i * gap, i * gap + gap).reduce((prev: number, next: number) => {
return prev + next
}, 0) / gap
if (v < 128)
v = 128
if (v > 178)
v = 178
const y = (v - 128) / 50 * canvas.height
ctx.moveTo(x, 16)
if (ctx.roundRect)
ctx.roundRect(x, 16 - y, 2, y, [1, 1, 0, 0])
else
ctx.rect(x, 16 - y, 2, y)
ctx.fill()
x += 3
}
ctx.closePath()
}, [])
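  // Stop recording: halt the recorder and the drawing loop, clear the canvas, package the capture
  // as a WAV file and post it to the matching audio-to-text endpoint, then hand back the transcript.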
const handleStopRecorder = useCallback(async () => {
clearInterval()
setStartRecord(false)
setStartConvert(true)
recorder.current.stop()
drawRecordId.current && cancelAnimationFrame(drawRecordId.current)
drawRecordId.current = null
const canvas = canvasRef.current!
const ctx = ctxRef.current!
ctx.clearRect(0, 0, canvas.width, canvas.height)
const wavBlob = recorder.current.getWAVBlob()
const wavFile = new File([wavBlob], 'a.wav', { type: 'audio/wav' })
const formData = new FormData()
formData.append('file', wavFile)
let url = ''
let isPublic = false
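    // Pick the endpoint by context: shared web apps (identified by the URL token) use the public
    // route, while installed Explore apps and regular apps use their app-scoped routes.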
if (params.token) {
url = '/audio-to-text'
isPublic = true
}
else if (params.appId) {
if (pathname.search('explore/installed') > -1)
url = `/installed-apps/${params.appId}/audio-to-text`
else
url = `/apps/${params.appId}/audio-to-text`
}
try {
const audioResponse = await audioToText(url, isPublic, formData)
onConverted(audioResponse.text)
onCancel()
}
catch (e) {
onConverted('')
onCancel()
}
}, [])
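  // Start capturing audio and, once the canvas context is ready, kick off the waveform loop;
  // if recording cannot start (e.g. microphone access is denied), close the voice input panel.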
const handleStartRecord = async () => {
try {
await recorder.current.start()
setStartRecord(true)
setStartConvert(false)
if (canvasRef.current && ctxRef.current)
drawRecord()
}
catch (e) {
onCancel()
}
}
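  // Scale the canvas backing store by devicePixelRatio so the waveform stays crisp on HiDPI screens.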
const initCanvas = () => {
const dpr = window.devicePixelRatio || 1
const canvas = document.getElementById('voice-input-record') as HTMLCanvasElement
if (canvas) {
const { width: cssWidth, height: cssHeight } = canvas.getBoundingClientRect()
canvas.width = dpr * cssWidth
canvas.height = dpr * cssHeight
canvasRef.current = canvas
const ctx = canvas.getContext('2d')
if (ctx) {
ctx.scale(dpr, dpr)
ctx.fillStyle = 'rgba(209, 224, 255, 1)'
ctxRef.current = ctx
}
}
}
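  // Recording is capped at 120 seconds: once the counter reaches the limit, stop and transcribe.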
if (originDuration >= 120 && startRecord)
handleStopRecorder()
useEffect(() => {
initCanvas()
handleStartRecord()
}, [])
const minutes = parseInt(`${parseInt(`${originDuration}`) / 60}`)
const seconds = parseInt(`${originDuration}`) % 60
return (
<div className={cn(s.wrapper, 'absolute inset-0 rounded-xl')}>
<div className='absolute inset-[1.5px] flex items-center pl-[14.5px] pr-[6.5px] py-[14px] bg-primary-25 rounded-[10.5px] overflow-hidden'>
<canvas id='voice-input-record' className='absolute left-0 bottom-0 w-full h-4' />
{
startConvert && <Loading02 className='animate-spin mr-2 w-4 h-4 text-primary-700' />
}
<div className='grow'>
{
startRecord && (
<div className='text-sm text-gray-500'>
{t('common.voiceInput.speaking')}
</div>
)
}
{
startConvert && (
<div className={cn(s.convert, 'text-sm')}>
{t('common.voiceInput.converting')}
</div>
)
}
</div>
{
startRecord && (
<div
className='flex justify-center items-center mr-1 w-8 h-8 hover:bg-primary-100 rounded-lg cursor-pointer'
onClick={handleStopRecorder}
>
<StopCircle className='w-5 h-5 text-primary-600' />
</div>
)
}
{
startConvert && (
<div
className='flex justify-center items-center mr-1 w-8 h-8 hover:bg-gray-200 rounded-lg cursor-pointer'
onClick={onCancel}
>
<XClose className='w-4 h-4 text-gray-500' />
</div>
)
}
<div className={`w-[45px] pl-1 text-xs font-medium ${originDuration > 110 ? 'text-[#F04438]' : 'text-gray-700'}`}>{`0${minutes.toFixed(0)}:${seconds >= 10 ? seconds : `0${seconds}`}`}</div>
</div>
</div>
)
}
export default VoiceInput
......@@ -30,6 +30,7 @@
.sourceItem .info {
display: flex;
align-items: center;
z-index: 1;
}
.sourceItem .info .name {
font-weight: 500;
......@@ -45,6 +46,7 @@
font-size: 12px;
line-height: 18px;
color: #344054;
z-index: 1;
}
.sourceItem .error {
color: #D92D20;
......
......@@ -74,7 +74,7 @@ const StepOne = ({
setCurrentFile(file)
}
const hideFilePreview = () => {
setCurrentNotionPage(undefined)
setCurrentFile(undefined)
}
const updateCurrentPage = (page: Page) => {
......
......@@ -57,7 +57,7 @@ export const useIndexStatus = () => {
return {
queuing: { color: 'orange', text: t('datasetDocuments.list.status.queuing') }, // waiting
indexing: { color: 'blue', text: t('datasetDocuments.list.status.indexing') }, // indexing splitting parsing cleaning
paused: { color: 'orange', text: t('datasetDocuments.list.status.parsed') }, // paused
paused: { color: 'orange', text: t('datasetDocuments.list.status.paused') }, // paused
error: { color: 'red', text: t('datasetDocuments.list.status.error') }, // error
available: { color: 'green', text: t('datasetDocuments.list.status.available') }, // completed,archived = false,enabled = true
enabled: { color: 'green', text: t('datasetDocuments.list.status.enabled') }, // completed,archived = false,enabled = true
......
......@@ -29,7 +29,7 @@ const InstalledApp: FC<IInstalledAppProps> = ({
<div className='h-full p-2'>
{installedApp?.app.mode === 'chat'
? (
<ChatApp isInstalledApp installedAppInfo={installedApp}/>
<ChatApp isInstalledApp installedAppInfo={installedApp} />
)
: (
<TextGenerationApp isInstalledApp installedAppInfo={installedApp}/>
......
......@@ -8,13 +8,26 @@ import { useContext } from 'use-context-selector'
import produce from 'immer'
import { useBoolean, useGetState } from 'ahooks'
import AppUnavailable from '../../base/app-unavailable'
import { checkOrSetAccessToken } from '../utils'
import useConversation from './hooks/use-conversation'
import s from './style.module.css'
import { ToastContext } from '@/app/components/base/toast'
import Sidebar from '@/app/components/share/chat/sidebar'
import ConfigSence from '@/app/components/share/chat/config-scence'
import Header from '@/app/components/share/header'
import { delConversation, fetchAppInfo, fetchAppParams, fetchChatList, fetchConversations, fetchSuggestedQuestions, pinConversation, sendChatMessage, stopChatMessageResponding, unpinConversation, updateFeedback } from '@/service/share'
import {
delConversation,
fetchAppInfo,
fetchAppParams,
fetchChatList,
fetchConversations,
fetchSuggestedQuestions,
pinConversation,
sendChatMessage,
stopChatMessageResponding,
unpinConversation,
updateFeedback,
} from '@/service/share'
import type { ConversationItem, SiteInfo } from '@/models/share'
import type { PromptConfig, SuggestedQuestionsAfterAnswerConfig } from '@/models/debug'
import type { Feedbacktype, IChatItem } from '@/app/components/app/chat'
......@@ -149,6 +162,7 @@ const Main: FC<IMainProps> = ({
}
const [suggestedQuestionsAfterAnswerConfig, setSuggestedQuestionsAfterAnswerConfig] = useState<SuggestedQuestionsAfterAnswerConfig | null>(null)
const [speechToTextConfig, setSpeechToTextConfig] = useState<SuggestedQuestionsAfterAnswerConfig | null>(null)
const [conversationIdChangeBecauseOfNew, setConversationIdChangeBecauseOfNew, getConversationIdChangeBecauseOfNew] = useGetState(false)
const [isChatStarted, { setTrue: setChatStarted, setFalse: setChatNotStarted }] = useBoolean(false)
......@@ -295,7 +309,10 @@ const Main: FC<IMainProps> = ({
return fetchConversations(isInstalledApp, installedAppInfo?.id, undefined, undefined, 100)
}
const fetchInitData = () => {
const fetchInitData = async () => {
if (!isInstalledApp)
await checkOrSetAccessToken()
return Promise.all([isInstalledApp
? {
app_id: installedAppInfo?.id,
......@@ -326,7 +343,7 @@ const Main: FC<IMainProps> = ({
const isNotNewConversation = allConversations.some(item => item.id === _conversationId)
setAllConversationList(allConversations)
// fetch new conversation info
const { user_input_form, opening_statement: introduction, suggested_questions_after_answer }: any = appParams
const { user_input_form, opening_statement: introduction, suggested_questions_after_answer, speech_to_text }: any = appParams
const prompt_variables = userInputsFormToPromptVariables(user_input_form)
if (siteInfo.default_language)
changeLanguage(siteInfo.default_language)
......@@ -341,6 +358,7 @@ const Main: FC<IMainProps> = ({
prompt_variables,
} as PromptConfig)
setSuggestedQuestionsAfterAnswerConfig(suggested_questions_after_answer)
setSpeechToTextConfig(speech_to_text)
// setConversationList(conversations as ConversationItem[])
......@@ -620,6 +638,7 @@ const Main: FC<IMainProps> = ({
controlFocus={controlFocus}
isShowSuggestion={doShowSuggestion}
suggestionList={suggestQuestions}
isShowSpeechToText={speechToTextConfig?.enabled}
/>
</div>
</div>)
......
import type { FC } from 'react'
import React from 'react'
import Header from './header'
import type { Feedbacktype } from '@/app/components/app/chat'
import { format } from '@/service/base'
export type IResultProps = {
content: string
showFeedback: boolean
feedback: Feedbacktype
onFeedback: (feedback: Feedbacktype) => void
}
const Result: FC<IResultProps> = ({
content,
showFeedback,
feedback,
onFeedback,
}) => {
return (
<div className='basis-3/4 h-max'>
<Header result={content} showFeedback={showFeedback} feedback={feedback} onFeedback={onFeedback} />
<div
className='mt-4 w-full flex text-sm leading-5 overflow-scroll font-normal text-gray-900'
style={{
maxHeight: '70vh',
}}
dangerouslySetInnerHTML={{
__html: format(content),
}}
></div>
</div>
)
}
export default React.memo(Result)
'use client'
import type { FC } from 'react'
import React from 'react'
import Header from './header'
import { Feedbacktype } from '@/app/components/app/chat'
import { format } from '@/service/base'
import React, { useEffect, useState } from 'react'
import { useBoolean } from 'ahooks'
import { t } from 'i18next'
import cn from 'classnames'
import TextGenerationRes from '@/app/components/app/text-generate/item'
import NoData from '@/app/components/share/text-generation/no-data'
import Toast from '@/app/components/base/toast'
import { sendCompletionMessage, updateFeedback } from '@/service/share'
import type { Feedbacktype } from '@/app/components/app/chat'
import Loading from '@/app/components/base/loading'
import type { PromptConfig } from '@/models/debug'
import type { InstalledApp } from '@/models/explore'
export type IResultProps = {
content: string
showFeedback: boolean
feedback: Feedbacktype
onFeedback: (feedback: Feedbacktype) => void
isCallBatchAPI: boolean
isPC: boolean
isMobile: boolean
isInstalledApp: boolean
installedAppInfo?: InstalledApp
promptConfig: PromptConfig | null
moreLikeThisEnabled: boolean
inputs: Record<string, any>
query: string
controlSend?: number
controlStopResponding?: number
onShowRes: () => void
handleSaveMessage: (messageId: string) => void
taskId?: number
onCompleted: (taskId?: number, success?: boolean) => void
}
const Result: FC<IResultProps> = ({
content,
showFeedback,
feedback,
onFeedback
isCallBatchAPI,
isPC,
isMobile,
isInstalledApp,
installedAppInfo,
promptConfig,
moreLikeThisEnabled,
inputs,
query,
controlSend,
controlStopResponding,
onShowRes,
handleSaveMessage,
taskId,
onCompleted,
}) => {
const [isResponsing, { setTrue: setResponsingTrue, setFalse: setResponsingFalse }] = useBoolean(false)
useEffect(() => {
if (controlStopResponding)
setResponsingFalse()
}, [controlStopResponding])
const [completionRes, setCompletionRes] = useState('')
const { notify } = Toast
const isNoData = !completionRes
const [messageId, setMessageId] = useState<string | null>(null)
const [feedback, setFeedback] = useState<Feedbacktype>({
rating: null,
})
const handleFeedback = async (feedback: Feedbacktype) => {
await updateFeedback({ url: `/messages/${messageId}/feedbacks`, body: { rating: feedback.rating } }, isInstalledApp, installedAppInfo?.id)
setFeedback(feedback)
}
const logError = (message: string) => {
notify({ type: 'error', message })
}
const checkCanSend = () => {
// batch will check outer
if (isCallBatchAPI)
return true
const prompt_variables = promptConfig?.prompt_variables
if (!prompt_variables || prompt_variables?.length === 0)
return true
let hasEmptyInput = false
const requiredVars = prompt_variables?.filter(({ key, name, required }) => {
const res = (!key || !key.trim()) || (!name || !name.trim()) || (required || required === undefined || required === null)
return res
}) || [] // compatible with old version
requiredVars.forEach(({ key }) => {
if (hasEmptyInput)
return
if (!inputs[key])
hasEmptyInput = true
})
if (hasEmptyInput) {
logError(t('appDebug.errorMessage.valueOfVarRequired'))
return false
}
return !hasEmptyInput
}
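  // Send the completion request and stream the answer: chunks are appended to completionRes as they
  // arrive, and the message id delivered with the stream is stored so feedback can be attached later.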
const handleSend = async () => {
if (isResponsing) {
notify({ type: 'info', message: t('appDebug.errorMessage.waitForResponse') })
return false
}
if (!checkCanSend())
return
if (!query) {
logError(t('appDebug.errorMessage.queryRequired'))
return false
}
const data = {
inputs,
query,
}
setMessageId(null)
setFeedback({
rating: null,
})
setCompletionRes('')
const res: string[] = []
let tempMessageId = ''
if (!isPC)
onShowRes()
setResponsingTrue()
sendCompletionMessage(data, {
onData: (data: string, _isFirstMessage: boolean, { messageId }: any) => {
tempMessageId = messageId
res.push(data)
setCompletionRes(res.join(''))
},
onCompleted: () => {
setResponsingFalse()
setMessageId(tempMessageId)
onCompleted(taskId, true)
},
onError() {
setResponsingFalse()
onCompleted(taskId, false)
},
}, isInstalledApp, installedAppInfo?.id)
}
const [controlClearMoreLikeThis, setControlClearMoreLikeThis] = useState(0)
useEffect(() => {
if (controlSend) {
handleSend()
setControlClearMoreLikeThis(Date.now())
}
}, [controlSend])
const renderTextGenerationRes = () => (
<TextGenerationRes
className='mt-3'
content={completionRes}
messageId={messageId}
isInWebApp
moreLikeThis={moreLikeThisEnabled}
onFeedback={handleFeedback}
feedback={feedback}
onSave={handleSaveMessage}
isMobile={isMobile}
isInstalledApp={isInstalledApp}
installedAppId={installedAppInfo?.id}
isLoading={isCallBatchAPI ? (!completionRes && isResponsing) : false}
taskId={isCallBatchAPI ? ((taskId as number) < 10 ? `0${taskId}` : `${taskId}`) : undefined}
controlClearMoreLikeThis={controlClearMoreLikeThis}
/>
)
return (
<div className='basis-3/4 h-max'>
<Header result={content} showFeedback={showFeedback} feedback={feedback} onFeedback={onFeedback} />
<div
className='mt-4 w-full flex text-sm leading-5 overflow-scroll font-normal text-gray-900'
style={{
maxHeight: '70vh'
}}
dangerouslySetInnerHTML={{
__html: format(content)
}}
></div>
<div className={cn(isNoData && !isCallBatchAPI && 'h-full')}>
{!isCallBatchAPI && (
(isResponsing && !completionRes)
? (
<div className='flex h-full w-full justify-center items-center'>
<Loading type='area' />
</div>)
: (
<>
{isNoData
? <NoData />
: renderTextGenerationRes()
}
</>
)
)}
{isCallBatchAPI && (
<div className='mt-2'>
{renderTextGenerationRes()}
</div>
)}
</div>
)
}
......
'use client'
import type { FC } from 'react'
import React from 'react'
import {
useCSVDownloader,
} from 'react-papaparse'
import { useTranslation } from 'react-i18next'
import { Download02 as DownloadIcon } from '@/app/components/base/icons/src/vender/solid/general'
export type ICSVDownloadProps = {
vars: { name: string }[]
}
const CSVDownload: FC<ICSVDownloadProps> = ({
vars,
}) => {
const { t } = useTranslation()
const { CSVDownloader, Type } = useCSVDownloader()
const addQueryContentVars = [...vars, { name: t('share.generation.queryTitle') }]
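  // Build a single empty row keyed by every variable name plus the query column; it is offered
  // below as the downloadable CSV template.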
const template = (() => {
const res: Record<string, string> = {}
addQueryContentVars.forEach((item) => {
res[item.name] = ''
})
return res
})()
return (
<div className='mt-6'>
<div className='text-sm text-gray-900 font-medium'>{t('share.generation.csvStructureTitle')}</div>
<div className='mt-2 max-h-[500px] overflow-auto'>
<table className='w-full border-separate border-spacing-0 border border-gray-200 rounded-lg text-xs'>
<thead className='text-gray-500'>
<tr>
{addQueryContentVars.map((item, i) => (
<td key={i} className='h-9 pl-4 border-b border-gray-200'>{item.name}</td>
))}
</tr>
</thead>
<tbody className='text-gray-300'>
<tr>
{addQueryContentVars.map((item, i) => (
<td key={i} className='h-9 pl-4'>{item.name} {t('share.generation.field')}</td>
))}
</tr>
</tbody>
</table>
</div>
<CSVDownloader
className="block mt-2 cursor-pointer"
type={Type.Link}
filename={'template'}
bom={true}
config={{
// delimiter: ';',
}}
data={[
template,
]}
>
<div className='flex items-center h-[18px] space-x-1 text-[#155EEF] text-xs font-medium'>
<DownloadIcon className='w-3 h-3' />
<span>{t('share.generation.downloadTemplate')}</span>
</div>
</CSVDownloader>
</div>
)
}
export default React.memo(CSVDownload)
'use client'
import type { FC } from 'react'
import React, { useState } from 'react'
import {
useCSVReader,
} from 'react-papaparse'
import cn from 'classnames'
import { useTranslation } from 'react-i18next'
import s from './style.module.css'
import { Csv as CSVIcon } from '@/app/components/base/icons/src/public/files'
export type Props = {
onParsed: (data: string[][]) => void
}
const CSVReader: FC<Props> = ({
onParsed,
}) => {
const { t } = useTranslation()
const { CSVReader } = useCSVReader()
const [zoneHover, setZoneHover] = useState(false)
return (
<CSVReader
onUploadAccepted={(results: any) => {
onParsed(results.data)
setZoneHover(false)
}}
onDragOver={(event: DragEvent) => {
event.preventDefault()
setZoneHover(true)
}}
onDragLeave={(event: DragEvent) => {
event.preventDefault()
setZoneHover(false)
}}
>
{({
getRootProps,
acceptedFile,
}: any) => (
<>
<div
{...getRootProps()}
className={cn(s.zone, zoneHover && s.zoneHover, acceptedFile ? 'px-6' : 'justify-center border-dashed text-gray-500')}
>
{
acceptedFile
? (
<div className='w-full flex items-center space-x-2'>
<CSVIcon className="shrink-0" />
<div className='flex w-0 grow'>
<span className='max-w-[calc(100%_-_30px)] text-ellipsis whitespace-nowrap overflow-hidden text-gray-800'>{acceptedFile.name.replace(/\.csv$/, '')}</span>
<span className='shrink-0 text-gray-500'>.csv</span>
</div>
</div>
)
: (
<div className='flex items-center justify-center space-x-2'>
<CSVIcon className="shrink-0" />
<div className='text-gray-500'>{t('share.generation.csvUploadTitle')}<span className='text-primary-400'>{t('share.generation.browse')}</span></div>
</div>
)}
</div>
</>
)}
</CSVReader>
)
}
export default React.memo(CSVReader)
.zone {
@apply flex items-center h-20 rounded-xl bg-gray-50 border border-gray-200 cursor-pointer text-sm font-normal;
}
.zoneHover {
@apply border-solid bg-gray-100;
}
.info {
@apply text-gray-800 text-sm;
}
\ No newline at end of file
'use client'
import type { FC } from 'react'
import React from 'react'
import {
PlayIcon,
} from '@heroicons/react/24/solid'
import { useTranslation } from 'react-i18next'
import CSVReader from './csv-reader'
import CSVDownload from './csv-download'
import Button from '@/app/components/base/button'
export type IRunBatchProps = {
vars: { name: string }[]
onSend: (data: string[][]) => void
}
const RunBatch: FC<IRunBatchProps> = ({
vars,
onSend,
}) => {
const { t } = useTranslation()
const [csvData, setCsvData] = React.useState<string[][]>([])
const [isParsed, setIsParsed] = React.useState(false)
const handleParsed = (data: string[][]) => {
setCsvData(data)
// console.log(data)
setIsParsed(true)
}
const handleSend = () => {
onSend(csvData)
}
return (
<div className='pt-4'>
<CSVReader onParsed={handleParsed} />
<CSVDownload vars={vars} />
<div className='mt-4 h-[1px] bg-gray-100'></div>
<div className='flex justify-end'>
<Button
type="primary"
className='mt-4 !h-8 !pl-3 !pr-4'
onClick={handleSend}
disabled={!isParsed}
>
<PlayIcon className="shrink-0 w-4 h-4 mr-1" aria-hidden="true" />
<span className='uppercase text-[13px]'>{t('share.generation.run')}</span>
</Button>
</div>
</div>
)
}
export default React.memo(RunBatch)
......@@ -10,7 +10,7 @@ import type { PromptConfig } from '@/models/debug'
import Button from '@/app/components/base/button'
import { DEFAULT_VALUE_MAX_LEN } from '@/config'
export type IConfigSenceProps = {
export type IRunOnceProps = {
siteInfo: SiteInfo
promptConfig: PromptConfig
inputs: Record<string, any>
......@@ -19,7 +19,7 @@ export type IConfigSenceProps = {
onQueryChange: (query: string) => void
onSend: () => void
}
const ConfigSence: FC<IConfigSenceProps> = ({
const RunOnce: FC<IRunOnceProps> = ({
promptConfig,
inputs,
onInputsChange,
......@@ -85,7 +85,7 @@ const ConfigSence: FC<IConfigSenceProps> = ({
</div>
<Button
type="primary"
className='w-[80px] !h-8 !p-0'
className='!h-8 !pl-3 !pr-4'
onClick={onSend}
disabled={!query || query === ''}
>
......@@ -100,4 +100,4 @@ const ConfigSence: FC<IConfigSenceProps> = ({
</div>
)
}
export default React.memo(ConfigSence)
export default React.memo(RunOnce)
import { fetchAccessToken } from '@/service/share'
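// Resolve the web app access token: take the share token from the last segment of the public URL,
// reuse a cached token from localStorage when present, otherwise exchange the share token for an
// access token and cache it (tokens are stored as a JSON map keyed by share token).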
export const checkOrSetAccessToken = async () => {
const sharedToken = globalThis.location.pathname.split('/').slice(-1)[0]
const accessToken = localStorage.getItem('token') || JSON.stringify({ [sharedToken]: '' })
let accessTokenJson = { [sharedToken]: '' }
try {
accessTokenJson = JSON.parse(accessToken)
}
catch (e) {
}
if (!accessTokenJson[sharedToken]) {
const res = await fetchAccessToken(sharedToken)
accessTokenJson[sharedToken] = res.access_token
localStorage.setItem('token', JSON.stringify(accessTokenJson))
}
}
import { createContext } from 'use-context-selector'
import type { CompletionParams, Inputs, ModelConfig, MoreLikeThisConfig, PromptConfig, SuggestedQuestionsAfterAnswerConfig } from '@/models/debug'
import type { CompletionParams, Inputs, ModelConfig, MoreLikeThisConfig, PromptConfig, SpeechToTextConfig, SuggestedQuestionsAfterAnswerConfig } from '@/models/debug'
import type { DataSet } from '@/models/datasets'
type IDebugConfiguration = {
......@@ -19,6 +19,8 @@ type IDebugConfiguration = {
setMoreLikeThisConfig: (moreLikeThisConfig: MoreLikeThisConfig) => void
suggestedQuestionsAfterAnswerConfig: SuggestedQuestionsAfterAnswerConfig
setSuggestedQuestionsAfterAnswerConfig: (suggestedQuestionsAfterAnswerConfig: SuggestedQuestionsAfterAnswerConfig) => void
speechToTextConfig: SpeechToTextConfig
setSpeechToTextConfig: (speechToTextConfig: SpeechToTextConfig) => void
formattingChanged: boolean
setFormattingChanged: (formattingChanged: boolean) => void
inputs: Inputs
......@@ -59,6 +61,10 @@ const DebugConfigurationContext = createContext<IDebugConfiguration>({
enabled: false,
},
setSuggestedQuestionsAfterAnswerConfig: () => { },
speechToTextConfig: {
enabled: false,
},
setSpeechToTextConfig: () => { },
formattingChanged: false,
setFormattingChanged: () => { },
inputs: {},
......
......@@ -46,6 +46,11 @@ const translation = {
generateNumTip: 'Number of results generated each time',
tip: 'Using this feature will incur additional tokens overhead',
},
speechToText: {
title: 'Speech to Text',
description: 'Once enabled, you can use voice input.',
resDes: 'Voice input is enabled',
},
dataSet: {
title: 'Context',
noData: 'You can import datasets as context',
......@@ -86,6 +91,8 @@ const translation = {
queryRequired: 'Request text is required.',
waitForResponse:
'Please wait for the response to the previous message to complete.',
waitForBatchResponse:
'Please wait for the response to the batch task to complete.',
},
chatSubTitle: 'Pre Prompt',
completionSubTitle: 'Prefix Prompt',
......