Commit 0d858cc0 authored by takatost

add app convert codes

parent f0679472
......@@ -21,7 +21,7 @@ from fields.conversation_fields import (
)
from libs.helper import datetime_string
from libs.login import login_required
from models.model import Conversation, Message, MessageAnnotation, AppMode
from models.model import AppMode, Conversation, Message, MessageAnnotation
class CompletionConversationApi(Resource):
......
......@@ -26,7 +26,7 @@ from fields.conversation_fields import annotation_fields, message_detail_fields
from libs.helper import uuid_value
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from libs.login import login_required
from models.model import Conversation, Message, MessageAnnotation, MessageFeedback, AppMode
from models.model import AppMode, Conversation, Message, MessageAnnotation, MessageFeedback
from services.annotation_service import AppAnnotationService
from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError
......
from flask_restful import Resource, reqparse, marshal_with
from flask_restful import Resource, marshal_with, reqparse
from controllers.console import api
from controllers.console.app.error import DraftWorkflowNotExist
......@@ -6,8 +6,8 @@ from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required
from fields.workflow_fields import workflow_fields
from libs.login import login_required, current_user
from models.model import App, ChatbotAppEngine, AppMode
from libs.login import current_user, login_required
from models.model import App, AppMode, ChatbotAppEngine
from services.workflow_service import WorkflowService
......
......@@ -5,7 +5,7 @@ from typing import Optional, Union
from controllers.console.app.error import AppNotFoundError
from extensions.ext_database import db
from libs.login import current_user
from models.model import App, ChatbotAppEngine, AppMode
from models.model import App, AppMode, ChatbotAppEngine
def get_app_model(view: Optional[Callable] = None, *,
......
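The signature above follows the optional-argument decorator pattern, usable both bare and parameterized. A minimal standalone sketch (the `_sketch` name and the fake app record are stand-ins, not the real get_app_model implementation):

from functools import wraps
from typing import Callable, Optional


def get_app_model_sketch(view: Optional[Callable] = None, *, mode=None):
    def decorator(view_func):
        @wraps(view_func)
        def wrapper(*args, **kwargs):
            app_model = {'mode': mode or 'chat'}  # real code loads the App row here
            return view_func(*args, app_model=app_model, **kwargs)
        return wrapper

    # supports both @get_app_model_sketch and @get_app_model_sketch(mode=...)
    return decorator(view) if view is not None else decorator


@get_app_model_sketch(mode='chat')
def handler(app_model):
    return app_model


print(handler())  # -> {'mode': 'chat'}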
......@@ -22,7 +22,7 @@ from core.model_runtime.entities.message_entities import AssistantPromptMessage,
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.prompt_transform import PromptTransform
from core.prompt.simple_prompt_transform import SimplePromptTransform
from models.model import App, Message, MessageAnnotation
......@@ -140,12 +140,11 @@ class AppRunner:
        :param memory: memory
        :return:
        """
        prompt_transform = PromptTransform()
        prompt_transform = SimplePromptTransform()
        # get prompt without memory and context
        if prompt_template_entity.prompt_type == PromptTemplateEntity.PromptType.SIMPLE:
            prompt_messages, stop = prompt_transform.get_prompt(
                app_mode=app_record.mode,
                prompt_template_entity=prompt_template_entity,
                inputs=inputs,
                query=query if query else '',
......@@ -155,17 +154,7 @@ class AppRunner:
                model_config=model_config
            )
        else:
            prompt_messages = prompt_transform.get_advanced_prompt(
                app_mode=app_record.mode,
                prompt_template_entity=prompt_template_entity,
                inputs=inputs,
                query=query,
                files=files,
                context=context,
                memory=memory,
                model_config=model_config
            )
            stop = model_config.stop
            raise NotImplementedError("Advanced prompt is not supported yet.")

        return prompt_messages, stop
......
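After this hunk the method's contract is narrower: simple templates yield (prompt_messages, stop); anything else fails fast. A toy restatement with stand-in values:

# Stand-in restatement of the narrowed contract; values are illustrative.
def organize_prompt_messages(prompt_type: str):
    if prompt_type == 'simple':
        prompt_messages = [{'role': 'user', 'content': 'hello'}]  # stand-in messages
        stop = ['\nHuman:']                                       # stand-in stop words
        return prompt_messages, stop

    raise NotImplementedError("Advanced prompt is not supported yet.")


print(organize_prompt_messages('simple'))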
......@@ -15,7 +15,7 @@ from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_manager import ModelInstance
from core.moderation.base import ModerationException
from extensions.ext_database import db
from models.model import App, Conversation, Message, AppMode
from models.model import App, AppMode, Conversation, Message
logger = logging.getLogger(__name__)
......
......@@ -28,7 +28,8 @@ from core.entities.application_entities import (
    ModelConfigEntity,
    PromptTemplateEntity,
    SensitiveWordAvoidanceEntity,
    TextToSpeechEntity, VariableEntity,
    TextToSpeechEntity,
    VariableEntity,
)
from core.entities.model_entities import ModelStatus
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
......@@ -541,8 +542,7 @@ class ApplicationManager:
                        query_variable=query_variable,
                        retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.value_of(
                            dataset_configs['retrieval_model']
                        ),
                        single_strategy=datasets.get('strategy', 'router')
                    )
                )
            )
        else:
......
......@@ -156,7 +156,6 @@ class DatasetRetrieveConfigEntity(BaseModel):
    query_variable: Optional[str] = None  # Only when app mode is completion
    retrieve_strategy: RetrieveStrategy
    single_strategy: Optional[str] = None  # for temp
    top_k: Optional[int] = None
    score_threshold: Optional[float] = None
    reranking_model: Optional[dict] = None
......
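For reference, a sketch of the entity after the removal, with a RetrieveStrategy enum inferred from the value_of call in the previous hunk (the enum values are assumptions):

from enum import Enum
from typing import Optional

from pydantic import BaseModel


class RetrieveStrategy(Enum):
    # values assumed from the usual single/multiple dataset retrieval modes
    SINGLE = 'single'
    MULTIPLE = 'multiple'

    @classmethod
    def value_of(cls, value: str) -> 'RetrieveStrategy':
        for strategy in cls:
            if strategy.value == value:
                return strategy
        raise ValueError(f'invalid retrieve strategy value {value}')


class DatasetRetrieveConfigEntity(BaseModel):
    query_variable: Optional[str] = None  # only when app mode is completion
    retrieve_strategy: RetrieveStrategy
    top_k: Optional[int] = None
    score_threshold: Optional[float] = None
    reranking_model: Optional[dict] = None


print(RetrieveStrategy.value_of('single'))  # -> RetrieveStrategy.SINGLE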
from typing import Optional

from core.entities.application_entities import PromptTemplateEntity, ModelConfigEntity, \
    AdvancedCompletionPromptTemplateEntity
from core.file.file_obj import FileObj
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageRole, UserPromptMessage, \
    SystemPromptMessage, AssistantPromptMessage, TextPromptMessageContent
from core.prompt.prompt_template import PromptTemplateParser
from core.prompt.prompt_transform import PromptTransform
from core.prompt.simple_prompt_transform import ModelMode


class AdvancePromptTransform(PromptTransform):
    """
    Advanced Prompt Transform for Workflow LLM Node.
    """

    def get_prompt(self, prompt_template_entity: PromptTemplateEntity,
                   inputs: dict,
                   query: str,
                   files: list[FileObj],
                   context: Optional[str],
                   memory: Optional[TokenBufferMemory],
                   model_config: ModelConfigEntity) -> list[PromptMessage]:
        prompt_messages = []

        model_mode = ModelMode.value_of(model_config.mode)
        if model_mode == ModelMode.COMPLETION:
            prompt_messages = self._get_completion_model_prompt_messages(
                prompt_template_entity=prompt_template_entity,
                inputs=inputs,
                files=files,
                context=context,
                memory=memory,
                model_config=model_config
            )
        elif model_mode == ModelMode.CHAT:
            prompt_messages = self._get_chat_model_prompt_messages(
                prompt_template_entity=prompt_template_entity,
                inputs=inputs,
                query=query,
                files=files,
                context=context,
                memory=memory,
                model_config=model_config
            )

        return prompt_messages

    def _get_completion_model_prompt_messages(self,
                                              prompt_template_entity: PromptTemplateEntity,
                                              inputs: dict,
                                              files: list[FileObj],
                                              context: Optional[str],
                                              memory: Optional[TokenBufferMemory],
                                              model_config: ModelConfigEntity) -> list[PromptMessage]:
        """
        Get completion model prompt messages.
        """
        raw_prompt = prompt_template_entity.advanced_completion_prompt_template.prompt

        prompt_messages = []

        prompt_template = PromptTemplateParser(template=raw_prompt)
        prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs}

        self._set_context_variable(context, prompt_template, prompt_inputs)

        role_prefix = prompt_template_entity.advanced_completion_prompt_template.role_prefix
        self._set_histories_variable(
            memory=memory,
            raw_prompt=raw_prompt,
            role_prefix=role_prefix,
            prompt_template=prompt_template,
            prompt_inputs=prompt_inputs,
            model_config=model_config
        )

        prompt = prompt_template.format(
            prompt_inputs
        )

        if files:
            prompt_message_contents = [TextPromptMessageContent(data=prompt)]
            for file in files:
                prompt_message_contents.append(file.prompt_message_content)

            prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
        else:
            prompt_messages.append(UserPromptMessage(content=prompt))

        return prompt_messages

    def _get_chat_model_prompt_messages(self,
                                        prompt_template_entity: PromptTemplateEntity,
                                        inputs: dict,
                                        query: str,
                                        files: list[FileObj],
                                        context: Optional[str],
                                        memory: Optional[TokenBufferMemory],
                                        model_config: ModelConfigEntity) -> list[PromptMessage]:
        """
        Get chat model prompt messages.
        """
        raw_prompt_list = prompt_template_entity.advanced_chat_prompt_template.messages

        prompt_messages = []

        for prompt_item in raw_prompt_list:
            raw_prompt = prompt_item.text

            prompt_template = PromptTemplateParser(template=raw_prompt)
            prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs}

            self._set_context_variable(context, prompt_template, prompt_inputs)

            prompt = prompt_template.format(
                prompt_inputs
            )

            if prompt_item.role == PromptMessageRole.USER:
                prompt_messages.append(UserPromptMessage(content=prompt))
            elif prompt_item.role == PromptMessageRole.SYSTEM and prompt:
                prompt_messages.append(SystemPromptMessage(content=prompt))
            elif prompt_item.role == PromptMessageRole.ASSISTANT:
                prompt_messages.append(AssistantPromptMessage(content=prompt))

        if memory:
            self._append_chat_histories(memory, prompt_messages, model_config)

            if files:
                prompt_message_contents = [TextPromptMessageContent(data=query)]
                for file in files:
                    prompt_message_contents.append(file.prompt_message_content)

                prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
            else:
                prompt_messages.append(UserPromptMessage(content=query))
        elif files:
            # get last message
            last_message = prompt_messages[-1] if prompt_messages else None
            if last_message and last_message.role == PromptMessageRole.USER:
                # get last user message content and add files
                prompt_message_contents = [TextPromptMessageContent(data=last_message.content)]
                for file in files:
                    prompt_message_contents.append(file.prompt_message_content)

                last_message.content = prompt_message_contents
            else:
                prompt_message_contents = [TextPromptMessageContent(data=query)]
                for file in files:
                    prompt_message_contents.append(file.prompt_message_content)

                prompt_messages.append(UserPromptMessage(content=prompt_message_contents))

        return prompt_messages

    def _set_context_variable(self, context: str, prompt_template: PromptTemplateParser, prompt_inputs: dict) -> None:
        if '#context#' in prompt_template.variable_keys:
            if context:
                prompt_inputs['#context#'] = context
            else:
                prompt_inputs['#context#'] = ''

    def _set_query_variable(self, query: str, prompt_template: PromptTemplateParser, prompt_inputs: dict) -> None:
        if '#query#' in prompt_template.variable_keys:
            if query:
                prompt_inputs['#query#'] = query
            else:
                prompt_inputs['#query#'] = ''

    def _set_histories_variable(self, memory: TokenBufferMemory,
                                raw_prompt: str,
                                role_prefix: AdvancedCompletionPromptTemplateEntity.RolePrefixEntity,
                                prompt_template: PromptTemplateParser,
                                prompt_inputs: dict,
                                model_config: ModelConfigEntity) -> None:
        if '#histories#' in prompt_template.variable_keys:
            if memory:
                inputs = {'#histories#': '', **prompt_inputs}
                prompt_template = PromptTemplateParser(raw_prompt)
                prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs}

                tmp_human_message = UserPromptMessage(
                    content=prompt_template.format(prompt_inputs)
                )

                rest_tokens = self._calculate_rest_token([tmp_human_message], model_config)

                histories = self._get_history_messages_from_memory(
                    memory=memory,
                    max_token_limit=rest_tokens,
                    human_prefix=role_prefix.user,
                    ai_prefix=role_prefix.assistant
                )
                prompt_inputs['#histories#'] = histories
            else:
                prompt_inputs['#histories#'] = ''
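The trailing elif branch is the subtle part of _get_chat_model_prompt_messages. A self-contained toy of that file-attachment rule, with Msg standing in for PromptMessage and plain strings for file contents:

from dataclasses import dataclass


@dataclass
class Msg:  # stand-in for PromptMessage
    role: str
    content: object


def attach_files(prompt_messages: list, query: str, files: list) -> list:
    # no-memory branch: merge files into the trailing user message if present,
    # otherwise open a new user message carrying query + files
    last_message = prompt_messages[-1] if prompt_messages else None
    if last_message is not None and last_message.role == 'user':
        last_message.content = [last_message.content, *files]
    else:
        prompt_messages.append(Msg('user', [query, *files]))
    return prompt_messages


msgs = attach_files([Msg('system', 'be helpful'), Msg('user', 'hi')], 'hi', ['img.png'])
print(msgs[-1].content)  # -> ['hi', 'img.png']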
{
"human_prefix": "用户",
"assistant_prefix": "助手",
"context_prompt": "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{context}}\n```\n\n",
"histories_prompt": "用户和助手的历史对话内容如下:\n```\n{{histories}}\n```\n\n",
"context_prompt": "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n",
"histories_prompt": "用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n",
"system_prompt_orders": [
"context_prompt",
"pre_prompt",
"histories_prompt"
],
"query_prompt": "\n\n用户:{{query}}",
"query_prompt": "\n\n用户:{{#query#}}",
"stops": ["用户:"]
}
\ No newline at end of file
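Each duplicated pair of lines in these template files is the same rename: the reserved slots move from {{context}}/{{histories}}/{{query}} to {{#context#}}/{{#histories#}}/{{#query#}}, a namespace user-defined form variables cannot occupy. A toy illustration (the regex approximates the parser's):

import re

# Reserved {{#name#}} slots and plain {{name}} user variables match as different
# keys, so a form variable literally named "context" can no longer shadow the slot.
REGEX = re.compile(r'\{\{(#[a-z_]+#|[a-zA-Z_][a-zA-Z0-9_]*)\}\}')

template = 'Material: {{#context#}} / form field: {{context}}'
inputs = {'#context#': '<retrieved docs>', 'context': 'a user value'}
print(REGEX.sub(lambda m: str(inputs.get(m.group(1), m.group(0))), template))
# -> 'Material: <retrieved docs> / form field: a user value'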
{
"context_prompt": "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{context}}\n```\n",
"context_prompt": "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n",
"system_prompt_orders": [
"context_prompt",
"pre_prompt"
],
"query_prompt": "{{query}}",
"query_prompt": "{{#query#}}",
"stops": null
}
\ No newline at end of file
{
"human_prefix": "Human",
"assistant_prefix": "Assistant",
"context_prompt": "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{context}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n\n",
"histories_prompt": "Here is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{histories}}\n</histories>\n\n",
"context_prompt": "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{#context#}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n\n",
"histories_prompt": "Here is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n",
"system_prompt_orders": [
"context_prompt",
"pre_prompt",
"histories_prompt"
],
"query_prompt": "\n\nHuman: {{query}}\n\nAssistant: ",
"query_prompt": "\n\nHuman: {{#query#}}\n\nAssistant: ",
"stops": ["\nHuman:", "</histories>"]
}
{
"context_prompt": "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{context}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n\n",
"context_prompt": "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{#context#}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n\n",
"system_prompt_orders": [
"context_prompt",
"pre_prompt"
],
"query_prompt": "{{query}}",
"query_prompt": "{{#query#}}",
"stops": null
}
\ No newline at end of file
from core.prompt.prompt_template import PromptTemplateParser


class PromptBuilder:
    @classmethod
    def parse_prompt(cls, prompt: str, inputs: dict) -> str:
        prompt_template = PromptTemplateParser(prompt)
        prompt_inputs = {k: inputs[k] for k in prompt_template.variable_keys if k in inputs}
        prompt = prompt_template.format(prompt_inputs)
        return prompt
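parse_prompt is a thin convenience over PromptTemplateParser: keep only the inputs the template declares, then format. A hypothetical call:

# Hypothetical call; the template string and inputs are illustrative.
prompt = PromptBuilder.parse_prompt(
    prompt='Translate to {{language}}: {{text}}',
    inputs={'language': 'French', 'text': 'Hello', 'unused': 'dropped by the key filter'}
)
# -> 'Translate to French: Hello'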
......@@ -32,7 +32,8 @@ class PromptTemplateParser:
                return PromptTemplateParser.remove_template_variables(value)
            return value

        return re.sub(REGEX, replacer, self.template)
        prompt = re.sub(REGEX, replacer, self.template)
        return re.sub(r'<\|.*?\|>', '', prompt)

    @classmethod
    def remove_template_variables(cls, text: str):
......
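The second substitution pass added here scrubs <|...|> special-token sequences from the rendered prompt, including any smuggled in through substituted values. A self-contained approximation:

import re

# Approximation of PromptTemplateParser.format after this hunk; the real REGEX
# additionally whitelists the reserved #context#/#query#/#histories# keys.
REGEX = re.compile(r'\{\{(#?[a-zA-Z_][a-zA-Z0-9_]*#?)\}\}')


def format_template(template: str, inputs: dict) -> str:
    prompt = REGEX.sub(lambda m: str(inputs.get(m.group(1), m.group(0))), template)
    # new in this commit: scrub any <|...|> sequences from the final prompt
    return re.sub(r'<\|.*?\|>', '', prompt)


print(format_template('Hi {{name}}<|endoftext|>!', {'name': 'Ada<|im_start|>'}))
# -> 'Hi Ada!'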
......@@ -2,7 +2,6 @@ from flask_restful import fields
from libs.helper import TimestampField

annotation_fields = {
    "id": fields.String,
    "question": fields.String,
......
......@@ -5,7 +5,6 @@ from flask_restful import fields
from fields.member_fields import simple_account_fields
from libs.helper import TimestampField

workflow_fields = {
    'id': fields.String,
    'graph': fields.Raw(attribute=lambda x: json.loads(x.graph) if hasattr(x, 'graph') else None),
......
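The graph column is stored as serialized JSON and decoded at marshal time through the attribute callable; a quick check with a stand-in object:

import json

from flask_restful import fields, marshal

workflow_fields_sketch = {
    'id': fields.String,
    'graph': fields.Raw(attribute=lambda x: json.loads(x.graph) if hasattr(x, 'graph') else None),
}


class FakeWorkflow:
    id = 'wf-1'
    graph = '{"nodes": []}'


print(marshal(FakeWorkflow(), workflow_fields_sketch))
# -> {'id': 'wf-1', 'graph': {'nodes': []}} (an OrderedDict)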