Unverified Commit 07fe10d1 authored by Yeuoly

feat: upload image

parent e8210ef7
......@@ -3,7 +3,7 @@ from typing import Generator, List, Optional, Tuple, Union, cast
from core.application_queue_manager import ApplicationQueueManager, PublishFrom
from core.entities.application_entities import AppOrchestrationConfigEntity, ModelConfigEntity, \
PromptTemplateEntity, ExternalDataVariableEntity, ApplicationGenerateEntity, InvokeFrom
PromptTemplateEntity, ExternalDataVariableEntity, ApplicationGenerateEntity, InvokeFrom, AgentEntity
from core.file.file_obj import FileObj
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
......@@ -57,6 +57,7 @@ class AppRunner:
prompt_messages, stop = self.organize_prompt_messages(
app_record=app_record,
model_config=model_config,
agent_config=None,
prompt_template_entity=prompt_template_entity,
inputs=inputs,
files=files,
......@@ -116,6 +117,7 @@ class AppRunner:
prompt_template_entity: PromptTemplateEntity,
inputs: dict[str, str],
files: list[FileObj],
agent_config: Optional[AgentEntity] = None,
query: Optional[str] = None,
context: Optional[str] = None,
memory: Optional[TokenBufferMemory] = None) \
......@@ -144,7 +146,8 @@ class AppRunner:
files=files,
context=context,
memory=memory,
model_config=model_config
model_config=model_config,
agent_config=agent_config
)
else:
prompt_messages = prompt_transform.get_advanced_prompt(
......
......@@ -80,6 +80,7 @@ class AssistantApplicationRunner(AppRunner):
prompt_messages, _ = self.organize_prompt_messages(
app_record=app_record,
model_config=app_orchestration_config.model_config,
agent_config=app_orchestration_config.agent,
prompt_template_entity=app_orchestration_config.prompt_template,
inputs=inputs,
files=files,
......@@ -148,6 +149,7 @@ class AssistantApplicationRunner(AppRunner):
prompt_messages, _ = self.organize_prompt_messages(
app_record=app_record,
model_config=app_orchestration_config.model_config,
agent_config=app_orchestration_config.agent,
prompt_template_entity=app_orchestration_config.prompt_template,
inputs=inputs,
files=files,
......@@ -188,6 +190,7 @@ class AssistantApplicationRunner(AppRunner):
prompt_message, _ = self.organize_prompt_messages(
app_record=app_record,
model_config=app_orchestration_config.model_config,
agent_config=app_orchestration_config.agent,
prompt_template_entity=app_orchestration_config.prompt_template,
inputs=inputs,
files=files,
......
......@@ -37,16 +37,20 @@ class TokenBufferMemory:
prompt_messages = []
for message in messages:
files = message.message_files
config = message.app_model_config
if files:
file_objs = message_file_parser.transform_message_files(
files, message.app_model_config
)
prompt_message_contents = [TextPromptMessageContent(data=message.query)]
for file_obj in file_objs:
prompt_message_contents.append(file_obj.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
if config.agent_mode_dict.get('enabled', False) and config.agent_mode_dict.get('strategy') in ['function_call', 'react', 'cot']:
prompt_messages.append(UserPromptMessage(content=message.query))
else:
file_objs = message_file_parser.transform_message_files(
files, message.app_model_config
)
prompt_message_contents = [TextPromptMessageContent(data=message.query)]
for file_obj in file_objs:
prompt_message_contents.append(file_obj.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:
prompt_messages.append(UserPromptMessage(content=message.query))
......
......@@ -5,7 +5,7 @@ import re
from typing import List, Optional, Tuple, cast
from core.entities.application_entities import (AdvancedCompletionPromptTemplateEntity, ModelConfigEntity,
PromptTemplateEntity)
PromptTemplateEntity, AgentEntity)
from core.file.file_obj import FileObj
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessage, PromptMessageRole,
......@@ -62,7 +62,8 @@ class PromptTransform:
files: List[FileObj],
context: Optional[str],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigEntity) -> \
model_config: ModelConfigEntity,
agent_config: AgentEntity) -> \
Tuple[List[PromptMessage], Optional[List[str]]]:
app_mode = AppMode.value_of(app_mode)
model_mode = ModelMode.value_of(model_config.mode)
......@@ -84,7 +85,8 @@ class PromptTransform:
files=files,
context=context,
memory=memory,
model_config=model_config
model_config=model_config,
agent_config=agent_config
)
else:
stops = prompt_rules.get('stops')
......@@ -219,7 +221,8 @@ class PromptTransform:
context: Optional[str],
files: List[FileObj],
memory: Optional[TokenBufferMemory],
model_config: ModelConfigEntity) -> List[PromptMessage]:
model_config: ModelConfigEntity,
agent_config: Optional[AgentEntity] = None) -> List[PromptMessage]:
prompt_messages = []
context_prompt_content = ''
......@@ -256,11 +259,13 @@ class PromptTransform:
)
if files:
prompt_message_contents = [TextPromptMessageContent(data=query)]
for file in files:
prompt_message_contents.append(file.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
if agent_config and agent_config.strategy in AgentEntity.Strategy:
prompt_messages.append(UserPromptMessage(content=query))
else:
prompt_message_contents = [TextPromptMessageContent(data=query)]
for file in files:
prompt_message_contents.append(file.prompt_message_content)
prompt_messages.append(UserPromptMessage(content=prompt_message_contents))
else:
prompt_messages.append(UserPromptMessage(content=query))
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment