Commit cce70695 authored by crazywoola

resolve: conflict

parents 638f2178 0d92bb07
@@ -79,7 +79,7 @@ class Config:
         self.CONSOLE_URL = get_env('CONSOLE_URL')
         self.API_URL = get_env('API_URL')
         self.APP_URL = get_env('APP_URL')
-        self.CURRENT_VERSION = "0.3.5"
+        self.CURRENT_VERSION = "0.3.6"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')
@@ -2,7 +2,7 @@ import logging

 from langchain import PromptTemplate
 from langchain.chat_models.base import BaseChatModel
-from langchain.schema import HumanMessage, OutputParserException
+from langchain.schema import HumanMessage, OutputParserException, BaseMessage

 from core.constant import llm_constant
 from core.llm.llm_builder import LLMBuilder
@@ -23,10 +23,10 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_name(cls, tenant_id: str, query, answer):
         prompt = CONVERSATION_TITLE_PROMPT
-        prompt = prompt.format(query=query, answer=answer)
+        prompt = prompt.format(query=query)

         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             max_tokens=50
         )
@@ -40,11 +40,12 @@ class LLMGenerator:
     @classmethod
     def generate_conversation_summary(cls, tenant_id: str, messages):
         max_tokens = 200
+        model = 'gpt-3.5-turbo'

         prompt = CONVERSATION_SUMMARY_PROMPT
         prompt_with_empty_context = prompt.format(context='')
-        prompt_tokens = TokenCalculator.get_num_tokens(generate_base_model, prompt_with_empty_context)
-        rest_tokens = llm_constant.max_context_token_length[generate_base_model] - prompt_tokens - max_tokens
+        prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context)
+        rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens

         context = ''
         for message in messages:
@@ -52,14 +53,14 @@ class LLMGenerator:
                 continue

             message_qa_text = "Human:" + message.query + "\nAI:" + message.answer + "\n"
-            if rest_tokens - TokenCalculator.get_num_tokens(generate_base_model, context + message_qa_text) > 0:
+            if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
                 context += message_qa_text

         prompt = prompt.format(context=context)

         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name=model,
             max_tokens=max_tokens
         )
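Note on the budgeting above: the hunk reserves room for both the rendered prompt and the completion before packing conversation history into `{context}`. A minimal standalone sketch of the same arithmetic, assuming `tiktoken` as the token counter and a 4096-token window for gpt-3.5-turbo (stand-ins for the repo's `TokenCalculator` and `llm_constant.max_context_token_length`; not the actual classes):

```python
# Hedged sketch of the token budget; tiktoken and the 4096-token window are
# assumptions standing in for TokenCalculator / llm_constant.
import tiktoken

def build_context(messages, prompt_with_empty_context: str,
                  model: str = 'gpt-3.5-turbo',
                  max_context_tokens: int = 4096,
                  max_tokens: int = 200) -> str:
    enc = tiktoken.encoding_for_model(model)
    prompt_tokens = len(enc.encode(prompt_with_empty_context))
    # whatever the prompt and the completion don't use is left for history
    rest_tokens = max_context_tokens - prompt_tokens - max_tokens
    context = ''
    for query, answer in messages:  # (query, answer) pairs for illustration
        message_qa_text = "Human:" + query + "\nAI:" + answer + "\n"
        if rest_tokens - len(enc.encode(context + message_qa_text)) > 0:
            context += message_qa_text
    return context
```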
@@ -102,7 +103,7 @@ class LLMGenerator:
         llm: StreamableOpenAI = LLMBuilder.to_llm(
             tenant_id=tenant_id,
-            model_name=generate_base_model,
+            model_name='gpt-3.5-turbo',
             temperature=0,
             max_tokens=256
         )
@@ -114,6 +115,8 @@ class LLMGenerator:
         try:
             output = llm(query)
+            if isinstance(output, BaseMessage):
+                output = output.content
             questions = output_parser.parse(output)
         except Exception:
             logging.exception("Error generating suggested questions after answer")
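The two added lines guard against chat models, which return a message object rather than a plain string, so the output parser would otherwise receive the wrong type. A small illustration using `AIMessage`/`BaseMessage` from the same `langchain.schema` module (the sample content string is invented):

```python
# Illustration of the unwrap; the content string is invented for the example.
from langchain.schema import AIMessage, BaseMessage

output = AIMessage(content='["What else can you do?", "How does this work?"]')
if isinstance(output, BaseMessage):  # chat models return messages, not str
    output = output.content          # parsers expect plain text
print(output)
```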
@@ -346,10 +346,10 @@ class IndexingRunner:
         return text_docs

     def filter_string(self, text):
-        text = text.replace('<|', '<')
-        text = text.replace('|>', '>')
-        pattern = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]')
-        return pattern.sub('', text)
+        text = re.sub(r'<\|', '<', text)
+        text = re.sub(r'\|>', '>', text)
+        text = re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]', '', text)
+        return text

     def _get_splitter(self, processing_rule: DatasetProcessRule) -> TextSplitter:
         """
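Switching from `str.replace` plus a precompiled pattern to three `re.sub` calls keeps the behavior the same: special-token markers such as `<|endoftext|>` are de-fanged and non-printable bytes are dropped. A standalone sketch of the same filter (the sample input is invented):

```python
# Same regexes as the diff; the sample input is invented for illustration.
import re

def filter_string(text: str) -> str:
    text = re.sub(r'<\|', '<', text)  # defuse special-token markers like <|endoftext|>
    text = re.sub(r'\|>', '>', text)
    # drop C0 control chars (keeping \t, \n, \r), DEL, and bytes 0x80-0xFF
    return re.sub(r'[\x00-\x08\x0B\x0C\x0E-\x1F\x7F\x80-\xFF]', '', text)

print(filter_string('a\x00b<|endoftext|>'))  # -> 'ab<endoftext>'
```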
 CONVERSATION_TITLE_PROMPT = (
-    "Human:{{query}}\n-----\n"
+    "Human:{query}\n-----\n"
     "Help me summarize the intent of what the human said and provide a title, the title should not exceed 20 words.\n"
     "If the human said is conducted in Chinese, you should return a Chinese title.\n"
     "If the human said is conducted in English, you should return an English title.\n"
@@ -19,7 +19,7 @@ CONVERSATION_SUMMARY_PROMPT = (
 INTRODUCTION_GENERATE_PROMPT = (
     "I am designing a product for users to interact with an AI through dialogue. "
     "The Prompt given to the AI before the conversation is:\n\n"
-    "```\n{{prompt}}\n```\n\n"
+    "```\n{prompt}\n```\n\n"
     "Please generate a brief introduction of no more than 50 words that greets the user, based on this Prompt. "
     "Do not reveal the developer's motivation or deep logic behind the Prompt, "
     "but focus on building a relationship with the user:\n"
@@ -27,13 +27,13 @@ INTRODUCTION_GENERATE_PROMPT = (
 MORE_LIKE_THIS_GENERATE_PROMPT = (
     "-----\n"
-    "{{original_completion}}\n"
+    "{original_completion}\n"
     "-----\n\n"
     "Please use the above content as a sample for generating the result, "
     "and include key information points related to the original sample in the result. "
     "Try to rephrase this information in different ways and predict according to the rules below.\n\n"
     "-----\n"
-    "{{prompt}}\n"
+    "{prompt}\n"
 )

 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
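The single-brace form matters because these templates are rendered with plain `str.format` (see `prompt.format(query=query)` in the hunks above): doubled braces are `str.format`'s escape syntax and survive as literal text instead of being substituted. A quick demonstration with stand-in template strings (not the repo's actual constants):

```python
# Stand-in template strings; only the brace behavior is the point here.
old_template = "Human:{{query}}\n"  # {{ }} is str.format's escape for literal braces
new_template = "Human:{query}\n"

print(old_template.format(query="hello"))  # -> Human:{query}   (not substituted)
print(new_template.format(query="hello"))  # -> Human:hello
```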
@@ -2,7 +2,7 @@ version: '3.1'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.3.5
+    image: langgenius/dify-api:0.3.6
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.
@@ -110,7 +110,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.3.5
+    image: langgenius/dify-api:0.3.6
     restart: always
     environment:
       # Startup mode, 'worker' starts the Celery worker for processing the queue.
@@ -156,7 +156,7 @@ services:
   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.3.5
+    image: langgenius/dify-web:0.3.6
     restart: always
     environment:
       EDITION: SELF_HOSTED
@@ -83,17 +83,6 @@ const SettingsModal: FC<ISettingsModalProps> = ({
   return (
     <>
-      {showEmojiPicker && <EmojiPicker
-        onSelect={(icon, icon_background) => {
-          console.log(icon, icon_background)
-          setEmoji({ icon, icon_background })
-          setShowEmojiPicker(false)
-        }}
-        onClose={() => {
-          setEmoji({ icon: '🤖', icon_background: '#FFEAD5' })
-          setShowEmojiPicker(false)
-        }}
-      />}
       <Modal
         title={t(`${prefixSettings}.title`)}
         isShow={isShow}
@@ -161,6 +150,17 @@ const SettingsModal: FC<ISettingsModalProps> = ({
         <Button className='mr-2 flex-shrink-0' onClick={onHide}>{t('common.operation.cancel')}</Button>
         <Button type='primary' className='flex-shrink-0' onClick={onClickSave} loading={saveLoading}>{t('common.operation.save')}</Button>
       </div>
+      {showEmojiPicker && <EmojiPicker
+        onSelect={(icon, icon_background) => {
+          console.log(icon, icon_background)
+          setEmoji({ icon, icon_background })
+          setShowEmojiPicker(false)
+        }}
+        onClose={() => {
+          setEmoji({ icon: '🤖', icon_background: '#FFEAD5' })
+          setShowEmojiPicker(false)
+        }}
+      />}
     </Modal >
   </>
 {
   "name": "dify-web",
-  "version": "0.3.5",
+  "version": "0.3.6",
   "private": true,
   "scripts": {
     "dev": "next dev",