Commit 5bc7a783 authored by John Wang

Merge branch 'fix/azure-completion-choices-empty' into deploy/dev

parents f591699d af4ba81e
-from langchain.callbacks.manager import Callbacks
+from langchain.callbacks.manager import Callbacks, CallbackManagerForLLMRun
from langchain.llms import AzureOpenAI
from langchain.llms.openai import _streaming_response_template, completion_with_retry, _update_response, \
    update_token_usage
from langchain.schema import LLMResult
from typing import Optional, List, Dict, Mapping, Any, Union, Tuple
@@ -67,3 +69,58 @@ class StreamableAzureOpenAI(AzureOpenAI):
    @classmethod
    def get_kwargs_from_model_params(cls, params: dict):
        return params

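    # This method appears to mirror the upstream LangChain BaseOpenAI._generate,
    # overridden here so the streaming branch tolerates empty "choices" chunks
    # from Azure (see the guard in the loop below).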
    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to OpenAI's endpoint with k unique prompts.

        Args:
            prompts: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The full LLM output.

        Example:
            .. code-block:: python

                response = openai.generate(["Tell me a joke."])
        """
        params = self._invocation_params
        params = {**params, **kwargs}
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
        choices = []
        token_usage: Dict[str, int] = {}
        # Get the token usage from the response.
        # Includes prompt, completion, and total tokens used.
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
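        # Azure's completion endpoint may stream back chunks whose "choices"
        # list is empty (the situation this branch fixes); indexing such a
        # chunk would raise an IndexError, so the guard inside the loop below
        # only processes chunks that actually carry a choice.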
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")
                params["stream"] = True
                response = _streaming_response_template()
                for stream_resp in completion_with_retry(
                    self, prompt=_prompts, **params
                ):
                    if len(stream_resp["choices"]) > 0:
                        if run_manager:
                            run_manager.on_llm_new_token(
                                stream_resp["choices"][0]["text"],
                                verbose=self.verbose,
                                logprobs=stream_resp["choices"][0]["logprobs"],
                            )
                        _update_response(response, stream_resp)
                choices.extend(response["choices"])
            else:
                response = completion_with_retry(self, prompt=_prompts, **params)
                choices.extend(response["choices"])
            if not self.streaming:
                # Can't update token usage if streaming
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)
\ No newline at end of file
@@ -4,7 +4,6 @@ import {
} from '@heroicons/react/24/outline'
import Tooltip from '../base/tooltip'
import AppIcon from '../base/app-icon'
const chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_'
export function randomString(length: number) {
@@ -21,6 +20,7 @@ export type IAppBasicProps = {
  type: string | React.ReactNode
  hoverTip?: string
  textStyle?: { main?: string; extra?: string }
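  // Newly added optional prop; from the name, it presumably controls whether the
  // "extra" text renders inline with the name (its actual use is outside this hunk).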
  isExtraInLine?: boolean
}
const ApiSvg = <svg width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
@@ -61,7 +61,7 @@ const ICON_MAP = {
  notion: <AppIcon innerIcon={NotionSvg} className='!border-[0.5px] !border-indigo-100 !bg-white' />,
}
-export default function AppBasic({ icon, icon_background, name, type, hoverTip, textStyle, iconType = 'app' }: IAppBasicProps) {
+export default function AppBasic({ icon, icon_background, name, type, hoverTip, textStyle, iconType = 'app', isExtraInLine }: IAppBasicProps) {
  return (
    <div className="flex items-start">
      {icon && icon_background && iconType === 'app' && (