Unverified Commit c4d759df authored by takatost, committed by GitHub

fix: wenxin error not raise when stream mode (#884)

parent a58f95fa
@@ -3,6 +3,7 @@ from __future__ import annotations
 
 import json
 import logging
+from json import JSONDecodeError
 from typing import (
     Any,
     Dict,
@@ -223,6 +224,8 @@ class Wenxin(LLM):
         for token in self._client.post(request).iter_lines():
             if token:
                 token = token.decode("utf-8")
+
+                if token.startswith('data:'):
                     completion = json.loads(token[5:])
 
                     yield GenerationChunk(text=completion['result'])
@@ -231,3 +234,14 @@ class Wenxin(LLM):
 
                     if completion['is_end']:
                         break
+                else:
+                    try:
+                        json_response = json.loads(token)
+                    except JSONDecodeError:
+                        raise ValueError(f"Wenxin Response Error {token}")
+
+                    raise ValueError(
+                        f"Wenxin API {json_response['error_code']}"
+                        f" error: {json_response['error_msg']}, "
+                        f"please confirm if the model you have chosen is already paid for."
+                    )
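For context only: the added else branch means that any stream line which is not an SSE "data:" chunk (the form in which the Wenxin API reports errors such as quota or unpaid-model failures) now raises a ValueError instead of being silently dropped. Below is a minimal, self-contained sketch of that control flow with the HTTP client replaced by a list of byte strings; handle_stream_lines and the sample error payload are illustrative and not part of the commit.

import json
from json import JSONDecodeError
from typing import Iterator


def handle_stream_lines(lines: Iterator[bytes]) -> Iterator[str]:
    # Illustrative re-creation of the patched streaming branch; not the actual
    # Wenxin class, just the control flow this commit introduces.
    for token in lines:
        if not token:
            continue
        text = token.decode("utf-8")
        if text.startswith('data:'):
            # Normal SSE chunk: strip the "data:" prefix and parse the JSON body.
            completion = json.loads(text[5:])
            yield completion['result']
            if completion['is_end']:
                break
        else:
            # Any line that is not an SSE "data:" chunk is treated as an error payload.
            try:
                json_response = json.loads(text)
            except JSONDecodeError:
                raise ValueError(f"Wenxin Response Error {text}")
            raise ValueError(
                f"Wenxin API {json_response['error_code']}"
                f" error: {json_response['error_msg']}, "
                f"please confirm if the model you have chosen is already paid for."
            )


if __name__ == "__main__":
    # Hypothetical error body; before this fix such a line was skipped without any error.
    error_line = b'{"error_code": 17, "error_msg": "Open api daily request limit reached"}'
    try:
        for chunk in handle_stream_lines(iter([error_line])):
            print(chunk)
    except ValueError as exc:
        print(exc)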