Commit ddf5606

Bugfix test exception (#4171)
* feat(log): add_request_and_response_log
* modify default error type
1 parent c3b8ebe commit ddf5606

File tree: 4 files changed, +18 -12 lines changed

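Across the three serving modules below, every catch-all error path switches its default error type from ErrorType.SERVER_ERROR to ErrorType.INTERNAL_ERROR. The actual definitions of ErrorResponse, ErrorInfo, ErrorType, and ErrorCode are not part of this diff; the sketch below is only a guess at the shapes the changed lines rely on, with the enum string values assumed for illustration.

from enum import Enum
from typing import Optional

from pydantic import BaseModel


class ErrorType(str, Enum):
    # Member names appear in the diff; the string values here are assumptions.
    SERVER_ERROR = "server_error"
    INTERNAL_ERROR = "internal_error"


class ErrorCode(str, Enum):
    MODEL_NOT_SUPPORT = "model_not_support"  # name from the diff, value assumed


class ErrorInfo(BaseModel):
    message: str
    type: Optional[str] = None
    code: Optional[str] = None
    param: Optional[str] = None  # referenced by the validation tests below


class ErrorResponse(BaseModel):
    error: ErrorInfo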

fastdeploy/entrypoints/openai/serving_chat.py

Lines changed: 4 additions & 4 deletions
@@ -87,15 +87,15 @@ async def create_chat_completion(self, request: ChatCompletionRequest):
                 f"Only master node can accept completion request, please send request to master node: {self.master_ip}"
             )
             api_server_logger.error(err_msg)
-            return ErrorResponse(error=ErrorInfo(message=err_msg, type=ErrorType.SERVER_ERROR))
+            return ErrorResponse(error=ErrorInfo(message=err_msg, type=ErrorType.INTERNAL_ERROR))
 
         if self.models:
             is_supported, request.model = self.models.is_supported_model(request.model)
             if not is_supported:
                 err_msg = f"Unsupported model: [{request.model}], support [{', '.join([x.name for x in self.models.model_paths])}] or default"
                 api_server_logger.error(err_msg)
                 return ErrorResponse(
-                    error=ErrorInfo(message=err_msg, type=ErrorType.SERVER_ERROR, code=ErrorCode.MODEL_NOT_SUPPORT)
+                    error=ErrorInfo(message=err_msg, type=ErrorType.INTERNAL_ERROR, code=ErrorCode.MODEL_NOT_SUPPORT)
                 )
 
         try:
@@ -145,7 +145,7 @@ async def create_chat_completion(self, request: ChatCompletionRequest):
             except Exception as e:
                 error_msg = f"request[{request_id}]full generator error: {str(e)}, {str(traceback.format_exc())}"
                 api_server_logger.error(error_msg)
-                return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.SERVER_ERROR))
+                return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.INTERNAL_ERROR))
         except Exception as e:
             error_msg = (
                 f"request[{request_id}] waiting error: {str(e)}, {str(traceback.format_exc())}, "
@@ -158,7 +158,7 @@ async def create_chat_completion(self, request: ChatCompletionRequest):
 
     def _create_streaming_error_response(self, message: str) -> str:
         api_server_logger.error(message)
-        error_response = ErrorResponse(error=ErrorInfo(message=message, type=ErrorType.SERVER_ERROR))
+        error_response = ErrorResponse(error=ErrorInfo(message=message, type=ErrorType.INTERNAL_ERROR))
         return error_response.model_dump_json()
 
     async def chat_completion_stream_generator(
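In the streaming path, _create_streaming_error_response serializes the whole ErrorResponse with model_dump_json() before it is sent to the client. Assuming Pydantic-style models like the sketch above (not confirmed by this commit), the serialized payload would look roughly like this:

# Illustration only, reusing the assumed ErrorResponse/ErrorInfo/ErrorType sketch above.
error_response = ErrorResponse(
    error=ErrorInfo(message="engine worker timed out", type=ErrorType.INTERNAL_ERROR)
)
print(error_response.model_dump_json())
# {"error":{"message":"engine worker timed out","type":"internal_error","code":null,"param":null}}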

fastdeploy/entrypoints/openai/serving_completion.py

Lines changed: 5 additions & 5 deletions
@@ -64,14 +64,14 @@ async def create_completion(self, request: CompletionRequest):
                 f"Only master node can accept completion request, please send request to master node: {self.master_ip}"
             )
             api_server_logger.error(err_msg)
-            return ErrorResponse(error=ErrorInfo(message=err_msg, type=ErrorType.SERVER_ERROR))
+            return ErrorResponse(error=ErrorInfo(message=err_msg, type=ErrorType.INTERNAL_ERROR))
         if self.models:
             is_supported, request.model = self.models.is_supported_model(request.model)
             if not is_supported:
                 err_msg = f"Unsupported model: [{request.model}], support [{', '.join([x.name for x in self.models.model_paths])}] or default"
                 api_server_logger.error(err_msg)
                 return ErrorResponse(
-                    error=ErrorInfo(message=err_msg, type=ErrorType.SERVER_ERROR, code=ErrorCode.MODEL_NOT_SUPPORT)
+                    error=ErrorInfo(message=err_msg, type=ErrorType.INTERNAL_ERROR, code=ErrorCode.MODEL_NOT_SUPPORT)
                 )
         created_time = int(time.time())
         if request.user is not None:
@@ -115,7 +115,7 @@ async def create_completion(self, request: CompletionRequest):
         except Exception as e:
             error_msg = f"OpenAIServingCompletion create_completion: {e}, {str(traceback.format_exc())}"
             api_server_logger.error(error_msg)
-            return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.SERVER_ERROR))
+            return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.INTERNAL_ERROR))
 
         if request_prompt_ids is not None:
             request_prompts = request_prompt_ids
@@ -189,12 +189,12 @@ async def create_completion(self, request: CompletionRequest):
                     f"OpenAIServingCompletion completion_full_generator error: {e}, {str(traceback.format_exc())}"
                 )
                 api_server_logger.error(error_msg)
-                return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.SERVER_ERROR))
+                return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.INTERNAL_ERROR))
 
         except Exception as e:
             error_msg = f"OpenAIServingCompletion create_completion error: {e}, {str(traceback.format_exc())}"
             api_server_logger.error(error_msg)
-            return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.SERVER_ERROR))
+            return ErrorResponse(error=ErrorInfo(message=error_msg, type=ErrorType.INTERNAL_ERROR))
 
     async def completion_full_generator(
         self,
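Because every failure path in create_completion now returns the same ErrorResponse shape, a client can branch on the error's type field instead of parsing message text. A hypothetical client-side check, assuming the JSON layout sketched above:

import json


def is_internal_error(payload: str) -> bool:
    # Hypothetical helper; "internal_error" assumes the enum value guessed in the
    # sketch above and is not confirmed by this commit.
    data = json.loads(payload)
    return data.get("error", {}).get("type") == "internal_error"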

fastdeploy/entrypoints/openai/serving_models.py

Lines changed: 1 addition & 1 deletion
@@ -87,7 +87,7 @@ async def list_models(self) -> ModelList:
                 f"Only master node can accept models request, please send request to master node: {self.master_ip}"
             )
             api_server_logger.error(err_msg)
-            return ErrorResponse(error=ErrorInfo(message=err_msg, type=ErrorType.SERVER_ERROR))
+            return ErrorResponse(error=ErrorInfo(message=err_msg, type=ErrorType.INTERNAL_ERROR))
         model_infos = [
             ModelInfo(
                 id=model.name, max_model_len=self.max_model_len, root=model.model_path, permission=[ModelPermission()]

tests/utils/test_exception_handler.py

Lines changed: 8 additions & 2 deletions
@@ -8,6 +8,10 @@
 from fastdeploy.utils import ErrorCode, ExceptionHandler, ParameterError
 
 
+class DummyRequest:
+    url = "http://testserver/test"
+
+
 class TestParameterError(unittest.TestCase):
     def test_parameter_error_init(self):
         exc = ParameterError("param1", "error message")
@@ -30,7 +34,8 @@ async def test_handle_exception(self):
     async def test_handle_request_validation_missing_messages(self):
         """When the messages parameter is missing, missing_required_parameter should be returned"""
         exc = RequestValidationError([{"loc": ("body", "messages"), "msg": "Field required", "type": "missing"}])
-        resp: JSONResponse = await ExceptionHandler.handle_request_validation_exception(None, exc)
+        dummy_request = DummyRequest()
+        resp: JSONResponse = await ExceptionHandler.handle_request_validation_exception(dummy_request, exc)
         data = json.loads(resp.body.decode())
         self.assertEqual(resp.status_code, HTTPStatus.BAD_REQUEST)
         self.assertEqual(data["error"]["param"], "messages")
@@ -42,7 +47,8 @@ async def test_handle_request_validation_invalid_value(self):
         exc = RequestValidationError(
             [{"loc": ("body", "top_p"), "msg": "Input should be less than or equal to 1", "type": "value_error"}]
         )
-        resp: JSONResponse = await ExceptionHandler.handle_request_validation_exception(None, exc)
+        dummy_request = DummyRequest()
+        resp: JSONResponse = await ExceptionHandler.handle_request_validation_exception(dummy_request, exc)
         data = json.loads(resp.body.decode())
         self.assertEqual(resp.status_code, HTTPStatus.BAD_REQUEST)
         self.assertEqual(data["error"]["param"], "top_p")
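The test fix replaces the None request argument with a DummyRequest instance, presumably because the handler now reads request.url while building the request/response log added in this PR, and None has no such attribute. When only a single attribute is needed, a throwaway namespace object is an equally lightweight stand-in; a minimal sketch of the same idea:

from types import SimpleNamespace

# Stand-in for starlette.requests.Request: the handler appears to read only
# `request.url` (inferred from DummyRequest above), so any object exposing that
# attribute is enough for the unit test.
fake_request = SimpleNamespace(url="http://testserver/test")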
