"""MLX-native cross-encoder reranker served via FastAPI.
Uses Qwen3-Reranker with mlx_lm for true cross-encoder scoring on Apple Silicon.
Exposes a Cohere-compatible /v1/rerank endpoint.
"""

import math
import os
import time
from contextlib import asynccontextmanager

import mlx.core as mx
from fastapi import FastAPI
from mlx_lm import load
from pydantic import BaseModel, Field

MODEL_ID = os.environ.get(
    "RERANKER_MODEL_ID", "mlx-community/Qwen3-Reranker-4B-mxfp8"
)
HOST = os.environ.get("HOST", "127.0.0.1")
PORT = int(os.environ.get("PORT", "8001"))

SYSTEM_PROMPT = (
    "Judge whether the Document is relevant to the Query. "
    'Answer only "yes" or "no".'
)
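
# Module-level model state, populated once at startup by _load_model().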
_model = None
_tokenizer = None
_yes_id = None
_no_id = None


def _load_model():
    global _model, _tokenizer, _yes_id, _no_id
    t0 = time.time()
    _model, _tokenizer = load(MODEL_ID)
    # Cache the token ids for the two allowed answers. [-1] keeps the last
    # token in case the tokenizer splits the word into more than one piece.
    _yes_id = _tokenizer.encode("yes", add_special_tokens=False)[-1]
    _no_id = _tokenizer.encode("no", add_special_tokens=False)[-1]
    # Warmup: run one scoring pass so the first real request isn't slow.
    _score_pair("warmup", "warmup")
    print(f"Model {MODEL_ID} loaded in {time.time() - t0:.1f}s")


@asynccontextmanager
async def lifespan(app: FastAPI):
    _load_model()
    yield


app = FastAPI(title="mlx-rerank", lifespan=lifespan)


class RerankRequest(BaseModel):
    query: str
    documents: list[str]
    model: str | None = None
    top_n: int | None = None
    return_documents: bool = True


class RerankResult(BaseModel):
    index: int
    relevance_score: float
    document: dict[str, str] | None = None


class RerankResponse(BaseModel):
    id: str = ""
    results: list[RerankResult]
    model: str = Field(default="")
    usage: dict = Field(default_factory=lambda: {"total_tokens": 0})
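

# The relevance score below is sigmoid(yes_logit - no_logit), which is exactly
# the softmax probability assigned to "yes" when the model's choice is
# restricted to the two answer tokens.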
def _score_pair(query: str, document: str) -> float:
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": f"<Query>: {query}\n<Document>: {document}"},
    ]
    text = _tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,
    )
    input_ids = _tokenizer.encode(text)
    logits = _model(mx.array([input_ids]))
    # MLX computes lazily; force evaluation before pulling scalars out.
    mx.eval(logits)
    last = logits[0, -1, :]
    yes_logit = last[_yes_id].item()
    no_logit = last[_no_id].item()
    return 1.0 / (1.0 + math.exp(-(yes_logit - no_logit)))
@app.post("/v1/rerank")
async def rerank(req: RerankRequest) -> RerankResponse:
scored = []
for i, doc in enumerate(req.documents):
score = _score_pair(req.query, doc)
result = RerankResult(index=i, relevance_score=score)
if req.return_documents:
result.document = {"text": doc}
scored.append(result)
scored.sort(key=lambda r: r.relevance_score, reverse=True)
if req.top_n is not None:
scored = scored[: req.top_n]
return RerankResponse(
results=scored,
model=req.model or MODEL_ID,
)
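
# Design note: documents are scored one at a time, so latency grows linearly
# with the number of documents; batching the forward passes is a possible
# optimization not attempted here.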
@app.get("/health")
async def health():
return {"status": "ok", "model": MODEL_ID}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host=HOST, port=PORT)