
feature: xAI support. #271


Open · wants to merge 1 commit into base: main
api/config.py (9 changes: 6 additions & 3 deletions)
@@ -11,6 +11,7 @@
 from api.openrouter_client import OpenRouterClient
 from api.bedrock_client import BedrockClient
 from api.azureai_client import AzureAIClient
+from api.xai_client import XAIClient
 from adalflow import GoogleGenAIClient, OllamaClient

 # Get API keys from environment variables
@@ -53,7 +54,8 @@
     "OpenRouterClient": OpenRouterClient,
     "OllamaClient": OllamaClient,
     "BedrockClient": BedrockClient,
-    "AzureAIClient": AzureAIClient
+    "AzureAIClient": AzureAIClient,
+    "XAIClient": XAIClient
 }

 def replace_env_placeholders(config: Union[Dict[str, Any], List[Any], str, Any]) -> Union[Dict[str, Any], List[Any], str, Any]:
@@ -121,14 +123,15 @@ def load_generator_config():
         if provider_config.get("client_class") in CLIENT_CLASSES:
             provider_config["model_client"] = CLIENT_CLASSES[provider_config["client_class"]]
         # Fall back to default mapping based on provider_id
-        elif provider_id in ["google", "openai", "openrouter", "ollama", "bedrock", "azure"]:
+        elif provider_id in ["google", "openai", "openrouter", "ollama", "bedrock", "azure", "xai"]:
             default_map = {
                 "google": GoogleGenAIClient,
                 "openai": OpenAIClient,
                 "openrouter": OpenRouterClient,
                 "ollama": OllamaClient,
                 "bedrock": BedrockClient,
-                "azure": AzureAIClient
+                "azure": AzureAIClient,
+                "xai": XAIClient
             }
             provider_config["model_client"] = default_map[provider_id]
         else:
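Taken together, the two paths above mean a provider resolves to a client class either explicitly, via a client_class key in its config block, or implicitly, via its provider_id. A standalone sketch of that resolution order (the helper function and its parameters are illustrative, not part of the PR):

from typing import Any, Dict

def resolve_client(
    provider_id: str,
    provider_config: Dict[str, Any],
    client_classes: Dict[str, type],
    default_map: Dict[str, type],
) -> type:
    # Mirrors load_generator_config above: an explicit client_class wins,
    # otherwise provider_id falls back to the default mapping, which this
    # PR extends with "xai" -> XAIClient.
    if provider_config.get("client_class") in client_classes:
        return client_classes[provider_config["client_class"]]
    if provider_id in default_map:
        return default_map[provider_id]
    raise ValueError(f"Unknown provider: {provider_id}")

With this change, resolve_client("xai", {"default_model": "grok-3"}, CLIENT_CLASSES, default_map) returns XAIClient via the fallback path, and the explicit client_class set in generator.json below takes the first path, so either route works.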
api/config/generator.json (19 changes: 19 additions & 0 deletions)
@@ -164,6 +164,25 @@
           "top_p": 0.8
         }
       }
     },
+    "xai": {
+      "client_class": "XAIClient",
+      "default_model": "grok-4-0709",
+      "supportsCustomModel": true,
+      "models": {
+        "grok-4-0709": {
+          "temperature": 0.7
+        },
+        "grok-3": {
+          "temperature": 0.7
+        },
+        "grok-2": {
+          "temperature": 0.7
+        },
+        "grok-beta": {
+          "temperature": 0.7
+        }
+      }
+    }
   }
 }
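All four Grok models share a single temperature default, and supportsCustomModel presumably lets users supply model names outside this list. A quick well-formedness check for the new block, as a sketch: the top-level "providers" key is an assumption, since only closing braces are visible in this excerpt.

import json

with open("api/config/generator.json") as f:
    cfg = json.load(f)

xai = cfg["providers"]["xai"]  # assumed top-level "providers" key
assert xai["client_class"] == "XAIClient"
assert xai["default_model"] in xai["models"]
print(sorted(xai["models"]))  # ['grok-2', 'grok-3', 'grok-4-0709', 'grok-beta']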
api/requirements.txt (1 change: 1 addition & 0 deletions)
@@ -17,4 +17,5 @@ boto3>=1.34.0
 websockets>=11.0.3
 azure-identity>=1.12.0
 azure-core>=1.24.0
+xai-sdk>=0.1.0
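
Neither this excerpt nor the diffs above show the new api/xai_client.py that the other files import. For orientation, here is a minimal sketch of what such a client might look like, with two loud assumptions: it talks to xAI's OpenAI-compatible REST endpoint through the openai package rather than the pinned xai-sdk (whose surface is not shown here), and it implements only the two methods the websocket handler below actually calls.

import os
from typing import Any, AsyncGenerator, Dict, Optional

from openai import AsyncOpenAI

class XAIClient:
    # Hypothetical sketch, not the PR's implementation. Assumes xAI's
    # OpenAI-compatible API served from https://api.x.ai/v1.

    def __init__(self, api_key: Optional[str] = None):
        self.client = AsyncOpenAI(
            api_key=api_key or os.environ["XAI_API_KEY"],
            base_url="https://api.x.ai/v1",
        )

    def convert_inputs_to_api_kwargs(
        self, input: str, model_kwargs: Dict[str, Any], model_type: Any = None
    ) -> Dict[str, Any]:
        # Wrap the prompt as a single user message; pass model kwargs through.
        return {"messages": [{"role": "user", "content": input}], **model_kwargs}

    async def acall(self, api_kwargs: Dict[str, Any], model_type: Any = None):
        # Return an async generator of plain text chunks, matching the
        # `async for chunk in response` loop in websocket_wiki.py below.
        stream = await self.client.chat.completions.create(**api_kwargs)

        async def gen() -> AsyncGenerator[str, None]:
            async for event in stream:
                if event.choices and event.choices[0].delta.content:
                    yield event.choices[0].delta.content

        return gen()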

api/websocket_wiki.py (34 changes: 34 additions & 0 deletions)
Expand Up @@ -14,6 +14,7 @@
from api.openai_client import OpenAIClient
from api.openrouter_client import OpenRouterClient
from api.azureai_client import AzureAIClient
from api.xai_client import XAIClient
from api.rag import RAG

# Configure logging
@@ -505,6 +506,22 @@ async def handle_websocket_chat(websocket: WebSocket):
                 "top_p": model_config["top_p"]
             }

+            api_kwargs = model.convert_inputs_to_api_kwargs(
+                input=prompt,
+                model_kwargs=model_kwargs,
+                model_type=ModelType.LLM
+            )
+        elif request.provider == "xai":
+            logger.info(f"Using xAI with model: {request.model}")
+
+            # Initialize xAI client
+            model = XAIClient()
+            model_kwargs = {
+                "model": request.model,
+                "stream": True,
+                "temperature": model_config["temperature"]
+            }
+
             api_kwargs = model.convert_inputs_to_api_kwargs(
                 input=prompt,
                 model_kwargs=model_kwargs,
@@ -594,6 +611,23 @@ async def handle_websocket_chat(websocket: WebSocket):
             await websocket.send_text(error_msg)
             # Close the WebSocket connection after sending the error message
             await websocket.close()
+        elif request.provider == "xai":
+            try:
+                # Get the response and handle it properly using the previously created api_kwargs
+                logger.info("Making xAI API call")
+                response = await model.acall(api_kwargs=api_kwargs, model_type=ModelType.LLM)
+                # Handle streaming response from xAI
+                async for chunk in response:
+                    if chunk:  # xAI returns text chunks directly
+                        await websocket.send_text(chunk)
+                # Explicitly close the WebSocket connection after the response is complete
+                await websocket.close()
+            except Exception as e_xai:
+                logger.error(f"Error with xAI API: {str(e_xai)}")
+                error_msg = f"\nError with xAI API: {str(e_xai)}\n\nPlease check that you have set the XAI_API_KEY environment variable with a valid API key."
+                await websocket.send_text(error_msg)
+                # Close the WebSocket connection after sending the error message
+                await websocket.close()
         else:
             # Generate streaming response
             response = model.generate_content(prompt, stream=True)
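End to end, the xai branch mirrors the other providers: build model_kwargs, convert them to api_kwargs, then stream the acall result back over the websocket. A local smoke test of that flow using the hypothetical client sketch from the requirements section above (no websocket involved; model_type is passed as None because the sketch ignores it, whereas the real handler passes ModelType.LLM):

import asyncio

async def main() -> None:
    model = XAIClient()  # hypothetical sketch above; requires XAI_API_KEY to be set
    model_kwargs = {"model": "grok-3", "stream": True, "temperature": 0.7}
    api_kwargs = model.convert_inputs_to_api_kwargs(
        input="Say hello in one short sentence.",
        model_kwargs=model_kwargs,
        model_type=None,
    )
    response = await model.acall(api_kwargs=api_kwargs, model_type=None)
    async for chunk in response:
        print(chunk, end="", flush=True)

asyncio.run(main())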