fix: Update default chat model to grok-4-1-fast-reasoning and enhance logging for LLM responses

This commit is contained in:
bsiggel
2026-03-19 09:50:31 +00:00
parent 7fffdb2660
commit 2ac83df1e0
2 changed files with 15 additions and 3 deletions

View File

@@ -17,7 +17,7 @@ class LangChainXAIService:
Usage:
service = LangChainXAIService(ctx)
-    model = service.get_chat_model(model="grok-2-latest")
+    model = service.get_chat_model(model="grok-4-1-fast-reasoning")
model_with_tools = service.bind_file_search(model, collection_id)
result = await service.invoke_chat(model_with_tools, messages)
"""
@@ -46,7 +46,7 @@ class LangChainXAIService:
def get_chat_model(
self,
-        model: str = "grok-2-latest",
+        model: str = "grok-4-1-fast-reasoning",
temperature: float = 0.7,
max_tokens: Optional[int] = None
):
@@ -54,7 +54,7 @@ class LangChainXAIService:
Initialisiert ChatXAI Model.
Args:
-            model: Model name (default: grok-2-latest)
+            model: Model name (default: grok-4-1-fast-reasoning)
temperature: Sampling temperature 0.0-1.0
max_tokens: Optional max tokens for response