Fix critical model configuration bug
CRITICAL FIX for beginners: user config model changes now work correctly.

Issues fixed:
- rag-mini.py synthesis mode ignored the config completely (used hardcoded models)
- LLMSynthesizer fallback ignored config preferences
- Users changing the model in config saw no effect in synthesis mode

Changes:
- rag-mini.py now loads the config and passes synthesis_model to LLMSynthesizer
- LLMSynthesizer._select_best_model() respects config model_rankings for fallback
- All modes (synthesis and explore) now properly use config settings

Tested: model config changes now work correctly in both synthesis and explore modes.
This commit is contained in:
parent
b9f8957cca
commit
75b5175590
@@ -64,7 +64,10 @@ class LLMSynthesizer:
|
|||||||
def _select_best_model(self) -> str:
|
def _select_best_model(self) -> str:
|
||||||
"""Select the best available model based on configuration rankings."""
|
"""Select the best available model based on configuration rankings."""
|
||||||
if not self.available_models:
|
if not self.available_models:
|
||||||
return "qwen2.5:1.5b" # Fallback preference
|
# Use config fallback if available, otherwise use default
|
||||||
|
if self.config and hasattr(self.config, 'llm') and hasattr(self.config.llm, 'model_rankings') and self.config.llm.model_rankings:
|
||||||
|
return self.config.llm.model_rankings[0] # First preferred model
|
||||||
|
return "qwen2.5:1.5b" # System fallback only if no config
|
||||||
|
|
||||||
# Get model rankings from config or use defaults
|
# Get model rankings from config or use defaults
|
||||||
if self.config and hasattr(self.config, 'llm') and hasattr(self.config.llm, 'model_rankings'):
|
if self.config and hasattr(self.config, 'llm') and hasattr(self.config.llm, 'model_rankings'):
|
||||||
|
|||||||
11
rag-mini.py
11
rag-mini.py
@@ -192,7 +192,16 @@ def search_project(project_path: Path, query: str, top_k: int = 10, synthesize:
|
|||||||
# LLM Synthesis if requested
|
# LLM Synthesis if requested
|
||||||
if synthesize:
|
if synthesize:
|
||||||
print("🧠 Generating LLM synthesis...")
|
print("🧠 Generating LLM synthesis...")
|
||||||
synthesizer = LLMSynthesizer()
|
|
||||||
|
# Load config to respect user's model preferences
|
||||||
|
from mini_rag.config import ConfigManager
|
||||||
|
config_manager = ConfigManager(project_path)
|
||||||
|
config = config_manager.load_config()
|
||||||
|
|
||||||
|
synthesizer = LLMSynthesizer(
|
||||||
|
model=config.llm.synthesis_model if config.llm.synthesis_model != "auto" else None,
|
||||||
|
config=config
|
||||||
|
)
|
||||||
|
|
||||||
if synthesizer.is_available():
|
if synthesizer.is_available():
|
||||||
synthesis = synthesizer.synthesize_search_results(query, results, project_path)
|
synthesis = synthesizer.synthesize_search_results(query, results, project_path)
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user