ragflow_preprocess/config/llm_profiles.json
2025-03-27 14:08:10 +01:00

104 lines
2.0 KiB
JSON

{
  "léger": {
    "vision": {
      "model": "llava:34b",
      "language": "en",
      "temperature": 0.2,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    },
    "translation": {
      "model": "mistral",
      "language": "fr",
      "temperature": 0.1,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    },
    "summary": {
      "model": "mistral",
      "language": "fr",
      "temperature": 0.2,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    },
    "rewriter": {
      "model": "mistral",
      "language": "fr",
      "temperature": 0.3,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    }
  },
  "moyen": {
    "vision": {
      "model": "llava",
      "language": "en",
      "temperature": 0.2,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    },
    "translation": {
      "model": "qwen2.5",
      "language": "fr",
      "temperature": 0.1,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    },
    "summary": {
      "model": "deepseek-r1",
      "language": "fr",
      "temperature": 0.2,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    },
    "rewriter": {
      "model": "mistral",
      "language": "fr",
      "temperature": 0.3,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 1024
    }
  },
  "avancé": {
    "vision": {
      "model": "llama3.2-vision",
      "language": "en",
      "temperature": 0.2,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 2048
    },
    "translation": {
      "model": "deepseek",
      "language": "fr",
      "temperature": 0.1,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 2048
    },
    "summary": {
      "model": "deepseek-r1",
      "language": "fr",
      "temperature": 0.2,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 2048
    },
    "rewriter": {
      "model": "deepseek",
      "language": "fr",
      "temperature": 0.3,
      "top_p": 0.95,
      "top_k": 40,
      "max_tokens": 2048
    }
  }
}