llm_lab/tests/test_combined_analysis.py
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from core.factory import LLMFactory
from agents.roles import AGENTS
# Use case: combined analysis of an image and a (filtered) support ticket
role = "support_analyzer"
prompt = "Analyze the support ticket and the attached image. Summarize the problem, the likely cause and whether it is resolved."
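# Image(s) analysed by the vision model alongside the ticket text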
image_path = ["images/ticket_context.png"]
json_ticket_path = "data/ticket_001.txt"  # human-readable version of the ticket
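# Generation parameters: low temperature for a more deterministic analysis, JSON output format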
custom_params = {
    "temperature": 0.3,
    "top_p": 1.0,
    "format": "json"
}
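# Read the ticket text so it can be appended to the prompt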
with open(json_ticket_path, "r", encoding="utf-8") as f:
    ticket_text = f.read()
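# Build the vision model via the factory, assign the agent role, then apply the custom parameters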
model = LLMFactory.create("llama3.2-vision:90b")
model.set_role(role, AGENTS[role])
model.params.update(custom_params)
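# translate=True is expected to return both an English and a French response (see the prints below)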
response_en, response_fr = model.generate(
    user_prompt=f"""{prompt}
{ticket_text}
""",
    images=image_path,
    translate=True
)
print("[EN]", response_en)
print("[FR]", response_fr)