From af5847eb0d19d7529e8c2624fda290f1db24fd5e Mon Sep 17 00:00:00 2001
From: Ladebeze66
Date: Sun, 6 Apr 2025 14:41:45 +0200
Subject: [PATCH] Add agent and LLM classes

---
 agents/agent_image_analyser.py           | 14 ++++
 agents/agent_image_sorter.py             | 15 ++++
 agents/agent_json_analyser.py            | 15 ++++
 agents/agent_report_generator.py         | 28 ++++++++
 agents/base_agent.py                     | 22 ++++++
 llm_classes/base_llm.py                  | 79 ++++++++++++++++++++
 llm_classes/mistral_large.py             | 33 +++++++++
 llm_classes/mistral_large_pixtral2411.py | 33 +++++++++
 llm_classes/mistral_medium.py            | 33 +++++++++
 llm_classes/ollama.py                    | 93 ++++++++++++++++++++++
 llm_classes/pixtral_12b.py               | 33 +++++++++
 llm_classes/pixtral_large.py             | 33 +++++++++
 llm_classes/pixtral_medium.py            | 33 +++++++++
 orchestrator.py                          | 13 ++++
 14 files changed, 477 insertions(+)
 create mode 100644 agents/agent_image_analyser.py
 create mode 100644 agents/agent_image_sorter.py
 create mode 100644 agents/agent_json_analyser.py
 create mode 100644 agents/agent_report_generator.py
 create mode 100644 agents/base_agent.py
 create mode 100644 llm_classes/base_llm.py
 create mode 100644 llm_classes/mistral_large.py
 create mode 100644 llm_classes/mistral_large_pixtral2411.py
 create mode 100644 llm_classes/mistral_medium.py
 create mode 100644 llm_classes/ollama.py
 create mode 100644 llm_classes/pixtral_12b.py
 create mode 100644 llm_classes/pixtral_large.py
 create mode 100644 llm_classes/pixtral_medium.py
 create mode 100644 orchestrator.py

diff --git a/agents/agent_image_analyser.py b/agents/agent_image_analyser.py
new file mode 100644
index 0000000..480a991
--- /dev/null
+++ b/agents/agent_image_analyser.py
@@ -0,0 +1,14 @@
+from .base_agent import BaseAgent
+
+class AgentImageAnalyser(BaseAgent):
+    """
+    Agent that analyses an image and extracts the relevant information.
+    """
+    def __init__(self, llm):
+        super().__init__("AgentImageAnalyser", llm)
+
+    def executer(self, image_description: str, contexte: str) -> str:
+        prompt = f"Analyse cette image en tenant compte du contexte suivant : {contexte}. Description de l'image : {image_description}"
+        response = self.llm.interroger(prompt)
+        self.ajouter_historique("analyse_image", {"image": image_description, "contexte": contexte}, response)
+        return response
\ No newline at end of file
diff --git a/agents/agent_image_sorter.py b/agents/agent_image_sorter.py
new file mode 100644
index 0000000..ba997fd
--- /dev/null
+++ b/agents/agent_image_sorter.py
@@ -0,0 +1,15 @@
+from .base_agent import BaseAgent
+
+class AgentImageSorter(BaseAgent):
+    """
+    Agent that sorts images according to their content.
+    """
+    def __init__(self, llm):
+        super().__init__("AgentImageSorter", llm)
+
+    def executer(self, image_description: str) -> bool:
+        prompt = f"L'image suivante est-elle pertinente pour BRG_Lab ? Description : {image_description}"
+        response = self.llm.interroger(prompt)
+        result = "oui" in response.lower()
+        self.ajouter_historique("tri_image", image_description, result)
+        return result
diff --git a/agents/agent_json_analyser.py b/agents/agent_json_analyser.py
new file mode 100644
index 0000000..14d3dd1
--- /dev/null
+++ b/agents/agent_json_analyser.py
@@ -0,0 +1,15 @@
+from .base_agent import BaseAgent
+from typing import Dict
+
+class AgentJsonAnalyser(BaseAgent):
+    """
+    Agent that analyses JSON files and extracts the relevant information.
+    """
+    def __init__(self, llm):
+        super().__init__("AgentJsonAnalyser", llm)
+
+    def executer(self, ticket_json: Dict) -> str:
+        prompt = f"Analyse ce ticket JSON et identifie les éléments importants : {ticket_json}"
+        response = self.llm.interroger(prompt)
+        self.ajouter_historique("analyse_json", ticket_json, response)
+        return response
\ No newline at end of file
+ """ + def __init__(self, llm): + super().__init__("AgentJsonAnalyser", llm) + + def executer(self, ticket_json: Dict) -> str: + prompt = f"Analyse ce ticket JSON et identifie les éléments importants : {ticket_json}" + response = self.llm.interroger(prompt) + self.ajouter_historique("analyse_json", ticket_json, response) + return response \ No newline at end of file diff --git a/agents/agent_report_generator.py b/agents/agent_report_generator.py new file mode 100644 index 0000000..a7bac84 --- /dev/null +++ b/agents/agent_report_generator.py @@ -0,0 +1,28 @@ +import json +from .base_agent import BaseAgent +from datetime import datetime +from typing import Dict + +class AgentReportGenerator(BaseAgent): + """ + Agent pour générer un rapport à partir des informations collectées. + """ + def __init__(self, llm): + super().__init__("AgentReportGenerator", llm) + + def executer(self, rapport_data: Dict, filename: str): + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + + #Sauvegarde json + json_path = f"../reports/json_reports/{filename}_{timestamp}.json" + with open(json_path, "w", encoding="utf-8") as f_json: + json.dump(rapport_data, f_json, ensure_ascii=False, indent=4) + + #Sauvegarde Markdown + md_path =f"../repports/markdown_reports/{filename}_{timestamp}.md" + with open(md_path, "w", encoding="utf-8") as f_md: + f_md.write(f"# Rapport {filename}\n\n") + for key, value in rapport_data.items(): + f_md.write(f"## {key.capitalize()}\n{value}\n\n") + + self.ajouter_historique("generation_rapport", filename, "Rapport généré") \ No newline at end of file diff --git a/agents/base_agent.py b/agents/base_agent.py new file mode 100644 index 0000000..3f70270 --- /dev/null +++ b/agents/base_agent.py @@ -0,0 +1,22 @@ +from abc import ABC, abstractmethod +from typing import List, Dict, Any + +class BaseAgent(ABC): + """ + Classe de base pour les agents. 
+ """ + def __init__(self, nom: str, llm: Any): + self.nom = nom + self.llm = llm + self.historique: List[Dict[str, Any]] = [] + + def ajouter_historique(self, action: str, input_data: Any, output_data: Any): + self.historique.append({ + "action": action, + "input": input_data, + "output": output_data + }) + + @abstractmethod + def _executer(self, *args, **kwargs) -> Any: + pass diff --git a/llm_classes/base_llm.py b/llm_classes/base_llm.py new file mode 100644 index 0000000..d1936fd --- /dev/null +++ b/llm_classes/base_llm.py @@ -0,0 +1,81 @@ +import abc +import json +import requests +from datetime import datetime, timedelta +from typing import Dict, Any, List, Optional + +class BaseLLM(abc.ABC): + """ Classe abstraite pour les LLM """ + + def __init__(self, modele: str): + self.modele: str = modele + self.prompt_system:str = "" + self.params: Dict[str, Any] = { + "temperature": 0.8, + "top_p": 0.9, + "top_k": 40, + "max_tokens": 1000, + "presence_penalty": 0, + "frequency_penalty": 0, + "stop": None + } + + self.dureeTraitement: timedelta = timedelta() + self.reponseErreur: bool = False + + self.heureDepart: Optional[datetime] = None + self.heureFin: Optional[datetime] = None + + @abc.abstractmethod + def urlBase(self) -> str: + pass + + @abc.abstractmethod + def cleAPI(self) -> str: + pass + + @abc.abstractmethod + def urlFonction(self) -> str: + pass + + @abc.abstractmethod + def _preparer_contenu(self, question: str) -> Dict[str, Any]: + pass + + @abc.abstractmethod + def _traiter_reponse(self, reponse: requests.Response) -> str: + pass + + def interroger(self, question: str) -> str: + url = self.urlBase() + self.urlFonction() + headers = {"Content-Type": "application/json"} + + if self.cleAPI(): + headers["Authorization"] = f"Bearer {self.cleAPI()}" + + contenu = self._preparer_contenu(question) + + self.heureDepart = datetime.now() + + try: + response = requests.post(url=url, headers=headers, json=contenu, timeout=120) + self.heureFin = datetime.now() + self.dureeTraitement = self.heureFin - self.heureDepart + + if response.status_code in [200, 201]: + self.reponseErreur = False + reponse = self._traiter_reponse(response) + else: + self.reponseErreur = True + return response.text + + except Exception as e: + self.heureFin = datetime.now() + self.dureeTraitement = self.heureFin - self.heureDepart + self.reponseErreur = True + return str(e) + + def configurer(self, **kwargs):# méthode pour configurer les paramètres + self.params.update(kwargs) + + \ No newline at end of file diff --git a/llm_classes/mistral_large.py b/llm_classes/mistral_large.py new file mode 100644 index 0000000..bd17766 --- /dev/null +++ b/llm_classes/mistral_large.py @@ -0,0 +1,31 @@ +from .base_llm import BaseLLM +import requests + +class MistralLarge(BaseLLM): + + def __init__(self): + super().__init__("mistral-large-latest") + self.configurer(temperature=0.2, top_p=1) + + def urlBase(self) -> str: + return "https://api.mistral.ai/v1/" + + def cleAPI(self) -> str: + return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh" + + def urlFonction(self) -> str: + return "chat/completions" + + def _preparer_contenu(self, question: str) -> dict: + return { + "model": self.modele, + "messages": [ + {"role": "system", "content": self.prompt_system}, + {"role": "user", "content": question} + ], + **self.params + } + + def _traiter_reponse(self, reponse: requests.Response) -> str: + data = reponse.json() + return data["choices"][0]["message"]["content"] diff --git a/llm_classes/mistral_large_pixtral2411.py 
diff --git a/llm_classes/mistral_large_pixtral2411.py b/llm_classes/mistral_large_pixtral2411.py
new file mode 100644
index 0000000..e7a2a67
--- /dev/null
+++ b/llm_classes/mistral_large_pixtral2411.py
@@ -0,0 +1,33 @@
+import os
+import requests
+from .base_llm import BaseLLM
+
+class MistralLargePixtral(BaseLLM):
+
+    def __init__(self):
+        super().__init__("pixtral-large-2411")
+        self.configurer(temperature=0.2, top_p=1)
+
+    def urlBase(self) -> str:
+        return "https://api.mistral.ai/v1/"
+
+    def cleAPI(self) -> str:
+        # The key is read from the environment; never commit credentials
+        return os.environ.get("MISTRAL_API_KEY", "")
+
+    def urlFonction(self) -> str:
+        return "chat/completions"
+
+    def _preparer_contenu(self, question: str) -> dict:
+        return {
+            "model": self.modele,
+            "messages": [
+                {"role": "system", "content": self.prompt_system},
+                {"role": "user", "content": question}
+            ],
+            **self.params
+        }
+
+    def _traiter_reponse(self, reponse: requests.Response) -> str:
+        data = reponse.json()
+        return data["choices"][0]["message"]["content"]
diff --git a/llm_classes/mistral_medium.py b/llm_classes/mistral_medium.py
new file mode 100644
index 0000000..cfeb562
--- /dev/null
+++ b/llm_classes/mistral_medium.py
@@ -0,0 +1,33 @@
+import os
+import requests
+from .base_llm import BaseLLM
+
+class MistralMedium(BaseLLM):
+
+    def __init__(self):
+        super().__init__("mistral-medium-latest")
+        self.configurer(temperature=0.2, top_p=1)
+
+    def urlBase(self) -> str:
+        return "https://api.mistral.ai/v1/"
+
+    def cleAPI(self) -> str:
+        # The key is read from the environment; never commit credentials
+        return os.environ.get("MISTRAL_API_KEY", "")
+
+    def urlFonction(self) -> str:
+        return "chat/completions"
+
+    def _preparer_contenu(self, question: str) -> dict:
+        return {
+            "model": self.modele,
+            "messages": [
+                {"role": "system", "content": self.prompt_system},
+                {"role": "user", "content": question}
+            ],
+            **self.params
+        }
+
+    def _traiter_reponse(self, reponse: requests.Response) -> str:
+        data = reponse.json()
+        return data["choices"][0]["message"]["content"]
diff --git a/llm_classes/ollama.py b/llm_classes/ollama.py
new file mode 100644
index 0000000..694d1b7
--- /dev/null
+++ b/llm_classes/ollama.py
@@ -0,0 +1,93 @@
+from .base_llm import BaseLLM
+import requests
+from datetime import timedelta
+from typing import Dict, Any
+
+class Ollama(BaseLLM):
+    """
+    Class for interacting with the Ollama API.
+    """
+
+    def __init__(self, modele: str = ""):
+        super().__init__(modele)
+        self.api_url = "http://217.182.105.173:11434/api/generate"
+
+
+        self.params: Dict[str, Any] = {
+            "temperature": 0.8,
+            "top_p": 0.9,
+            "top_k": 40,
+            "num_ctx": 2048,
+            "repeat_penalty": 1.1,
+            "repeat_last_n": 64,
+            "mirostat": 0,
+            "mirostat_eta": 0.1,
+            "mirostat_tau": 5,
+            "keep_alive": int(timedelta(minutes=5).total_seconds()),
+            "num_predict": -1,
+            "min_p": 0,
+            "seed": 0,
+            "stop": [],
+            "stream": False
+        }
+
+    def urlBase(self) -> str:
+        """
+        Returns the base URL of the Ollama API.
+        """
+        return "http://217.182.105.173:11434/"
+
+    def cleAPI(self) -> str:
+        """
+        Ollama does not require an API key by default.
+        """
+        return ""
+
+    def urlFonction(self) -> str:
+        """
+        Returns the Ollama endpoint used to generate a completion.
+        """
+        return "api/generate"
+
+    def _preparer_contenu(self, question: str) -> Dict[str, Any]:
+        """
+        Builds the request payload expected by Ollama.
+        """
+        contenu = {
+            "model": self.modele,
+            "prompt": question,
+            "options": {
+                "temperature": self.params["temperature"],
+                "top_p": self.params["top_p"],
+                "top_k": self.params["top_k"],
+                "num_ctx": self.params["num_ctx"],
+                "repeat_penalty": self.params["repeat_penalty"],
+                "repeat_last_n": self.params["repeat_last_n"],
+                "mirostat": self.params["mirostat"],
+                "mirostat_eta": self.params["mirostat_eta"],
+                "mirostat_tau": self.params["mirostat_tau"],
+                "num_predict": self.params["num_predict"],
+                "min_p": self.params["min_p"],
+                "seed": self.params["seed"],
+                "stop": self.params["stop"],
+            },
+            # keep_alive and stream are top-level request fields, not options
+            "keep_alive": self.params["keep_alive"],
+            "stream": self.params["stream"]
+        }
+        return contenu
+
+    def _traiter_reponse(self, reponse: requests.Response) -> str:
+        """
+        Extracts the generated text from the Ollama response.
+        """
+        data = reponse.json()
+        return data.get("response", "")
+
+    def configurer(self, **kwargs):
+        """
+        Updates Ollama-specific parameters; unknown keys are ignored.
+        """
+        for key, value in kwargs.items():
+            if key in self.params:
+                self.params[key] = value
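A short usage sketch for the Ollama client, assuming the server at the address above is reachable and already has the model pulled; the model name here is illustrative:

from llm_classes.ollama import Ollama

llm = Ollama("mistral")                         # illustrative model name
llm.configurer(temperature=0.2, num_ctx=4096)   # configurer() ignores unknown keys
print(llm.interroger("Résume le protocole HTTP en une phrase."))
print(llm.dureeTraitement, llm.reponseErreur)   # latency and error flag from BaseLLM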
+ """ + contenu = { + "model": self.modele, + "prompt": question, + "options": { + "temperature": self.params["temperature"], + "top_p": self.params["top_p"], + "top_k": self.params["top_k"], + "num_ctx": self.params["num_ctx"], + "repeat_penalty": self.params["repeat_penalty"], + "repeat_last_n": self.params["repeat_last_n"], + "mirostat": self.params["mirostat"], + "mirostat_eta": self.params["mirostat_eta"], + "mirostat_tau": self.params["mirostat_tau"], + "keep_alive": self.params["keep_alive"], + "num_predict": self.params["num_predict"], + "min_p": self.params["min_p"], + "seed": self.params["seed"], + "stop": self.params["stop"], + }, + "stream": self.params["stream"] + } + return contenu + + def _traiter_reponse(self, reponse: requests.Response) -> str: + """ + Traite et retourne la réponse fournie par Ollama. + """ + data = reponse.json() + return data.get("response", "") + + def configurer(self, **kwargs): + """ + Mise à jour facile des paramètres spécifiques à Ollama. + """ + for key, value in kwargs.items(): + if key in self.params: + self.params[key] = value diff --git a/llm_classes/pixtral_12b.py b/llm_classes/pixtral_12b.py new file mode 100644 index 0000000..c4d90de --- /dev/null +++ b/llm_classes/pixtral_12b.py @@ -0,0 +1,31 @@ +from .base_llm import BaseLLM +import requests + +class Pixtral12b(BaseLLM): + + def __init__(self): + super().__init__("pixtral-12b-latest") + self.configurer(temperature=0.2, top_p=1) + + def urlBase(self) -> str: + return "https://api.mistral.ai/v1/" + + def cleAPI(self) -> str: + return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh" + + def urlFonction(self) -> str: + return "chat/completions" + + def _preparer_contenu(self, question: str) -> dict: + return { + "model": self.modele, + "messages": [ + {"role": "system", "content": self.prompt_system}, + {"role": "user", "content": question} + ], + **self.params + } + + def _traiter_reponse(self, reponse: requests.Response) -> str: + data = reponse.json() + return data["choices"][0]["message"]["content"] diff --git a/llm_classes/pixtral_large.py b/llm_classes/pixtral_large.py new file mode 100644 index 0000000..b39ef6a --- /dev/null +++ b/llm_classes/pixtral_large.py @@ -0,0 +1,31 @@ +from .base_llm import BaseLLM +import requests + +class PixtralLarge(BaseLLM): + + def __init__(self): + super().__init__("pixtral-large-latest") + self.configurer(temperature=0.2, top_p=1) + + def urlBase(self) -> str: + return "https://api.mistral.ai/v1/" + + def cleAPI(self) -> str: + return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh" + + def urlFonction(self) -> str: + return "chat/completions" + + def _preparer_contenu(self, question: str) -> dict: + return { + "model": self.modele, + "messages": [ + {"role": "system", "content": self.prompt_system}, + {"role": "user", "content": question} + ], + **self.params + } + + def _traiter_reponse(self, reponse: requests.Response) -> str: + data = reponse.json() + return data["choices"][0]["message"]["content"] diff --git a/llm_classes/pixtral_medium.py b/llm_classes/pixtral_medium.py new file mode 100644 index 0000000..897f935 --- /dev/null +++ b/llm_classes/pixtral_medium.py @@ -0,0 +1,31 @@ +from .base_llm import BaseLLM +import requests + +class PixtralMedium(BaseLLM): + + def __init__(self): + super().__init__("pixtral-medium-latest") + self.configurer(temperature=0.2, top_p=1) + + def urlBase(self) -> str: + return "https://api.mistral.ai/v1/" + + def cleAPI(self) -> str: + return "2iGzTzE9csRQ9IoASoUjplHwEjA200Vh" + + def urlFonction(self) -> str: + return "chat/completions" + + def 
diff --git a/llm_classes/pixtral_medium.py b/llm_classes/pixtral_medium.py
new file mode 100644
index 0000000..897f935
--- /dev/null
+++ b/llm_classes/pixtral_medium.py
@@ -0,0 +1,33 @@
+import os
+import requests
+from .base_llm import BaseLLM
+
+class PixtralMedium(BaseLLM):
+
+    def __init__(self):
+        super().__init__("pixtral-medium-latest")
+        self.configurer(temperature=0.2, top_p=1)
+
+    def urlBase(self) -> str:
+        return "https://api.mistral.ai/v1/"
+
+    def cleAPI(self) -> str:
+        # The key is read from the environment; never commit credentials
+        return os.environ.get("MISTRAL_API_KEY", "")
+
+    def urlFonction(self) -> str:
+        return "chat/completions"
+
+    def _preparer_contenu(self, question: str) -> dict:
+        return {
+            "model": self.modele,
+            "messages": [
+                {"role": "system", "content": self.prompt_system},
+                {"role": "user", "content": question}
+            ],
+            **self.params
+        }
+
+    def _traiter_reponse(self, reponse: requests.Response) -> str:
+        data = reponse.json()
+        return data["choices"][0]["message"]["content"]
diff --git a/orchestrator.py b/orchestrator.py
new file mode 100644
index 0000000..76578c9
--- /dev/null
+++ b/orchestrator.py
@@ -0,0 +1,13 @@
+import os
+import json
+from typing import List, Dict
+from agents.agent_json_analyser import AgentJsonAnalyser
+from agents.agent_image_sorter import AgentImageSorter
+from agents.agent_image_analyser import AgentImageAnalyser
+from agents.agent_report_generator import AgentReportGenerator
+from llm_classes.MistralMesh import MistralMesh
+
+
+class Orchestrator:
+    """
+    """
\ No newline at end of file
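Orchestrator is still an empty stub, and it imports a MistralMesh class that this patch does not add. For reference, one possible end-to-end wiring of the pieces above, sketched with an Ollama backend, a hypothetical ticket shape, and hypothetical image descriptions; it also assumes the ../reports/json_reports and ../reports/markdown_reports directories exist:

from agents.agent_json_analyser import AgentJsonAnalyser
from agents.agent_image_sorter import AgentImageSorter
from agents.agent_image_analyser import AgentImageAnalyser
from agents.agent_report_generator import AgentReportGenerator
from llm_classes.ollama import Ollama

llm = Ollama("mistral")                 # any BaseLLM subclass from this patch fits

json_agent = AgentJsonAnalyser(llm)
sorter = AgentImageSorter(llm)
image_agent = AgentImageAnalyser(llm)
reporter = AgentReportGenerator(llm)

ticket = {"id": 42, "titre": "Ecran bleu au demarrage"}        # hypothetical ticket
descriptions = ["capture d'ecran du message d'erreur"]         # hypothetical images

analyse = json_agent.executer(ticket)
pertinentes = [d for d in descriptions if sorter.executer(d)]  # keep relevant images
details = [image_agent.executer(d, analyse) for d in pertinentes]

reporter.executer({"ticket": analyse, "images": "\n".join(details)}, "ticket_42")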