import json
from datetime import timedelta
from typing import List

from .llm import LLM


class Ollama(LLM):
    """
    Integration with the Ollama API.

    This class inherits from the LLM base class.
    """

    def __init__(self):
        """
        Initialize the attributes specific to Ollama.
        """
        super().__init__()

        # Ollama-specific request fields
        self.suffix: str = ""
        self.images: bytes = b""  # Equivalent of a Buffer in WLanguage
        self.template: str = ""
        self.raw: bool = False
        self.keep_alive: timedelta = timedelta(minutes=5)  # 5 min in WLanguage

        # Default generation options
        self.o_mirostat: int = 0
        self.o_mirostat_eta: float = 0.1
        self.o_mirostat_tau: float = 5.0
        self.o_num_ctx: int = 2048
        self.o_repeat_last_n: int = 64
        self.o_repeat_penalty: float = 1.1
        self.o_temperature: float = 0.8
        self.o_seed: int = 0
        self.o_stop: List[str] = []
        self.o_num_predict: int = -1
        self.o_top_k: int = 40
        self.o_top_p: float = 0.9
        self.o_min_p: float = 0.0
|

    def urlBase(self) -> str:
        """
        Return the base URL of the Ollama API.
        """
        return "http://217.182.105.173:11434/"
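
    # Note: a default local Ollama install listens on "http://localhost:11434/";
    # the hard-coded address above targets a remote instance.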

    def cleAPI(self) -> str:
        """
        Return the Ollama API key (empty by default).
        """
        return ""

    def Interroger(self, question: str) -> str:
        """
        Query Ollama with a question.

        Sets the endpoint path before delegating the call.
        """
        self.urlFonction = "api/generate"
        return self.LLM_POST(question)
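
    # Note: "api/generate" is Ollama's single-turn completion endpoint; the API
    # also provides "api/chat" for multi-turn conversations, which this class
    # does not wire up.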

    def _even_LLM_POST(self, question: str) -> None:
        """
        Prepare the request content for Ollama.
        """
        # Base parameters
        self._Contenu["system"] = self.prompt_system
        self._Contenu["prompt"] = question
        self._Contenu["stream"] = self._stream
        self._Contenu["suffix"] = self.suffix
        self._Contenu["format"] = self.format
        self._Contenu["raw"] = self.raw

        # Convert the timedelta to a number of seconds for keep_alive
        if isinstance(self.keep_alive, timedelta):
            self._Contenu["keep_alive"] = int(self.keep_alive.total_seconds())
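
        # Note: Ollama also accepts duration strings such as "5m" for
        # keep_alive; sending the integer number of seconds, as above, is
        # equally valid.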

        # Image handling, if any images are attached
        if self.images:
            # To be implemented if needed
            pass

        # Advanced options
        self._Contenu["options"] = {
            "mirostat": self.o_mirostat,
            "mirostat_eta": self.o_mirostat_eta,
            "mirostat_tau": self.o_mirostat_tau,
            "num_ctx": self.o_num_ctx,
            "repeat_last_n": self.o_repeat_last_n,
            "repeat_penalty": self.o_repeat_penalty,
            "temperature": self.o_temperature,
            "seed": self.o_seed,
            "stop": self.o_stop,
            "num_predict": self.o_num_predict,
            "top_k": self.o_top_k,
            "top_p": self.o_top_p,
            "min_p": self.o_min_p,
        }
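
        # Illustrative shape of the resulting non-streaming request body, with
        # the defaults above (the "model" field is presumably contributed by
        # the LLM base class):
        #
        #   {
        #     "system": "<prompt_system>", "prompt": "<question>",
        #     "stream": false, "suffix": "", "format": "", "raw": false,
        #     "keep_alive": 300,
        #     "options": {"mirostat": 0, "temperature": 0.8, "top_k": 40,
        #                 "top_p": 0.9, "num_ctx": 2048, ...}
        #   }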

    def _interrogerRetourneReponse(self, reponse: str) -> str:
        """
        Extract the answer from the JSON returned by Ollama.
        """
        data = json.loads(reponse)
        return data["response"]
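

# Minimal usage sketch (illustrative): relies on the LLM base class providing
# `prompt_system` and the `LLM_POST` transport used above. Because of the
# relative import at the top, run this with `python -m <package>.<module>`
# rather than invoking the file directly.
if __name__ == "__main__":
    client = Ollama()
    client.prompt_system = "You are a helpful assistant."
    client.o_temperature = 0.2  # lower temperature => more deterministic output
    print(client.Interroger("Why is the sky blue?"))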