# ollama_interface.py

```python
import requests


class OllamaInterface:
    """Thin wrapper around the local Ollama generate endpoint."""

    def __init__(self, model="mistral", **params):
        self.model = model
        self.params = params  # sampling options: temperature, top_k, top_p, ...
        self.api_url = "http://localhost:11434/api/generate"

    def generate(self, prompt: str) -> str:
        # Ollama streams by default; "stream": False returns a single JSON object.
        # Sampling parameters must be nested under "options" to take effect.
        payload = {
            "model": self.model,
            "prompt": prompt,
            "stream": False,
            "options": self.params,
        }
        try:
            response = requests.post(self.api_url, json=payload, timeout=120)
            response.raise_for_status()
            return response.json().get("response", "")
        except Exception as e:
            return f"[Error] {e}"
```
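A quick smoke test for the wrapper, assuming an Ollama server is running on its default port and the model has already been pulled (`ollama pull mistral`):

```python
# Minimal check that the local Ollama server answers; prints an
# "[Error] ..." string instead of raising if the server is down.
llm = OllamaInterface(model="mistral", temperature=0.7)
print(llm.generate("Reply with a single word: pong."))
```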
# base_agent.py

```python
from ollama_interface import OllamaInterface


class BaseAgent:
    def __init__(self, name, model="mistral", system_prompt="", **params):
        self.name = name
        self.system_prompt = system_prompt.strip()
        self.llm = OllamaInterface(model=model, **params)

    def process(self, input_text: str) -> str:
        prompt = f"{self.system_prompt}\n\n{input_text}" if self.system_prompt else input_text
        return self.llm.generate(prompt)
```
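`BaseAgent` can also be used directly, without subclassing. A hypothetical one-off agent (the name and prompt below are illustrative, not from the note), assuming the same local setup as above:

```python
# Ad-hoc agent built straight from BaseAgent, no subclass needed.
summarizer = BaseAgent(
    name="Summarizer",
    model="mistral",
    system_prompt="Summarize the input in three bullet points.",
    temperature=0.4,
)
print(summarizer.process("Ollama exposes local LLMs over a small HTTP API."))
```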
# agents.py

```python
from base_agent import BaseAgent


class ObsidianAgent(BaseAgent):
    def __init__(self):
        super().__init__(
            name="ObsidianAgent",
            model="mistral",
            system_prompt="You are an assistant specialized in structuring and summarizing Markdown notes in Obsidian.",
            temperature=0.6, top_k=40, top_p=0.95
        )


class CursorAgent(BaseAgent):
    def __init__(self):
        super().__init__(
            name="CursorAgent",
            model="codellama:13b-python",
            system_prompt="You are a development assistant integrated into Cursor, specialized in Python. Stay precise, fast, and concise.",
            temperature=0.3, top_k=60, top_p=0.9
        )


class MistralServerAgent(BaseAgent):
    def __init__(self):
        super().__init__(
            name="MistralServerAgent",
            model="mistral:7b-instruct",
            system_prompt="You are a concise, responsive chatbot used through a web interface.",
            temperature=0.7, top_k=40, top_p=0.92
        )
```
# agent_chain.py

```python
class AgentChain:
    def __init__(self, chain_mode=True):
        self.agents = []
        self.chain_mode = chain_mode

    def add_agent(self, agent):
        self.agents.append(agent)

    def run(self, input_text):
        if not self.agents:
            return "[Error] No agents registered."

        if self.chain_mode:
            # Pipe each agent's output into the next agent's input.
            result = input_text
            for agent in self.agents:
                result = agent.process(result)
            return result
        else:
            # Fan out: every agent answers the same input independently.
            return {agent.name: agent.process(input_text) for agent in self.agents}
```
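One caveat: in chain mode, a failing agent returns an `[Error] ...` string, which then becomes the next agent's input. A minimal guard, sketched here as a hypothetical variant of `run` that relies on the prefix produced by `OllamaInterface.generate`:

```python
# Hypothetical: stop the chain at the first failing agent instead of
# forwarding the "[Error] ..." string downstream.
def run_strict(self, input_text):
    result = input_text
    for agent in self.agents:
        result = agent.process(result)
        if result.startswith("[Error]"):
            return f"[Chain stopped at {agent.name}] {result}"
    return result
```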
# main.py

```python
from agents import ObsidianAgent, CursorAgent, MistralServerAgent
from agent_chain import AgentChain

obsidian = ObsidianAgent()
cursor = CursorAgent()
mistral_web = MistralServerAgent()

# Example with chain_mode=False to collect every agent's answer separately
dual_chain = AgentChain(chain_mode=False)
dual_chain.add_agent(obsidian)
dual_chain.add_agent(cursor)
dual_chain.add_agent(mistral_web)

input_text = "Organize my Markdown notes on a Python chatbot project."

results = dual_chain.run(input_text)
for agent_name, output in results.items():
    print(f"\n[{agent_name}]\n{output}")

# Example with chain_mode=True to chain the responses
print("\n--- Chained Mode ---")
chained = AgentChain(chain_mode=True)
chained.add_agent(obsidian)
chained.add_agent(cursor)
chained.add_agent(mistral_web)

final_output = chained.run(input_text)
print("\n[Final Chained Result]\n", final_output)
```
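As a possible extension of main.py (not in the original note), the same agents also drop into a simple interactive loop:

```python
# Hypothetical interactive loop around ObsidianAgent; an empty line exits.
if __name__ == "__main__":
    agent = ObsidianAgent()
    while True:
        text = input("> ").strip()
        if not text:
            break
        print(agent.process(text))
```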
[[Agent LLM recherches 2]]
[[Création de classes agents pour LLM locaux avec Cursor et Ollama]]