LLM ้ฉ้ ๅจ้็ผๆๅ (LLM Adapter Development Guide)
่งฃๆฑบ้ขจ้ช: ๅฐ็นๅฎๆ่กๆฃง (Gemini) ็ๅผทไพ่ณด
ๆฌๆๅ่ชชๆๅฆไฝ็บ Boring-Gemini ๆทปๅ ๆฐ็ LLM Provider๏ผๅฏฆ็พๆ่กๆฃงๅคๅ ๅใ
๐ฏ ๆฆ่ฟฐ
Boring-Gemini ไฝฟ็จๆฝ่ฑก็ LLMProvider ไป้ขไพๆฏๆๅค็จฎ่ช่จๆจกๅๅพ็ซฏ๏ผ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ LLM Provider ๆถๆง โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโค
โ โ
โ โโโโโโโโโโโโโโโโโโโ โ
โ โ LLMProvider โ โ
โ โ (Abstract) โ โ
โ โโโโโโโโโโฌโโโโโโโโโ โ
โ โ โ
โ โโโโโโโโโโโโโโโโโฌโโโโโโโโโโโโผโโโโโโโโโโโโฌโโโโโโโโโโโโโโโโ โ
โ โผ โผ โผ โผ โผ โ
โ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โ
โ โ Gemini โ โ Ollama โ โ OpenAI โ โ Claude โ โ ไฝ ็ โ โ
โ โ Provider โ โ Provider โ โ Compat โ โ Adapter โ โ Provider โ โ
โ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โโโโโโโโโโโโโ โ
โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
๐ฆ ๆ ธๅฟไป้ข
LLMProvider (ๆฝ่ฑกๅบ้ก)
ไฝ็ฝฎ: src/boring/llm/provider.py
from abc import abstractmethod
from boring.interfaces import LLMClient, LLMResponse
class LLMProvider(LLMClient):
    """Abstract contract for a pluggable LLM backend.

    Extends LLMClient so that concrete backends (Gemini, Ollama,
    LMStudio, etc.) can be configured and swapped freely.
    """

    @property
    @abstractmethod
    def model_name(self) -> str:
        """Identifier of the specific model in use."""
        ...

    @property
    @abstractmethod
    def is_available(self) -> bool:
        """Whether the provider/CLI is installed, configured and ready."""
        ...

    @abstractmethod
    def generate(
        self,
        prompt: str,
        context: str = "",
        system_instruction: str = "",
        timeout_seconds: int = 600,
    ) -> tuple[str, bool]:
        """Produce text for *prompt*, optionally prefixed with *context*.

        Returns:
            tuple[str, bool]: (generated_text, success)
        """
        ...

    @abstractmethod
    def generate_with_tools(
        self,
        prompt: str,
        context: str = "",
        system_instruction: str = "",
        timeout_seconds: int = 600,
    ) -> LLMResponse:
        """Produce text and/or function calls.

        Returns:
            LLMResponse carrying text and function_calls.
        """
        ...

    def get_token_usage(self) -> dict[str, int]:
        """Token usage statistics; empty dict when the backend does not track them."""
        return {}
LLMResponse (ๆธๆๆจกๅ)
ไฝ็ฝฎ: src/boring/interfaces.py
from dataclasses import dataclass, field
from typing import Any
@dataclass
class LLMResponse:
    """Normalized response returned by every LLM provider."""

    # Plain-text completion (may be empty when only tool calls come back).
    text: str = ""
    # Structured tool/function invocations requested by the model.
    function_calls: list[dict[str, Any]] = field(default_factory=list)
    # False when the call failed; details go in `error`.
    success: bool = True
    # Human-readable failure description; empty on success.
    error: str = ""
    # Backend-specific payload, kept for debugging and advanced callers.
    raw_response: Any = None
๐ ๅฏฆ็พๆฐ็ Provider
ๆญฅ้ฉ 1: ๅตๅปบ Provider ๆไปถ
ๅจ src/boring/llm/ ๅตๅปบๆฐๆไปถ๏ผไพๅฆ my_provider.py:
"""
My Custom LLM Provider Implementation
"""
from pathlib import Path
from typing import Optional

import requests

from ..logger import get_logger
from .provider import LLMProvider, LLMResponse
_logger = get_logger("my_provider")
class MyProvider(LLMProvider):
    """
    Provider for My Custom LLM Service.

    Talks to an OpenAI-style ``/v1/chat/completions`` endpoint.

    Args:
        model_name: Model identifier sent with every request.
        api_key: Bearer token; without one the provider reports unavailable.
        base_url: Service root URL (trailing slash is stripped).
        log_dir: Directory for provider logs (defaults to ``logs``).
    """

    def __init__(
        self,
        model_name: str = "my-model-v1",
        api_key: Optional[str] = None,
        base_url: str = "https://api.my-llm.com",
        log_dir: Optional[Path] = None,
    ):
        self._model_name = model_name
        self.api_key = api_key
        self.base_url = base_url.rstrip("/")
        self.log_dir = log_dir or Path("logs")
        # Only build a client when credentials were supplied.
        self._client = None
        if self.api_key:
            self._client = self._initialize_client()

    def _initialize_client(self):
        """Initialize the API client (template hook; fill in for a real service)."""
        # Your initialization logic
        pass

    @property
    def model_name(self) -> str:
        return self._model_name

    @property
    def provider_name(self) -> str:
        """Optional: Provider identifier"""
        return "my_provider"

    @property
    def is_available(self) -> bool:
        """Check if the provider is available and configured."""
        if not self.api_key:
            return False
        try:
            # Perform a health check here, e.g. ping the API.
            return True
        except Exception:
            return False

    def generate(
        self,
        prompt: str,
        context: str = "",
        system_instruction: str = "",
        timeout_seconds: int = 600,
    ) -> tuple[str, bool]:
        """
        Generate text using the LLM.

        Args:
            prompt: The user's prompt
            context: Additional context (code, documentation, etc.)
            system_instruction: System-level instructions
            timeout_seconds: Request timeout

        Returns:
            tuple[str, bool]: (generated_text, success)
        """
        if not self.is_available:
            return "Error: Provider not available", False
        try:
            # Prepend context (if any) so the model sees it before the prompt.
            full_prompt = f"{context}\n\n{prompt}" if context else prompt
            response = self._call_api(full_prompt, system_instruction, timeout_seconds)
            return response, True
        except Exception as e:
            _logger.error(f"Generation failed: {e}")
            return str(e), False

    def generate_with_tools(
        self,
        prompt: str,
        context: str = "",
        system_instruction: str = "",
        timeout_seconds: int = 600,
    ) -> LLMResponse:
        """
        Generate text with function calling support.

        If your provider doesn't support native function calling,
        you can parse tool calls from the text response.
        """
        text, success = self.generate(prompt, context, system_instruction, timeout_seconds)
        # If your provider supports function calling natively:
        # function_calls = self._extract_function_calls(raw_response)
        return LLMResponse(
            text=text,
            function_calls=[],  # Populate if supported
            success=success,
        )

    def _call_api(
        self, prompt: str, system_instruction: str, timeout: int
    ) -> str:
        """Make the actual API call.

        NOTE: ``requests`` must be imported at module level (not inside this
        method) so that unit tests can patch
        ``boring.llm.my_provider.requests.post``.
        """
        # Skip the system message entirely when no instruction was given,
        # instead of sending an empty-content "system" turn.
        messages = []
        if system_instruction:
            messages.append({"role": "system", "content": system_instruction})
        messages.append({"role": "user", "content": prompt})
        response = requests.post(
            f"{self.base_url}/v1/chat/completions",
            headers={"Authorization": f"Bearer {self.api_key}"},
            json={
                "model": self.model_name,
                "messages": messages,
            },
            timeout=timeout,
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    def get_token_usage(self) -> dict[str, int]:
        """Return token usage if tracked"""
        return {
            "input_tokens": 0,
            "output_tokens": 0,
            "total_tokens": 0,
        }
ๆญฅ้ฉ 2: ่จปๅ Provider
ๅจ src/boring/llm/__init__.py ไธญๅฐๅบ:
from .my_provider import MyProvider

# Public surface of boring.llm; keep in sync with the provider imports above.
__all__ = [
    "LLMProvider",
    "GeminiProvider",
    "OllamaProvider",
    "MyProvider",  # new
]
ๆญฅ้ฉ 3: ๆทปๅ ้ ็ฝฎๆฏๆ
ๅจ src/boring/config.py ๆทปๅ ้็ฝฎ:
# New provider settings, read from environment variables.
MY_PROVIDER_API_KEY: Optional[str] = os.getenv("MY_PROVIDER_API_KEY")
MY_PROVIDER_MODEL: str = os.getenv("MY_PROVIDER_MODEL", "my-model-v1")
MY_PROVIDER_BASE_URL: str = os.getenv(
    "MY_PROVIDER_BASE_URL", "https://api.my-llm.com"
)
ๆญฅ้ฉ 4: ๆทปๅ Provider ้ธๆ้่ผฏ
ๅจ้่ฆไฝฟ็จ LLM ็ๅฐๆนๆทปๅ ้ธๆ้่ผฏ:
from boring.config import settings
from boring.llm import GeminiProvider, OllamaProvider, MyProvider
def get_llm_provider() -> LLMProvider:
    """Return the first configured and available LLM provider.

    Priority: Gemini (cloud) > MyProvider > Ollama (local).

    Returns:
        The first provider whose ``is_available`` check passes.

    Raises:
        RuntimeError: when no provider in the chain is usable.
    """
    providers = [
        lambda: GeminiProvider() if settings.GOOGLE_API_KEY else None,
        lambda: MyProvider() if settings.MY_PROVIDER_API_KEY else None,
        # The loop below re-checks `is_available`, so construct Ollama
        # once instead of instantiating it twice just to probe it.
        lambda: OllamaProvider("llama3.2"),
    ]
    for get_provider in providers:
        try:
            provider = get_provider()
        except Exception:
            # A broken candidate must not abort the whole fallback chain.
            continue
        if provider and provider.is_available:
            return provider
    raise RuntimeError("No LLM provider available")
๐งช ๆธฌ่ฉฆไฝ ็ Provider
ๅฎๅ ๆธฌ่ฉฆ
ๅตๅปบ tests/unit/llm/test_my_provider.py:
import pytest
from unittest.mock import Mock, patch
from boring.llm.my_provider import MyProvider
class TestMyProvider:
    """Unit tests for MyProvider."""

    def test_initialization(self):
        """The constructor stores model name and API key."""
        subject = MyProvider(
            model_name="test-model",
            api_key="test-key",
        )
        assert subject.model_name == "test-model"
        assert subject.api_key == "test-key"

    def test_is_available_without_key(self):
        """A provider with no API key reports itself unavailable."""
        assert MyProvider(api_key=None).is_available is False

    @patch("boring.llm.my_provider.requests.post")
    def test_generate_success(self, mock_post):
        """A 200 response yields (text, True)."""
        fake_response = mock_post.return_value
        fake_response.status_code = 200
        fake_response.json.return_value = {
            "choices": [{"message": {"content": "Hello, World!"}}]
        }
        text, ok = MyProvider(api_key="test-key").generate("Say hello")
        assert ok is True
        assert "Hello" in text

    @patch("boring.llm.my_provider.requests.post")
    def test_generate_failure(self, mock_post):
        """A transport error surfaces as (error_text, False)."""
        mock_post.side_effect = Exception("API Error")
        text, ok = MyProvider(api_key="test-key").generate("Say hello")
        assert ok is False
        assert "Error" in text or "API Error" in text
ๆดๅๆธฌ่ฉฆ
ๅตๅปบ tests/integration/test_my_provider_integration.py:
import os
import pytest
from boring.llm.my_provider import MyProvider
@pytest.mark.integration
@pytest.mark.skipif(
    not os.getenv("MY_PROVIDER_API_KEY"),
    reason="MY_PROVIDER_API_KEY not set"
)
class TestMyProviderIntegration:
    """End-to-end tests against the live API (requires a real key)."""

    def test_real_generation(self):
        """A trivial arithmetic prompt should answer with '4'."""
        client = MyProvider(api_key=os.getenv("MY_PROVIDER_API_KEY"))
        reply, ok = client.generate("What is 2+2? Reply with just the number.")
        assert ok is True
        assert "4" in reply
๐ ๅฏฆ็พๆชขๆฅๆธ ๅฎ
ๅจๆไบค PR ๅ็ขบ่ช:
- [ ] ๅฏฆ็พไบ LLMProvider ็ๆๆๆฝ่ฑกๆนๆณ
- [ ] ่็ไบ API ้ฏ่ชคๅ่ถ ๆ
- [ ] ๆทปๅ ไบ้ฉ็ถ็ๆฅ่ช่จ้
- [ ] ้ ็ฝฎ้้็ฐๅข่ฎ้่ฎๅ
- [ ] ๆ is_available ๅฅๅบทๆชขๆฅ
- [ ] ็ทจๅฏซไบๅฎๅ ๆธฌ่ฉฆ (โฅ80% ่ฆ่)
- [ ] ็ทจๅฏซไบๆดๅๆธฌ่ฉฆ (ๅฏ้ธ๏ผ้็ๅฏฆ API)
- [ ] ๆดๆฐไบ docs/reference/feature-matrix.md
- [ ] ๆดๆฐไบ pyproject.toml๏ผๆทปๅ ๅฏ้ธไพ่ณด (ๅฆ้)
- [ ] ๆดๆฐไบ README๏ผ่ชชๆๆฐ Provider
๐ ๅ่ฝ้็ด็ญ็ฅ
้็ด้
Gemini (้ฒ็ซฏ) โ Ollama (ๆฌๅฐ) โ ๅ่ฝ็ฆ็จ
โ โ โ
โผ โผ โผ
API Key ๆฌๅฐ้่ก ้ฏ่ชคๆ็คบ
ๅฏฆ็พ้็ด
def generate_with_fallback(prompt: str) -> tuple[str, bool]:
    """Walk the provider priority list; return the first successful generation."""
    # Ordered by preference: cloud Gemini first, then local Ollama.
    chain = [
        ("gemini", lambda: GeminiProvider()),
        ("ollama", lambda: OllamaProvider("llama3.2")),
    ]
    for name, factory in chain:
        try:
            candidate = factory()
            if candidate.is_available:
                return candidate.generate(prompt)
        except Exception as e:
            _logger.warning(f"Provider {name} failed: {e}")
            continue
    return "Error: All LLM providers unavailable", False
๐ท๏ธ ็พๆ Provider ๅ่
| Provider | ๆไปถ | ็น้ป |
|---|---|---|
| GeminiProvider | gemini.py | SDK + CLI ้ๆจกๅผ |
| OllamaProvider | ollama.py | ๆฌๅฐ้่ก๏ผ็ก้ API Key |
| OpenAICompatProvider | openai_compat.py | ้็จ OpenAI ๅ ผๅฎน API |
| ClaudeAdapter | claude_adapter.py | Anthropic Claude |
๐ค ่ฒข็ป
ๅฆๆไฝ ๅฏฆ็พไบๆฐ็ Provider๏ผๆญก่ฟๆไบค PR๏ผ
่ซ็ขบไฟ: 1. ้ตๅพชๆฌๆๅ็่ฆ็ฏ 2. ้้ๆๆๆธฌ่ฉฆ 3. ๆดๆฐๆๆช
่ฉณ่ฆ: Contributing Guide
ๆๅพๆดๆฐ: 2026-01-12 | ็ๆฌ: 1.0.0