| | @@ -0,0 +1,100 @@ |
| 1 | +package oracle
|
| 2 | +
|
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"
)
|
| 12 | +
|
// OpenAIProvider calls any OpenAI-compatible chat completion API.
// Works with OpenAI, Anthropic (via compatibility layer), local Ollama, etc.
type OpenAIProvider struct {
	BaseURL string // e.g. "https://api.openai.com/v1"
	APIKey  string // bearer token sent with every request
	Model   string // chat model identifier, e.g. "gpt-4o-mini"
	http    *http.Client
}

// NewOpenAIProvider creates a provider from environment variables:
//
//	ORACLE_OPENAI_BASE_URL (default: https://api.openai.com/v1)
//	ORACLE_OPENAI_API_KEY  (required)
//	ORACLE_OPENAI_MODEL    (default: gpt-4o-mini)
//
// The returned provider's HTTP client carries a timeout so a stalled server
// cannot hang a call forever; callers can still tighten the deadline per
// request via the context passed to Summarize.
func NewOpenAIProvider() *OpenAIProvider {
	baseURL := os.Getenv("ORACLE_OPENAI_BASE_URL")
	if baseURL == "" {
		baseURL = "https://api.openai.com/v1"
	}
	model := os.Getenv("ORACLE_OPENAI_MODEL")
	if model == "" {
		model = "gpt-4o-mini"
	}
	return &OpenAIProvider{
		BaseURL: baseURL,
		APIKey:  os.Getenv("ORACLE_OPENAI_API_KEY"),
		Model:   model,
		// A zero-value http.Client has no Timeout and waits indefinitely on
		// an unresponsive server; 60s comfortably covers slow completions.
		http: &http.Client{Timeout: 60 * time.Second},
	}
}
|
| 43 | +
|
| 44 | +// Summarize calls the chat completions endpoint with the given prompt.
|
| 45 | +func (p *OpenAIProvider) Summarize(ctx context.Context, prompt string) (string, error) {
|
| 46 | + if p.APIKey == "" {
|
| 47 | + return "", fmt.Errorf("ORACLE_OPENAI_API_KEY is not set")
|
| 48 | + }
|
| 49 | +
|
| 50 | + body, _ := json.Marshal(map[string]any{
|
| 51 | + "model": p.Model,
|
| 52 | + "messages": []map[string]string{
|
| 53 | + {"role": "user", "content": prompt},
|
| 54 | + },
|
| 55 | + "max_tokens": 512,
|
| 56 | + })
|
| 57 | +
|
| 58 | + req, err := http.NewRequestWithContext(ctx, "POST", p.BaseURL+"/chat/completions", bytes.NewReader(body))
|
| 59 | + if err != nil {
|
| 60 | + return "", err
|
| 61 | + }
|
| 62 | + req.Header.Set("Authorization", "Bearer "+p.APIKey)
|
| 63 | + req.Header.Set("Content-Type", "application/json")
|
| 64 | +
|
| 65 | + resp, err := p.http.Do(req)
|
| 66 | + if err != nil {
|
| 67 | + return "", fmt.Errorf("openai request: %w", err)
|
| 68 | + }
|
| 69 | + defer resp.Body.Close()
|
| 70 | +
|
| 71 | + data, _ := io.ReadAll(resp.Body)
|
| 72 | + if resp.StatusCode != http.StatusOK {
|
| 73 | + return "", fmt.Errorf("openai error %d: %s", resp.StatusCode, string(data))
|
| 74 | + }
|
| 75 | +
|
| 76 | + var result struct {
|
| 77 | + Choices []struct {
|
| 78 | + Message struct {
|
| 79 | + Content string `json:"content"`
|
| 80 | + } `json:"message"`
|
| 81 | + } `json:"choices"`
|
| 82 | + }
|
| 83 | + if err := json.Unmarshal(data, &result); err != nil {
|
| 84 | + return "", fmt.Errorf("openai parse: %w", err)
|
| 85 | + }
|
| 86 | + if len(result.Choices) == 0 {
|
| 87 | + return "", fmt.Errorf("openai returned no choices")
|
| 88 | + }
|
| 89 | + return result.Choices[0].Message.Content, nil
|
| 90 | +}
|
| 91 | +
|
// StubProvider returns a fixed summary. Used in tests and when no LLM is configured.
type StubProvider struct {
	Response string // canned summary handed back by Summarize
	Err      error  // optional error handed back alongside Response
}

// Summarize ignores both the context and the prompt and simply returns the
// configured Response/Err pair unchanged.
func (s *StubProvider) Summarize(_ context.Context, _ string) (string, error) {
	summary, failure := s.Response, s.Err
	return summary, failure
}
|