# Chat Agent Example

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	"github.com/sashabaranov/go-openai"
	"github.com/sashabaranov/go-openai/jsonschema"
	"github.com/rModel/rModel"
)

func main() {
	// Initialize brain configuration
	bp := rModel.NewBrainPrint()

	// Register processing units
	bp.AddNeuron("llm", chatLLM)      // LLM conversation module
	bp.AddNeuron("action", callTools) // Tool execution module

	// Configure processing flow
	_, _ = bp.AddEntryLink("llm")                  // Entry point
	continueLink, _ := bp.AddLink("llm", "action") // LLM → Tools
	_, _ = bp.AddLink("action", "llm")             // Tools → LLM
	endLink, _ := bp.AddEndLink("llm")             // Exit point

	// Configure decision routing
	_ = bp.AddLinkToCastGroup("llm", "continue", continueLink)
	_ = bp.AddLinkToCastGroup("llm", "end", endLink)
	_ = bp.BindCastGroupSelectFunc("llm", llmNext)

	// Start processing
	brain := bp.Build()
	_ = brain.EntryWithMemory(
		"messages", []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "What's the weather in Boston today?"},
		})
	brain.Wait()

	// Output final state
	messages, _ := json.Marshal(brain.GetMemory("messages"))
	fmt.Printf("Message history: %s\n", messages)
}
```
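For brevity the example discards every builder error (`_, _ =`, `_ =`). In real code you would check them so that a bad graph definition fails at startup rather than surfacing mid-run. A minimal sketch for one of the calls above (the same pattern applies to the others):

```go
// Error-checked variant of the cast-group wiring in main: fail fast
// instead of discarding the builder error.
if err := bp.AddLinkToCastGroup("llm", "continue", continueLink); err != nil {
	panic(fmt.Sprintf("wire cast group: %v", err))
}
```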
```go
var tools = []openai.Tool{
	{
		Type: openai.ToolTypeFunction,
		Function: &openai.FunctionDefinition{
			Name:        "get_current_weather",
			Description: "Retrieve current weather conditions",
			Parameters: jsonschema.Definition{
				Type: jsonschema.Object,
				Properties: map[string]jsonschema.Definition{
					"location": {
						Type:        jsonschema.String,
						Description: "City and state, e.g. San Francisco, CA",
					},
					"unit": {Type: jsonschema.String, Enum: []string{"celsius", "fahrenheit"}},
				},
				Required: []string{"location"},
			},
		},
	},
}

func chatLLM(b rModel.BrainRuntime) error {
	messages := b.GetMemory("messages").([]openai.ChatCompletionMessage)

	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model:    openai.GPT3Dot5Turbo0125,
			Messages: messages,
			Tools:    tools,
		},
	)
	if err != nil {
		return err
	}
	if len(resp.Choices) == 0 {
		return fmt.Errorf("chat completion returned no choices")
	}

	// Append the assistant reply (which may carry tool calls) to memory
	messages = append(messages, resp.Choices[0].Message)
	b.SetMemory("messages", messages)
	return nil
}
```
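The `tools` slice can hold more than one function definition. As a hypothetical illustration (not part of the original program), a second tool registered with the same `jsonschema` pattern might look like this; each new tool also needs a matching `case` in `callTools`:

```go
// Hypothetical additional tool, following the same schema pattern as
// get_current_weather above.
var forecastTool = openai.Tool{
	Type: openai.ToolTypeFunction,
	Function: &openai.FunctionDefinition{
		Name:        "get_forecast",
		Description: "Retrieve a multi-day weather forecast",
		Parameters: jsonschema.Definition{
			Type: jsonschema.Object,
			Properties: map[string]jsonschema.Definition{
				"location": {Type: jsonschema.String, Description: "City and state, e.g. San Francisco, CA"},
				"days":     {Type: jsonschema.Integer, Description: "Number of days to forecast"},
			},
			Required: []string{"location"},
		},
	},
}
```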
```go
func callTools(b rModel.BrainRuntime) error {
	messages := b.GetMemory("messages").([]openai.ChatCompletionMessage)
	lastMsg := messages[len(messages)-1]

	for _, call := range lastMsg.ToolCalls {
		switch call.Function.Name {
		case "get_current_weather":
			// Execute weather API call (mock implementation)
			fmt.Printf("Executing %s with params: %s\n",
				call.Function.Name, call.Function.Arguments)

			// Simulate API response
			messages = append(messages, openai.ChatCompletionMessage{
				Role:       openai.ChatMessageRoleTool,
				Content:    "Sunny, 22°C",
				ToolCallID: call.ID,
				Name:       call.Function.Name,
			})
		}
	}
	b.SetMemory("messages", messages)
	return nil
}
```
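The mock handler prints the raw JSON arguments without validating them. Before calling a real weather service you would decode them against the declared schema. A minimal sketch (`weatherArgs` and `parseWeatherArgs` are illustrative helpers, not part of the example above):

```go
// weatherArgs mirrors the get_current_weather parameter schema.
type weatherArgs struct {
	Location string `json:"location"`
	Unit     string `json:"unit,omitempty"`
}

// parseWeatherArgs decodes the JSON arguments the model attached to a tool call.
func parseWeatherArgs(raw string) (weatherArgs, error) {
	var args weatherArgs
	if err := json.Unmarshal([]byte(raw), &args); err != nil {
		return args, fmt.Errorf("invalid get_current_weather arguments: %w", err)
	}
	if args.Location == "" {
		return args, fmt.Errorf("missing required argument: location")
	}
	return args, nil
}
```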
```go
func llmNext(b rModel.BrainRuntime) string {
	messages := b.GetMemory("messages").([]openai.ChatCompletionMessage)
	lastMsg := messages[len(messages)-1]

	if len(lastMsg.ToolCalls) > 0 {
		return "continue" // Requires tool execution
	}
	return "end" // Conversation complete
}
```
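Because `llm` and `action` form a cycle, a misbehaving model could bounce between them indefinitely. One defensive option is to cap the number of round trips in the select function. A sketch, assuming `GetMemory` returns `nil` for unset keys and using a hypothetical `turns` counter key:

```go
// llmNextWithLimit routes like llmNext but ends the run after maxTurns
// tool round trips, guarding against an llm ↔ action loop.
func llmNextWithLimit(b rModel.BrainRuntime) string {
	const maxTurns = 5

	turns, _ := b.GetMemory("turns").(int) // 0 when the key is unset
	b.SetMemory("turns", turns+1)

	messages := b.GetMemory("messages").([]openai.ChatCompletionMessage)
	lastMsg := messages[len(messages)-1]
	if len(lastMsg.ToolCalls) > 0 && turns < maxTurns {
		return "continue"
	}
	return "end"
}
```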
- Initialization:
  - Configure processing nodes and connections
- Message Handling:
  - User input → LLM processing
  - LLM response analysis for tool requirements
- Tool Execution:
  - Parse function-call parameters
  - Simulate or make real API calls
  - Store results back in memory
- Response Generation:
  - Feed tool results back to the LLM
  - Generate the final user response
- Termination:
  - Exit when the conversation completes (an illustrative final message history appears below)
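On a successful run, the printed message history follows the standard OpenAI tool-calling sequence: user → assistant (with tool calls) → tool → assistant. The exact IDs, arguments, and final wording vary from run to run; an abbreviated, purely illustrative shape:

```json
[
  {"role": "user", "content": "What's the weather in Boston today?"},
  {"role": "assistant", "tool_calls": [{"id": "...", "type": "function",
    "function": {"name": "get_current_weather", "arguments": "{\"location\":\"Boston, MA\"}"}}]},
  {"role": "tool", "tool_call_id": "...", "name": "get_current_weather", "content": "Sunny, 22°C"},
  {"role": "assistant", "content": "..."}
]
```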