-rw-r--r--   docker-compose.yaml |   3
-rw-r--r--   main.go             | 197
2 files changed, 81 insertions, 119 deletions
diff --git a/docker-compose.yaml b/docker-compose.yaml
index a96c56b..49829c8 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -14,7 +14,8 @@ services:
         max-size: "100m"
     networks:
       - millanet
-    restart: unless-stopped
+    # restart: unless-stopped
+    restart: no
     command: ["--config", "/opt/milla/config.toml"]
     volumes:
       - ./config.toml:/opt/milla/config.toml
diff --git a/main.go b/main.go
--- a/main.go
+++ b/main.go
@@ -11,6 +11,7 @@ import (
     "net/http"
     "net/url"
     "os"
+    "strconv"
     "strings"
     "time"
 
@@ -23,60 +24,48 @@ import (
     "google.golang.org/api/option"
 )
 
+const (
+    milli = 1000
+)
+
 type TomlConfig struct {
     IrcServer           string
-    IrcPort             int
     IrcNick             string
     IrcSaslUser         string
     IrcSaslPass         string
-    IrcChannels         []string
     OllamaEndpoint      string
-    Temp                float64
-    OllamaSystem        string
-    RequestTimeout      int
-    MillaReconnectDelay int
-    EnableSasl          bool
     Model               string
     ChromaStyle         string
     ChromaFormatter     string
     Provider            string
     Apikey              string
+    OllamaSystem        string
+    Temp                float64
+    RequestTimeout      int
+    MillaReconnectDelay int
+    IrcPort             int
+    KeepAlive           int
+    MemoryLImit         int
     TopP                float32
     TopK                int32
-    Chat                bool
-    Admins              []string
     Color               bool
+    EnableSasl          bool
     SkipTLSVerify       bool
-}
-
-type OllamaResponse struct {
-    Response string `json:"response"`
+    Admins              []string
+    IrcChannels         []string
 }
 
 type OllamaRequestOptions struct {
     Temperature float64 `json:"temperature"`
 }
 
-type OllamaChatMessage struct {
+type OllamaChatResponse struct {
     Role    string `json:"role"`
     Content string `json:"content"`
 }
 
-type OllamaChatMessages struct {
-    Messages []OllamaChatMessage `json:"messages"`
-}
-
 type OllamaChatMessagesResponse struct {
-    Messages []OllamaChatMessage `json:"message"`
-}
-
-type OllamaRequest struct {
-    Model   string               `json:"model"`
-    System  string               `json:"system"`
-    Prompt  string               `json:"prompt"`
-    Stream  bool                 `json:"stream"`
-    Format  string               `json:"format"`
-    Options OllamaRequestOptions `json:"options"`
+    Messages OllamaChatResponse `json:"message"`
 }
 
 type OllamaChatRequest struct {
@@ -84,12 +73,12 @@ type OllamaChatRequest struct {
     Stream     bool                 `json:"stream"`
     Keep_alive time.Duration        `json:"keep_alive"`
     Options    OllamaRequestOptions `json:"options"`
-    Format     string               `json:"format"`
-    Messages   OllamaChatMessages   `json:"messages"`
+    Messages   []MemoryElement      `json:"messages"`
 }
 
-type OllamaChatResponse struct {
-    Messages OllamaChatMessages `json:"messages"`
+type MemoryElement struct {
+    Role    string `json:"role"`
+    Content string `json:"content"`
 }
 
 func printResponse(resp *genai.GenerateContentResponse) string {
@@ -108,6 +97,7 @@ func printResponse(resp *genai.GenerateContentResponse) string {
 }
 
 func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
+    var Memory []MemoryElement
     irc := girc.New(girc.Config{
         Server: appConfig.IrcServer,
         Port:   appConfig.IrcPort,
@@ -115,8 +105,10 @@
         User: appConfig.IrcNick,
         Name: appConfig.IrcNick,
         SSL:  true,
-        TLSConfig: &tls.Config{InsecureSkipVerify: appConfig.SkipTLSVerify,
-            ServerName: appConfig.IrcServer},
+        TLSConfig: &tls.Config{
+            InsecureSkipVerify: appConfig.SkipTLSVerify,
+            ServerName:         appConfig.IrcServer,
+        },
     })
 
     saslUser := appConfig.IrcSaslUser
@@ -135,7 +127,8 @@ func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
         }
     })
 
-    if appConfig.Provider == "ollama" {
+    switch appConfig.Provider {
+    case "ollama":
         irc.Handlers.AddBg(girc.PRIVMSG, func(client *girc.Client, event girc.Event) {
             if strings.HasPrefix(event.Last(), appConfig.IrcNick+": ") {
                 prompt := strings.TrimPrefix(event.Last(), appConfig.IrcNick+": ")
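The hunks above swap the old one-shot OllamaRequest for a chat-style request that ships the whole conversation under "messages". Below is a standalone sketch of the wire format implied by the struct tags; the types are copied from the diff, while the model name and field values are illustrative. One subtlety worth seeing in the output: Keep_alive is a time.Duration, which has no custom JSON encoding, so a bare cast like time.Duration(appConfig.KeepAlive) marshals as an integer count of nanoseconds.

// Standalone sketch; types copied from the diff, values illustrative.
package main

import (
    "encoding/json"
    "fmt"
    "time"
)

type OllamaRequestOptions struct {
    Temperature float64 `json:"temperature"`
}

type MemoryElement struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type OllamaChatRequest struct {
    Model      string               `json:"model"`
    Stream     bool                 `json:"stream"`
    Keep_alive time.Duration        `json:"keep_alive"`
    Options    OllamaRequestOptions `json:"options"`
    Messages   []MemoryElement      `json:"messages"`
}

func main() {
    request := OllamaChatRequest{
        Model:      "llama2", // illustrative model name
        Stream:     false,
        Keep_alive: time.Duration(600), // a bare int cast: 600 nanoseconds, not seconds
        Options:    OllamaRequestOptions{Temperature: 0.7},
        Messages: []MemoryElement{
            {Role: "user", Content: "hello"},
        },
    }

    payload, err := json.Marshal(request)
    if err != nil {
        panic(err)
    }

    // Prints:
    // {"model":"llama2","stream":false,"keep_alive":600,"options":{"temperature":0.7},"messages":[{"role":"user","content":"hello"}]}
    fmt.Println(string(payload))
}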
@@ -144,52 +137,32 @@ func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
                 var jsonPayload []byte
                 var err error
 
-                if appConfig.Chat {
-                    ollamaRequest := OllamaChatRequest{
-                        Model:  appConfig.Model,
-                        Stream: false,
-                        Format: "json",
-                        Messages: OllamaChatMessages{
-                            []OllamaChatMessage{{
-                                Role:    "user",
-                                Content: prompt,
-                            }},
-                        },
-                        Options: OllamaRequestOptions{
-                            Temperature: appConfig.Temp,
-                        },
-                    }
-                    jsonPayload, err = json.Marshal(ollamaRequest)
-                    if err != nil {
-                        client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
-
-                        return
-                    }
-                } else {
-                    ollamaRequest := OllamaRequest{
-                        Model:  appConfig.Model,
-                        System: appConfig.OllamaSystem,
-                        Prompt: prompt,
-                        Stream: false,
-                        Format: "json",
-                        Options: OllamaRequestOptions{
-                            Temperature: appConfig.Temp,
-                        },
-                    }
-                    jsonPayload, err = json.Marshal(ollamaRequest)
-                    if err != nil {
-                        client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
-
-                        return
-                    }
+                memoryElement := MemoryElement{
+                    Role:    "user",
+                    Content: prompt,
                 }
 
-                // jsonPayload, err := json.Marshal(ollamaRequest)
-                // if err != nil {
-                //     client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
+                if len(Memory) > appConfig.MemoryLImit {
+                    Memory = Memory[:0]
+                }
+
+                Memory = append(Memory, memoryElement)
+
+                ollamaRequest := OllamaChatRequest{
+                    Model:      appConfig.Model,
+                    Keep_alive: time.Duration(appConfig.KeepAlive),
+                    Stream:     false,
+                    Messages:   Memory,
+                    Options: OllamaRequestOptions{
+                        Temperature: appConfig.Temp,
+                    },
+                }
+                jsonPayload, err = json.Marshal(ollamaRequest)
+                log.Printf(string(jsonPayload))
+                if err != nil {
+                    client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
 
-                    return
-                }
+                    return
+                }
 
                 ctx, cancel := context.WithTimeout(context.Background(), time.Duration(appConfig.RequestTimeout)*time.Second)
                 defer cancel()
@@ -229,49 +202,37 @@ func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
                 defer response.Body.Close()
 
                 var writer bytes.Buffer
-                if appConfig.Chat {
-                    var ollamaChatResponse OllamaChatMessagesResponse
-                    err = json.NewDecoder(response.Body).Decode(&ollamaChatResponse)
-                    if err != nil {
-                        client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
-                    }
-                    err = quick.Highlight(&writer,
-                        ollamaChatResponse.Messages[0].Content,
-                        "markdown",
-                        appConfig.ChromaFormatter,
-                        appConfig.ChromaStyle)
-                    if err != nil {
-                        client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
+                var ollamaChatResponse OllamaChatMessagesResponse
+                err = json.NewDecoder(response.Body).Decode(&ollamaChatResponse)
+                if err != nil {
+                    client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
+                }
 
-                        return
-                    }
-                } else {
-                    var ollamaResponse OllamaResponse
-                    err = json.NewDecoder(response.Body).Decode(&ollamaResponse)
-                    if err != nil {
-                        client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
+                assistantElement := MemoryElement{
+                    Role:    "assistant",
+                    Content: ollamaChatResponse.Messages.Content,
                 }
 
-                        return
-                    }
+                Memory = append(Memory, assistantElement)
 
-                    err = quick.Highlight(&writer,
-                        ollamaResponse.Response,
-                        "markdown",
-                        appConfig.ChromaFormatter,
-                        appConfig.ChromaStyle)
-                    if err != nil {
-                        client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
+                log.Println(ollamaChatResponse)
+                err = quick.Highlight(&writer,
+                    ollamaChatResponse.Messages.Content,
+                    "markdown",
+                    appConfig.ChromaFormatter,
+                    appConfig.ChromaStyle)
+                if err != nil {
+                    client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
 
-                        return
-                    }
+                    return
                 }
 
                 log.Println(writer.String())
                 client.Cmd.Reply(event, writer.String())
             }
         })
-    } else if appConfig.Provider == "gemini" {
+    case "gemini":
         irc.Handlers.AddBg(girc.PRIVMSG, func(client *girc.Client, event girc.Event) {
             if strings.HasPrefix(event.Last(), appConfig.IrcNick+": ") {
                 prompt := strings.TrimPrefix(event.Last(), appConfig.IrcNick+": ")
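With the two hunks above the ollama path becomes stateful: each prompt is stored in Memory as a "user" turn, the decoded reply is appended as an "assistant" turn, and once the history grows past MemoryLImit it is cleared wholesale rather than trimmed. A minimal standalone sketch of that policy follows; the limit of 4 is illustrative. Note that the length check only runs before the user turn, so the history can overshoot the limit until the next prompt arrives.

// Standalone sketch of the memory policy; the limit is illustrative.
package main

import "fmt"

type MemoryElement struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

func main() {
    var memory []MemoryElement

    limit := 4 // illustrative stand-in for appConfig.MemoryLImit

    for i := 0; i < 6; i++ {
        // User turn: the whole history is dropped once it exceeds the limit,
        // mirroring the handler above.
        if len(memory) > limit {
            memory = memory[:0]
        }

        memory = append(memory, MemoryElement{Role: "user", Content: fmt.Sprint("prompt ", i)})

        // Assistant turn: appended unconditionally, as in the handler.
        memory = append(memory, MemoryElement{Role: "assistant", Content: fmt.Sprint("reply ", i)})
    }

    fmt.Println(len(memory)) // 6: the limit acts as a periodic reset, not a hard cap
}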
@@ -292,11 +253,11 @@ func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
                 clientGemini, err := genai.NewClient(ctx, option.WithAPIKey(appConfig.Apikey), option.WithHTTPClient(&httpClient))
                 // clientGemini, err := genai.NewClient(ctx, option.WithAPIKey(appConfig.Apikey))
-                // if err != nil {
-                //     client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
+                if err != nil {
+                    client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
 
-                //     return
-                // }
+                    return
+                }
                 defer clientGemini.Close()
 
                 model := clientGemini.GenerativeModel(appConfig.Model)
@@ -327,7 +288,7 @@ func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
                 client.Cmd.Reply(event, writer.String())
             }
         })
-    } else if appConfig.Provider == "chatgpt" {
+    case "chatgpt":
         irc.Handlers.AddBg(girc.PRIVMSG, func(client *girc.Client, event girc.Event) {
             if strings.HasPrefix(event.Last(), appConfig.IrcNick+": ") {
                 prompt := strings.TrimPrefix(event.Last(), appConfig.IrcNick+": ")
@@ -339,14 +300,14 @@ func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
                 allProxy := os.Getenv("ALL_PROXY")
                 config := openai.DefaultConfig(appConfig.Apikey)
                 if allProxy != "" {
-                    proxyUrl, err := url.Parse(allProxy)
+                    proxyURL, err := url.Parse(allProxy)
                     if err != nil {
                         client.Cmd.ReplyTo(event, fmt.Sprintf("error: %s", err.Error()))
 
                         return
                     }
                     transport := &http.Transport{
-                        Proxy: http.ProxyURL(proxyUrl),
+                        Proxy: http.ProxyURL(proxyURL),
                    }
 
                     config.HTTPClient = &http.Client{
@@ -401,7 +362,7 @@ func runIRC(appConfig TomlConfig, ircChan chan *girc.Client) {
     for {
         if err := irc.Connect(); err != nil {
             log.Println(err)
-            log.Println("reconnecting in {appConfig.MillaReconnectDelay/1000}")
+            log.Println("reconnecting in" + strconv.Itoa(appConfig.MillaReconnectDelay/milli))
             time.Sleep(time.Duration(appConfig.MillaReconnectDelay) * time.Second)
         } else {
             return
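On the decode side, the renamed OllamaChatMessagesResponse now maps a single "message" object rather than a "messages" array, matching what the struct tags expect from an Ollama /api/chat reply. A standalone sketch of that decode path, with the types copied from the diff; the JSON body is an illustrative assumption of the reply shape implied by those tags.

// Standalone sketch; types copied from the diff, JSON body illustrative.
package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

type OllamaChatResponse struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type OllamaChatMessagesResponse struct {
    Messages OllamaChatResponse `json:"message"`
}

func main() {
    // Stands in for response.Body in the handler above.
    body := strings.NewReader(`{"message":{"role":"assistant","content":"hello there"}}`)

    var ollamaChatResponse OllamaChatMessagesResponse
    if err := json.NewDecoder(body).Decode(&ollamaChatResponse); err != nil {
        panic(err)
    }

    // The handler stores this content back into Memory as the "assistant" turn.
    fmt.Println(ollamaChatResponse.Messages.Content) // hello there
}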