general cleanup

lyx0 2024-03-05 21:02:12 +01:00
parent 160e0b73fb
commit 45757905ee
5 changed files with 76 additions and 76 deletions

@@ -6,9 +6,7 @@ import (
"github.com/gempir/go-twitch-irc/v4"
)
- // handleCommand takes in a twitch.PrivateMessage and then routes the message
- // to the function that is responsible for each command and knows how to deal
- // with it accordingly.
+ // handleCommand is called each time a message starts with "()"
func (app *application) handleCommand(message twitch.PrivateMessage) {
var reply string
@@ -24,7 +22,7 @@ func (app *application) handleCommand(message twitch.PrivateMessage) {
if msgLen < 2 {
reply = "Not enough arguments provided. Usage: ()gpt <query>"
} else {
- switch app.OllamaContext {
+ switch app.config.ollamaContext {
case "none":
app.generateNoContext(message.Channel, message.Message[6:len(message.Message)])
return
@@ -39,7 +37,7 @@ func (app *application) handleCommand(message twitch.PrivateMessage) {
}
}
if reply != "" {
- go app.Send(message.Channel, reply)
+ go app.send(message.Channel, reply)
return
}
}
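For reference, the slice message.Message[6:len(message.Message)] in the hunk above strips the leading "()gpt " (six bytes) before the query is handed to ollama. A minimal standalone sketch of that indexing, not part of the commit:

package main

import "fmt"

func main() {
    // "()gpt " occupies bytes 0 through 5, so the query starts at index 6.
    msg := "()gpt tell me a joke"
    query := msg[6:] // same result as msg[6:len(msg)]
    fmt.Println(query) // tell me a joke
}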

@@ -12,12 +12,17 @@ OLLAMA_MODEL=wizard-vicuna-uncensored
# There are three context models to choose from, think of it like chat history.
# Each context is only stored until the bot is restarted.
- # More information: https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
+ # Huge context will probably slow the bot down.
+ #
# OLLAMA_CONTEXT must be one of those three:
# none: No additional message context is provided to ollama
# general: Each message sent to ollama will be added to a general context store and provided on the next use.
- # user: Each user gets their own context from the previous times they used the ()gpt command
+ # user: Each user gets their own context store from their previous interactions.
#
+ # "none" uses the /api/generate endpoint, "general" and "user" the /api/chat endpoint
+ # More information:
+ # /api/generate https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion
+ # /api/chat https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
OLLAMA_CONTEXT=user
# OLLAMA_SYSTEM provides general instructions to the ai model that it should follow.
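The comments above pair each OLLAMA_CONTEXT value with one of two Ollama endpoints. A small illustrative sketch of that mapping, not part of the commit; endpointFor is a hypothetical helper, and the URLs mirror the http.Post calls further down in this diff:

package main

import "fmt"

// endpointFor: "general" and "user" keep chat history and therefore use
// /api/chat, while "none" uses plain /api/generate.
func endpointFor(ollamaContext string) string {
    switch ollamaContext {
    case "general", "user":
        return "http://localhost:11434/api/chat"
    default: // "none"
        return "http://localhost:11434/api/generate"
    }
}

func main() {
    fmt.Println(endpointFor("user")) // http://localhost:11434/api/chat
    fmt.Println(endpointFor("none")) // http://localhost:11434/api/generate
}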

@@ -39,45 +39,45 @@ func (app *application) chatUserContext(target, username, input string) {
olm.Role = "user"
olm.Content = input
- app.UserMsgStore[username] = append(app.UserMsgStore[username], olm)
+ app.userMsgStore[username] = append(app.userMsgStore[username], olm)
- requestBody.Model = app.OllamaModel
- requestBody.System = app.OllamaSystem
- requestBody.Messages = app.UserMsgStore[username]
+ requestBody.Model = app.config.ollamaModel
+ requestBody.System = app.config.ollamaSystem
+ requestBody.Messages = app.userMsgStore[username]
requestBody.Prompt = input
requestBody.Stream = false
marshalled, err := json.Marshal(requestBody)
if err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
}
resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewBuffer(marshalled))
if err != nil {
- app.Log.Error(err.Error())
+ app.log.Error(err.Error())
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
- app.Log.Error(err.Error())
+ app.log.Error(err.Error())
}
var responseObject ollamaResponse
if err := json.Unmarshal(body, &responseObject); err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
}
olm.Role = responseObject.Message.Role
olm.Content = responseObject.Message.Content
- app.UserMsgStore[username] = append(app.UserMsgStore[username], olm)
+ app.userMsgStore[username] = append(app.userMsgStore[username], olm)
- app.Log.Infow("Message context for username",
- "Username", username,
- "Personal Context", app.UserMsgStore[username],
+ app.log.Infow("Message context for username",
+ "username", username,
+ "app.userMsgStore[username]", app.userMsgStore[username],
)
- app.Send(target, responseObject.Message.Content)
+ app.send(target, responseObject.Message.Content)
}
// chatGeneralContext provides additional message context from every past
@@ -87,76 +87,76 @@ func (app *application) chatGeneralContext(target, input string) {
olm.Role = "user"
olm.Content = input
- app.MsgStore = append(app.MsgStore, olm)
+ app.msgStore = append(app.msgStore, olm)
- requestBody.Model = app.OllamaModel
- requestBody.System = app.OllamaSystem
- requestBody.Messages = app.MsgStore
+ requestBody.Model = app.config.ollamaModel
+ requestBody.System = app.config.ollamaSystem
+ requestBody.Messages = app.msgStore
requestBody.Prompt = input
requestBody.Stream = false
marshalled, err := json.Marshal(requestBody)
if err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
}
resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewBuffer(marshalled))
if err != nil {
- app.Log.Error(err.Error())
+ app.log.Error(err.Error())
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
- app.Log.Error(err.Error())
+ app.log.Error(err.Error())
}
var responseObject ollamaResponse
if err := json.Unmarshal(body, &responseObject); err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
}
olm.Role = responseObject.Message.Role
olm.Content = responseObject.Message.Content
- app.MsgStore = append(app.MsgStore, olm)
+ app.msgStore = append(app.msgStore, olm)
- app.Log.Infow("MsgStore",
- "app.MsgStore", app.MsgStore,
+ app.log.Infow("app.msgStore",
+ "app.msgStore", app.msgStore,
)
- app.Send(target, responseObject.Message.Content)
+ app.send(target, responseObject.Message.Content)
}
// generateNoContext provides no additional message context
func (app *application) generateNoContext(target, input string) {
var requestBody ollamaRequest
- requestBody.Model = app.OllamaModel
- requestBody.System = app.OllamaSystem
+ requestBody.Model = app.config.ollamaModel
+ requestBody.System = app.config.ollamaSystem
requestBody.Prompt = input
requestBody.Stream = false
marshalled, err := json.Marshal(requestBody)
if err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
}
resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewBuffer(marshalled))
if err != nil {
- app.Log.Error(err.Error())
+ app.log.Error(err.Error())
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
- app.Log.Error(err.Error())
+ app.log.Error(err.Error())
}
var responseObject ollamaResponse
if err := json.Unmarshal(body, &responseObject); err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
}
- app.Send(target, responseObject.Response)
+ app.send(target, responseObject.Response)
}
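All three handlers above marshal the same request shape and decode the same response shape. A hedged sketch of the structs this usage implies; the field names come from the diff, but the package name and JSON tags are assumptions, and the real definitions elsewhere in the repository may differ:

package bot // hypothetical package name for this sketch

// ollamaMessage mirrors the Role/Content pairs appended to the message stores.
type ollamaMessage struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

// ollamaRequest mirrors the fields set before json.Marshal in each handler.
type ollamaRequest struct {
    Model    string          `json:"model"`
    System   string          `json:"system"`
    Prompt   string          `json:"prompt"`
    Stream   bool            `json:"stream"`
    Messages []ollamaMessage `json:"messages"`
}

// ollamaResponse mirrors the fields read after json.Unmarshal:
// Message is used by the /api/chat handlers, Response by /api/generate.
type ollamaResponse struct {
    Message  ollamaMessage `json:"message"`
    Response string        `json:"response"`
}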

main.go (50 changed lines)

@@ -12,18 +12,17 @@ import (
type config struct {
twitchUsername string
twitchOauth string
- commandPrefix string
+ ollamaModel string
+ ollamaContext string
+ ollamaSystem string
}
type application struct {
- TwitchClient *twitch.Client
- Log *zap.SugaredLogger
- OllamaModel string
- OllamaContext string
- OllamaSystem string
- Config config
- UserMsgStore map[string][]ollamaMessage
- MsgStore []ollamaMessage
+ twitchClient *twitch.Client
+ log *zap.SugaredLogger
+ config config
+ userMsgStore map[string][]ollamaMessage
+ msgStore []ollamaMessage
}
func main() {
@@ -44,25 +43,24 @@ func main() {
sugar.Fatal("Error loading .env")
}
// Twitch account config
cfg.twitchUsername = os.Getenv("TWITCH_USERNAME")
cfg.twitchOauth = os.Getenv("TWITCH_OAUTH")
+ cfg.ollamaModel = os.Getenv("OLLAMA_MODEL")
+ cfg.ollamaContext = os.Getenv("OLLAMA_CONTEXT")
+ cfg.ollamaSystem = os.Getenv("OLLAMA_SYSTEM")
tc := twitch.NewClient(cfg.twitchUsername, cfg.twitchOauth)
userMsgStore := make(map[string][]ollamaMessage)
app := &application{
- TwitchClient: tc,
- Log: sugar,
- OllamaModel: os.Getenv("OLLAMA_MODEL"),
- OllamaContext: os.Getenv("OLLAMA_CONTEXT"),
- OllamaSystem: os.Getenv("OLLAMA_SYSTEM"),
- Config: cfg,
- UserMsgStore: userMsgStore,
+ twitchClient: tc,
+ log: sugar,
+ config: cfg,
+ userMsgStore: userMsgStore,
}
// Received a PrivateMessage (normal chat message).
- app.TwitchClient.OnPrivateMessage(func(message twitch.PrivateMessage) {
+ app.twitchClient.OnPrivateMessage(func(message twitch.PrivateMessage) {
// roomId is the Twitch UserID of the channel the message originated from.
// If there is no roomId something went really wrong.
roomId := message.Tags["room-id"]
@@ -81,23 +79,23 @@ func main() {
}
})
- app.TwitchClient.OnConnect(func() {
- app.Log.Info("Successfully connected to Twitch Servers")
- app.Log.Info("Ollama Context: ", app.OllamaContext)
- app.Log.Info("Ollama System: ", app.OllamaSystem)
+ app.twitchClient.OnConnect(func() {
+ app.log.Info("Successfully connected to Twitch Servers")
+ app.log.Info("Ollama Context: ", app.config.ollamaContext)
+ app.log.Info("Ollama System: ", app.config.ollamaSystem)
})
channels := os.Getenv("TWITCH_CHANNELS")
channel := strings.Split(channels, ",")
for i := 0; i < len(channel); i++ {
- app.TwitchClient.Join(channel[i])
- app.TwitchClient.Say(channel[i], "MrDestructoid")
- app.Log.Infof("Joining channel: %s", channel[i])
+ app.twitchClient.Join(channel[i])
+ app.twitchClient.Say(channel[i], "MrDestructoid")
+ app.log.Infof("Joining channel: %s", channel[i])
}
// Actually connect to chat.
- err = app.TwitchClient.Connect()
+ err = app.twitchClient.Connect()
if err != nil {
panic(err)
}

send.go (21 changed lines)

@@ -39,13 +39,13 @@ func (app *application) checkMessage(text string) (bool, string) {
"message": text,
})
if err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
return true, "could not check banphrase api"
}
resp, err := http.Post(banPhraseUrl, "application/json", bytes.NewBuffer(reqBody))
if err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
return true, "could not check banphrase api"
}
@@ -53,12 +53,12 @@ func (app *application) checkMessage(text string) (bool, string) {
body, err := io.ReadAll(resp.Body)
if err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
}
var responseObject banphraseResponse
if err := json.Unmarshal(body, &responseObject); err != nil {
- app.Log.Error(err)
+ app.log.Error(err)
return true, "could not check banphrase api"
}
@@ -79,7 +79,7 @@ func (app *application) checkMessage(text string) (bool, string) {
// Send is used to send twitch replies and contains the necessary safeguards and logic for that.
// Send also logs the twitch.PrivateMessage contents into the database.
- func (app *application) Send(target, message string) {
+ func (app *application) send(target, message string) {
// Message we are trying to send is empty.
if len(message) == 0 {
return
@@ -110,24 +110,23 @@ func (app *application) Send(target, message string) {
firstMessage := message[0:499]
secondMessage := message[499:]
- app.TwitchClient.Say(target, firstMessage)
- app.TwitchClient.Say(target, secondMessage)
+ app.twitchClient.Say(target, firstMessage)
+ app.twitchClient.Say(target, secondMessage)
return
} else {
// Message was fine.
- go app.TwitchClient.Say(target, message)
+ go app.twitchClient.Say(target, message)
return
}
} else {
// Bad message, replace message and log it.
- app.TwitchClient.Say(target, "[BANPHRASED] monkaS")
- app.Log.Infow("banned message detected",
+ app.twitchClient.Say(target, "[BANPHRASED] monkaS")
+ app.log.Infow("banned message detected",
"target channel", target,
"message", message,
"ban reason", banReason,
)
return
}
}
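A side note on the length safeguard visible in the last hunk of send: an over-long reply is split at index 499, so the first Say call carries 499 characters and the second carries the remainder. A tiny standalone illustration, not part of the commit (the actual length check lives outside the lines shown here):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Hypothetical over-long reply.
    message := strings.Repeat("x", 600)
    firstMessage := message[0:499]
    secondMessage := message[499:]
    fmt.Println(len(firstMessage))  // 499
    fmt.Println(len(secondMessage)) // 101
}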