Mirror of https://github.com/lyx0/ollama-twitch-bot.git (synced 2024-11-06 18:52:03 +01:00)
general cleanup
parent 160e0b73fb
commit 45757905ee

5 changed files with 76 additions and 76 deletions
@@ -6,9 +6,7 @@ import (
 	"github.com/gempir/go-twitch-irc/v4"
 )
 
-// handleCommand takes in a twitch.PrivateMessage and then routes the message
-// to the function that is responsible for each command and knows how to deal
-// with it accordingly.
+// handleCommand is called each time a message starts with "()"
 func (app *application) handleCommand(message twitch.PrivateMessage) {
 	var reply string
 
@@ -24,7 +22,7 @@ func (app *application) handleCommand(message twitch.PrivateMessage) {
 	if msgLen < 2 {
 		reply = "Not enough arguments provided. Usage: ()gpt <query>"
 	} else {
-		switch app.OllamaContext {
+		switch app.config.ollamaContext {
 		case "none":
 			app.generateNoContext(message.Channel, message.Message[6:len(message.Message)])
 			return
@@ -39,7 +37,7 @@ func (app *application) handleCommand(message twitch.PrivateMessage) {
 		}
 	}
 	if reply != "" {
-		go app.Send(message.Channel, reply)
+		go app.send(message.Channel, reply)
 		return
 	}
 }
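
A note on the [6:] slice in the hunk above: the prefix plus command name, "()gpt ", is presumably six characters, so message.Message[6:len(message.Message)] (equivalent to message.Message[6:]) leaves only the user's query. A tiny self-contained illustration:

    package main

    import "fmt"

    func main() {
        // "()gpt " is 6 characters, so slicing from index 6 keeps only the query.
        msg := "()gpt what is Go?"
        query := msg[6:len(msg)] // same slicing as in handleCommand; equivalent to msg[6:]
        fmt.Println(query)       // prints "what is Go?"
    }
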
@@ -12,12 +12,17 @@ OLLAMA_MODEL=wizard-vicuna-uncensored
 
 # There are three context models to choose from, think of it like chat history.
 # Each context is only stored until the bot is restarted.
-# More information: https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
+# Huge context will probably slow the bot down.
 #
 # OLLAMA_CONTEXT must be one of those three:
 # none: No additional message context is provided to ollama
 # general: Each message sent to ollama will be added to a general context store and provided on the next use.
-# user: Each user gets their own context from the previous times they used the ()gpt command
+# user: Each user gets their own context store from their previous interactions.
+#
+# "none" uses the /api/generate endpoint, "general" and "user" the /api/chat endpoint
+# More information:
+# /api/generate https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion
+# /api/chat https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
 OLLAMA_CONTEXT=user
 
 # OLLAMA_SYSTEM provides general instructions to the ai model that it should follow.
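
As the updated comments note, "none" talks to /api/generate while "general" and "user" talk to /api/chat. A rough sketch of how the two request bodies differ, reusing the field names from generate.go (the JSON tags and concrete values are illustrative assumptions, not taken from the commit):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type ollamaMessage struct {
        Role    string `json:"role"`
        Content string `json:"content"`
    }

    type ollamaRequest struct {
        Model    string          `json:"model"`
        System   string          `json:"system,omitempty"`
        Prompt   string          `json:"prompt,omitempty"`   // used with /api/generate
        Messages []ollamaMessage `json:"messages,omitempty"` // used with /api/chat
        Stream   bool            `json:"stream"`
    }

    func main() {
        // OLLAMA_CONTEXT=none -> /api/generate: one prompt, no history.
        gen := ollamaRequest{Model: "wizard-vicuna-uncensored", Prompt: "hello", Stream: false}

        // OLLAMA_CONTEXT=general or user -> /api/chat: role-tagged message history.
        chat := ollamaRequest{
            Model:    "wizard-vicuna-uncensored",
            Messages: []ollamaMessage{{Role: "user", Content: "hello"}},
            Stream:   false,
        }

        for _, r := range []ollamaRequest{gen, chat} {
            b, _ := json.Marshal(r)
            fmt.Println(string(b))
        }
    }
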

generate.go (64 changed lines)
@@ -39,45 +39,45 @@ func (app *application) chatUserContext(target, username, input string) {
 
 	olm.Role = "user"
 	olm.Content = input
-	app.UserMsgStore[username] = append(app.UserMsgStore[username], olm)
+	app.userMsgStore[username] = append(app.userMsgStore[username], olm)
 
-	requestBody.Model = app.OllamaModel
-	requestBody.System = app.OllamaSystem
-	requestBody.Messages = app.UserMsgStore[username]
+	requestBody.Model = app.config.ollamaModel
+	requestBody.System = app.config.ollamaSystem
+	requestBody.Messages = app.userMsgStore[username]
 	requestBody.Prompt = input
 	requestBody.Stream = false
 
 	marshalled, err := json.Marshal(requestBody)
 	if err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 	}
 
 	resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewBuffer(marshalled))
 	if err != nil {
-		app.Log.Error(err.Error())
+		app.log.Error(err.Error())
 	}
 
 	defer resp.Body.Close()
 
 	body, err := io.ReadAll(resp.Body)
 	if err != nil {
-		app.Log.Error(err.Error())
+		app.log.Error(err.Error())
 	}
 
 	var responseObject ollamaResponse
 	if err := json.Unmarshal(body, &responseObject); err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 	}
 
 	olm.Role = responseObject.Message.Role
 	olm.Content = responseObject.Message.Content
-	app.UserMsgStore[username] = append(app.UserMsgStore[username], olm)
+	app.userMsgStore[username] = append(app.userMsgStore[username], olm)
 
-	app.Log.Infow("Message context for username",
-		"Username", username,
-		"Personal Context", app.UserMsgStore[username],
+	app.log.Infow("Message context for username",
+		"username", username,
+		"app.userMsgStore[username]", app.userMsgStore[username],
 	)
-	app.Send(target, responseObject.Message.Content)
+	app.send(target, responseObject.Message.Content)
 }
 
 // chatGeneralContext provides additional message context from every past
@@ -87,76 +87,76 @@ func (app *application) chatGeneralContext(target, input string) {
 
 	olm.Role = "user"
 	olm.Content = input
-	app.MsgStore = append(app.MsgStore, olm)
+	app.msgStore = append(app.msgStore, olm)
 
-	requestBody.Model = app.OllamaModel
-	requestBody.System = app.OllamaSystem
-	requestBody.Messages = app.MsgStore
+	requestBody.Model = app.config.ollamaModel
+	requestBody.System = app.config.ollamaSystem
+	requestBody.Messages = app.msgStore
 	requestBody.Prompt = input
 	requestBody.Stream = false
 
 	marshalled, err := json.Marshal(requestBody)
 	if err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 	}
 
 	resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewBuffer(marshalled))
 	if err != nil {
-		app.Log.Error(err.Error())
+		app.log.Error(err.Error())
 	}
 
 	defer resp.Body.Close()
 
 	body, err := io.ReadAll(resp.Body)
 	if err != nil {
-		app.Log.Error(err.Error())
+		app.log.Error(err.Error())
 	}
 
 	var responseObject ollamaResponse
 	if err := json.Unmarshal(body, &responseObject); err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 	}
 
 	olm.Role = responseObject.Message.Role
 	olm.Content = responseObject.Message.Content
-	app.MsgStore = append(app.MsgStore, olm)
+	app.msgStore = append(app.msgStore, olm)
 
-	app.Log.Infow("MsgStore",
-		"app.MsgStore", app.MsgStore,
+	app.log.Infow("app.msgStore",
+		"app.msgStore", app.msgStore,
 	)
-	app.Send(target, responseObject.Message.Content)
+	app.send(target, responseObject.Message.Content)
 }
 
 // generateNoContext provides no additional message context
 func (app *application) generateNoContext(target, input string) {
 	var requestBody ollamaRequest
 
-	requestBody.Model = app.OllamaModel
-	requestBody.System = app.OllamaSystem
+	requestBody.Model = app.config.ollamaModel
+	requestBody.System = app.config.ollamaSystem
 	requestBody.Prompt = input
 	requestBody.Stream = false
 
 	marshalled, err := json.Marshal(requestBody)
 	if err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 	}
 
 	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewBuffer(marshalled))
 	if err != nil {
-		app.Log.Error(err.Error())
+		app.log.Error(err.Error())
 	}
 
 	defer resp.Body.Close()
 
 	body, err := io.ReadAll(resp.Body)
 	if err != nil {
-		app.Log.Error(err.Error())
+		app.log.Error(err.Error())
 	}
 
 	var responseObject ollamaResponse
 	if err := json.Unmarshal(body, &responseObject); err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 	}
 
-	app.Send(target, responseObject.Response)
+	app.send(target, responseObject.Response)
 }
 
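
For orientation, the renamed userMsgStore holds one message slice per Twitch username: chatUserContext appends the user's prompt before the request and the model's reply after it, so the next ()gpt call from that user sends the whole exchange as requestBody.Messages. A minimal sketch of that bookkeeping (the username and texts are made up; the ollamaMessage shape mirrors the Role/Content fields used above):

    package main

    import "fmt"

    type ollamaMessage struct {
        Role    string
        Content string
    }

    func main() {
        userMsgStore := make(map[string][]ollamaMessage)

        // First ()gpt call from "someuser": store the prompt, then the model's reply.
        userMsgStore["someuser"] = append(userMsgStore["someuser"],
            ollamaMessage{Role: "user", Content: "what is Go?"})
        userMsgStore["someuser"] = append(userMsgStore["someuser"],
            ollamaMessage{Role: "assistant", Content: "Go is a programming language."})

        // A later call would send this slice as requestBody.Messages, so the model
        // sees the earlier exchange. The store lives only in memory and is lost
        // when the bot restarts.
        fmt.Println(len(userMsgStore["someuser"])) // 2
    }
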

main.go (50 changed lines)
@@ -12,18 +12,17 @@ import (
 type config struct {
 	twitchUsername string
 	twitchOauth string
-	commandPrefix string
+	ollamaModel string
+	ollamaContext string
+	ollamaSystem string
 }
 
 type application struct {
-	TwitchClient *twitch.Client
-	Log *zap.SugaredLogger
-	OllamaModel string
-	OllamaContext string
-	OllamaSystem string
-	Config config
-	UserMsgStore map[string][]ollamaMessage
-	MsgStore []ollamaMessage
+	twitchClient *twitch.Client
+	log *zap.SugaredLogger
+	config config
+	userMsgStore map[string][]ollamaMessage
+	msgStore []ollamaMessage
 }
 
 func main() {
@@ -44,25 +43,24 @@ func main() {
 		sugar.Fatal("Error loading .env")
 	}
 
-	// Twitch account config
 	cfg.twitchUsername = os.Getenv("TWITCH_USERNAME")
 	cfg.twitchOauth = os.Getenv("TWITCH_OAUTH")
+	cfg.ollamaModel = os.Getenv("OLLAMA_MODEL")
+	cfg.ollamaContext = os.Getenv("OLLAMA_CONTEXT")
+	cfg.ollamaSystem = os.Getenv("OLLAMA_SYSTEM")
 	tc := twitch.NewClient(cfg.twitchUsername, cfg.twitchOauth)
 
 	userMsgStore := make(map[string][]ollamaMessage)
 
 	app := &application{
-		TwitchClient: tc,
-		Log: sugar,
-		OllamaModel: os.Getenv("OLLAMA_MODEL"),
-		OllamaContext: os.Getenv("OLLAMA_CONTEXT"),
-		OllamaSystem: os.Getenv("OLLAMA_SYSTEM"),
-		Config: cfg,
-		UserMsgStore: userMsgStore,
+		twitchClient: tc,
+		log: sugar,
+		config: cfg,
+		userMsgStore: userMsgStore,
 	}
 
 	// Received a PrivateMessage (normal chat message).
-	app.TwitchClient.OnPrivateMessage(func(message twitch.PrivateMessage) {
+	app.twitchClient.OnPrivateMessage(func(message twitch.PrivateMessage) {
 		// roomId is the Twitch UserID of the channel the message originated from.
 		// If there is no roomId something went really wrong.
 		roomId := message.Tags["room-id"]
@@ -81,23 +79,23 @@ func main() {
 		}
 	})
 
-	app.TwitchClient.OnConnect(func() {
-		app.Log.Info("Successfully connected to Twitch Servers")
-		app.Log.Info("Ollama Context: ", app.OllamaContext)
-		app.Log.Info("Ollama System: ", app.OllamaSystem)
+	app.twitchClient.OnConnect(func() {
+		app.log.Info("Successfully connected to Twitch Servers")
+		app.log.Info("Ollama Context: ", app.config.ollamaContext)
+		app.log.Info("Ollama System: ", app.config.ollamaSystem)
 
 	})
 
 	channels := os.Getenv("TWITCH_CHANNELS")
 	channel := strings.Split(channels, ",")
 	for i := 0; i < len(channel); i++ {
-		app.TwitchClient.Join(channel[i])
-		app.TwitchClient.Say(channel[i], "MrDestructoid")
-		app.Log.Infof("Joining channel: %s", channel[i])
+		app.twitchClient.Join(channel[i])
+		app.twitchClient.Say(channel[i], "MrDestructoid")
+		app.log.Infof("Joining channel: %s", channel[i])
 	}
 
 	// Actually connect to chat.
-	err = app.TwitchClient.Connect()
+	err = app.twitchClient.Connect()
 	if err != nil {
 		panic(err)
 	}
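
Taken together, main.go now reads its whole configuration from the environment. A hypothetical .env covering the variables referenced in this commit (all values are placeholders, not taken from the repository):

    # Placeholder values; variable names are the ones read in main.go
    TWITCH_USERNAME=mybotaccount
    TWITCH_OAUTH=oauth:xxxxxxxxxxxxxxxx
    TWITCH_CHANNELS=somechannel,anotherchannel
    OLLAMA_MODEL=wizard-vicuna-uncensored
    OLLAMA_CONTEXT=user
    OLLAMA_SYSTEM=You are a helpful Twitch chat bot.
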

send.go (21 changed lines)
@@ -39,13 +39,13 @@ func (app *application) checkMessage(text string) (bool, string) {
 		"message": text,
 	})
 	if err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 		return true, "could not check banphrase api"
 	}
 
 	resp, err := http.Post(banPhraseUrl, "application/json", bytes.NewBuffer(reqBody))
 	if err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 		return true, "could not check banphrase api"
 	}
 
@@ -53,12 +53,12 @@ func (app *application) checkMessage(text string) (bool, string) {
 
 	body, err := io.ReadAll(resp.Body)
 	if err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 	}
 
 	var responseObject banphraseResponse
 	if err := json.Unmarshal(body, &responseObject); err != nil {
-		app.Log.Error(err)
+		app.log.Error(err)
 		return true, "could not check banphrase api"
 	}
 
@@ -79,7 +79,7 @@ func (app *application) checkMessage(text string) (bool, string) {
 
 // Send is used to send twitch replies and contains the necessary safeguards and logic for that.
 // Send also logs the twitch.PrivateMessage contents into the database.
-func (app *application) Send(target, message string) {
+func (app *application) send(target, message string) {
 	// Message we are trying to send is empty.
 	if len(message) == 0 {
 		return
@@ -110,24 +110,23 @@ func (app *application) Send(target, message string) {
 			firstMessage := message[0:499]
 			secondMessage := message[499:]
 
-			app.TwitchClient.Say(target, firstMessage)
-			app.TwitchClient.Say(target, secondMessage)
+			app.twitchClient.Say(target, firstMessage)
+			app.twitchClient.Say(target, secondMessage)
 
 			return
 		} else {
 			// Message was fine.
-			go app.TwitchClient.Say(target, message)
+			go app.twitchClient.Say(target, message)
 			return
 		}
 	} else {
 		// Bad message, replace message and log it.
-		app.TwitchClient.Say(target, "[BANPHRASED] monkaS")
-		app.Log.Infow("banned message detected",
+		app.twitchClient.Say(target, "[BANPHRASED] monkaS")
+		app.log.Infow("banned message detected",
 			"target channel", target,
 			"message", message,
 			"ban reason", banReason,
 		)
 
 		return
 	}
 }