Mirror of https://github.com/lyx0/ollama-twitch-bot.git (synced 2024-11-06 18:52:03 +01:00)

Commit 160e0b73fb: provide ollama system and context through .env
Parent: 69140e9c5c
4 changed files with 53 additions and 45 deletions
commands.go (48 changed lines)
@@ -12,48 +12,34 @@ import (
 func (app *application) handleCommand(message twitch.PrivateMessage) {
 	var reply string
 
 	if message.Channel == "forsen" {
 		return
 	}
 
-	// commandName is the actual name of the command without the prefix.
-	// e.g. `()ping` would be `ping`.
-	commandName := strings.ToLower(strings.SplitN(message.Message, " ", 3)[0][2:])
-
-	// msgLen is the amount of words in a message without the prefix.
-	// Useful to check if enough cmdParams are provided.
-	msgLen := len(strings.SplitN(message.Message, " ", -2))
-
-	// target is the channelname the message originated from and
-	// where the TwitchClient should send the response
-	target := message.Channel
-	app.Log.Infow("Command received",
-		// "message", message, // Pretty taxing
-		"message.Message", message.Message,
-		"message.Channel", target,
-		"commandName", commandName,
-		"msgLen", msgLen,
-	)
-
-	// A `commandName` is every message starting with `()`.
-	// Hardcoded commands have a priority over database commands.
-	// Switch over the commandName and see if there is a hardcoded case for it.
-	// If there was no switch case satisfied, query the database if there is
-	// a data.CommandModel.Name equal to the `commandName`
-	// If there is return the data.CommandModel.Text entry.
-	// Otherwise we ignore the message.
+	// commandName is the actual name of the command without the prefix.
+	// e.g. `()gpt` is `gpt`.
+	commandName := strings.ToLower(strings.SplitN(message.Message, " ", 3)[0][2:])
 	switch commandName {
 	case "gpt":
-		if msgLen < 2 {
-			reply = "Not enough arguments provided. Usage: ()gpt <query>"
-		} else {
-			//app.generateNoContext(target, message.User.Name, message.Message[6:len(message.Message)])
-			//app.chatGeneralContext(target, message.User.Name, message.Message[6:len(message.Message)])
-			app.chatUserContext(target, message.User.Name, message.Message[6:len(message.Message)])
-		}
+		switch app.OllamaContext {
+		case "none":
+			app.generateNoContext(message.Channel, message.Message[6:len(message.Message)])
+			return
+
+		case "general":
+			app.chatGeneralContext(message.Channel, message.Message[6:len(message.Message)])
+			return
+
+		case "user":
+			app.chatUserContext(message.Channel, message.User.Name, message.Message[6:len(message.Message)])
+			return
+		}
 	}
 	if reply != "" {
-		go app.Send(target, reply)
+		go app.Send(message.Channel, reply)
 		return
 	}
 }
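The slicing in handleCommand is terse, so here is a small standalone sketch of what the prefix handling does (not code from the repository; the sample message is made up): strings.SplitN(...)[0][2:] takes the first word of the message and drops the "()" prefix, and message.Message[6:len(message.Message)] skips the 6 bytes of "()gpt " so only the user's query is passed on to Ollama.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical chat line using the bot's "()" command prefix.
	message := "()gpt tell me a joke"

	// First word without the leading "()" -> "gpt".
	commandName := strings.ToLower(strings.SplitN(message, " ", 3)[0][2:])

	// Skip the 6 bytes of "()gpt " -> only the query remains.
	query := message[6:]

	fmt.Println(commandName) // gpt
	fmt.Println(query)       // tell me a joke
}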
env.example (15 changed lines)
@@ -9,3 +9,18 @@ TWITCH_CHANNELS=channelone,channeltwo,channelthree
 
 # Ollama model that should be used. https://ollama.com/models
 OLLAMA_MODEL=wizard-vicuna-uncensored
+
+# There are three context modes to choose from; think of them as chat history.
+# Each context is only stored in memory until the bot is restarted.
+# More information: https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
+#
+# OLLAMA_CONTEXT must be one of these three values:
+# none:    no additional message context is provided to Ollama.
+# general: every message sent to Ollama is added to one shared context store and provided on the next use.
+# user:    each user gets their own context built from their previous uses of the ()gpt command.
+OLLAMA_CONTEXT=user
+
+# OLLAMA_SYSTEM provides general instructions that the model should follow, for
+# example that Twitch chat messages are limited to 500 characters. The model
+# treats these as guidance and does not always follow them reliably.
+OLLAMA_SYSTEM=You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Most importantly keep your response shorter than 450 characters.
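Nothing in this commit appears to validate OLLAMA_CONTEXT: an unexpected value simply matches no case in the new switch in commands.go, so the ()gpt command silently does nothing. A minimal startup check could look like the sketch below (illustrative only, not part of the repository).

package main

import (
	"log"
	"os"
)

func main() {
	// main.go reads this with os.Getenv; this sketch only adds a validation step.
	ollamaContext := os.Getenv("OLLAMA_CONTEXT")

	switch ollamaContext {
	case "none", "general", "user":
		log.Println("using Ollama context mode:", ollamaContext)
	default:
		log.Fatalf("OLLAMA_CONTEXT must be none, general or user; got %q", ollamaContext)
	}
}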
@@ -42,7 +42,7 @@ func (app *application) chatUserContext(target, username, input string) {
 	app.UserMsgStore[username] = append(app.UserMsgStore[username], olm)
 
 	requestBody.Model = app.OllamaModel
-	requestBody.System = "You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Keep your response shorter than 450 characters."
+	requestBody.System = app.OllamaSystem
 	requestBody.Messages = app.UserMsgStore[username]
 	requestBody.Prompt = input
 	requestBody.Stream = false
@@ -90,7 +90,7 @@ func (app *application) chatGeneralContext(target, input string) {
 	app.MsgStore = append(app.MsgStore, olm)
 
 	requestBody.Model = app.OllamaModel
-	requestBody.System = "You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Keep your response shorter than 450 characters."
+	requestBody.System = app.OllamaSystem
 	requestBody.Messages = app.MsgStore
 	requestBody.Prompt = input
 	requestBody.Stream = false
@@ -132,7 +132,7 @@ func (app *application) generateNoContext(target, input string) {
 	var requestBody ollamaRequest
 
 	requestBody.Model = app.OllamaModel
-	requestBody.System = "You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Keep your response shorter than 450 characters."
+	requestBody.System = app.OllamaSystem
 	requestBody.Prompt = input
 	requestBody.Stream = false
 
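The three hunks above, from the file that builds the Ollama requests, replace the hardcoded system prompt with app.OllamaSystem. The ollamaRequest and ollamaMessage types themselves are not part of this diff; a plausible shape, inferred from the fields assigned above and from Ollama's documented /api/generate and /api/chat request bodies, might be:

package main

import (
	"encoding/json"
	"fmt"
)

// Guessed types, not taken from the repository: field names follow the
// assignments in the diff above and Ollama's JSON request format.
type ollamaMessage struct {
	Role    string `json:"role"` // "user" or "assistant"
	Content string `json:"content"`
}

type ollamaRequest struct {
	Model    string          `json:"model"`
	System   string          `json:"system,omitempty"`   // now taken from app.OllamaSystem
	Prompt   string          `json:"prompt,omitempty"`   // used by /api/generate
	Messages []ollamaMessage `json:"messages,omitempty"` // used by /api/chat
	Stream   bool            `json:"stream"`
}

func main() {
	// Example request body roughly as the "none" context mode might build it.
	req := ollamaRequest{
		Model:  "wizard-vicuna-uncensored",
		System: "You are a Twitch chat bot.",
		Prompt: "tell me a joke",
		Stream: false,
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
}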
main.go (29 changed lines)
@@ -16,12 +16,14 @@ type config struct {
 }
 
 type application struct {
-	TwitchClient *twitch.Client
-	Log          *zap.SugaredLogger
-	OllamaModel  string
-	Config       config
-	UserMsgStore map[string][]ollamaMessage
-	MsgStore     []ollamaMessage
+	TwitchClient  *twitch.Client
+	Log           *zap.SugaredLogger
+	OllamaModel   string
+	OllamaContext string
+	OllamaSystem  string
+	Config        config
+	UserMsgStore  map[string][]ollamaMessage
+	MsgStore      []ollamaMessage
 }
 
 func main() {
@@ -50,11 +52,13 @@ func main() {
 	userMsgStore := make(map[string][]ollamaMessage)
 
 	app := &application{
-		TwitchClient: tc,
-		Log:          sugar,
-		OllamaModel:  os.Getenv("OLLAMA_MODEL"),
-		Config:       cfg,
-		UserMsgStore: userMsgStore,
+		TwitchClient:  tc,
+		Log:           sugar,
+		OllamaModel:   os.Getenv("OLLAMA_MODEL"),
+		OllamaContext: os.Getenv("OLLAMA_CONTEXT"),
+		OllamaSystem:  os.Getenv("OLLAMA_SYSTEM"),
+		Config:        cfg,
+		UserMsgStore:  userMsgStore,
 	}
 
 	// Received a PrivateMessage (normal chat message).
@@ -79,6 +83,9 @@ func main() {
 
 	app.TwitchClient.OnConnect(func() {
 		app.Log.Info("Successfully connected to Twitch Servers")
+		app.Log.Info("Ollama Context: ", app.OllamaContext)
+		app.Log.Info("Ollama System: ", app.OllamaSystem)
+
 	})
 
 	channels := os.Getenv("TWITCH_CHANNELS")
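For the "user" context mode the application keeps one message history per username in UserMsgStore, and a shared history in MsgStore for the "general" mode; as env.example notes, both live only in memory and are cleared when the bot restarts. A small sketch of how the per-user store behaves, using a simplified stand-in for the repository's ollamaMessage type:

package main

import "fmt"

// Simplified stand-in for the repository's ollamaMessage type.
type ollamaMessage struct {
	Role    string
	Content string
}

func main() {
	// Mirrors main.go: one history slice per username.
	userMsgStore := make(map[string][]ollamaMessage)

	// Each ()gpt use appends to that user's slice, so the next request
	// can carry their personal conversation history.
	userMsgStore["someuser"] = append(userMsgStore["someuser"], ollamaMessage{Role: "user", Content: "hello"})
	userMsgStore["someuser"] = append(userMsgStore["someuser"], ollamaMessage{Role: "assistant", Content: "hi!"})

	fmt.Println(len(userMsgStore["someuser"])) // 2
}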