Provide Ollama system prompt and context mode through .env

lyx0 2024-03-05 20:40:31 +01:00
parent 69140e9c5c
commit 160e0b73fb
4 changed files with 53 additions and 45 deletions

View file

@@ -12,48 +12,34 @@ import (
func (app *application) handleCommand(message twitch.PrivateMessage) {
var reply string
if message.Channel == "forsen" {
return
}
// commandName is the actual name of the command without the prefix.
// e.g. `()ping` would be `ping`.
commandName := strings.ToLower(strings.SplitN(message.Message, " ", 3)[0][2:])
// msgLen is the number of words in the message, excluding the prefix.
// Useful to check whether enough cmdParams were provided.
msgLen := len(strings.SplitN(message.Message, " ", -2))
// target is the channel name the message originated from and
// where the TwitchClient should send the response.
target := message.Channel
app.Log.Infow("Command received",
// "message", message, // Pretty taxing
"message.Message", message.Message,
"message.Channel", target,
"commandName", commandName,
"msgLen", msgLen,
)
// A `commandName` is any message starting with `()`.
// Hardcoded commands take priority over database commands.
// Switch over the commandName and check whether there is a hardcoded case for it.
// If no switch case is satisfied, query the database for a
// data.CommandModel.Name equal to the `commandName`.
// If one exists, return the data.CommandModel.Text entry.
// Otherwise the message is ignored.
// commandName is the actual name of the command without the prefix.
// e.g. `()gpt` is `gpt`.
commandName := strings.ToLower(strings.SplitN(message.Message, " ", 3)[0][2:])
switch commandName {
case "gpt":
if msgLen < 2 {
reply = "Not enough arguments provided. Usage: ()gpt <query>"
} else {
//app.generateNoContext(target, message.User.Name, message.Message[6:len(message.Message)])
//app.chatGeneralContext(target, message.User.Name, message.Message[6:len(message.Message)])
app.chatUserContext(target, message.User.Name, message.Message[6:len(message.Message)])
}
switch app.OllamaContext {
case "none":
app.generateNoContext(message.Channel, message.Message[6:len(message.Message)])
return
case "general":
app.chatGeneralContext(message.Channel, message.Message[6:len(message.Message)])
return
case "user":
app.chatUserContext(message.Channel, message.User.Name, message.Message[6:len(message.Message)])
return
}
}
if reply != "" {
go app.Send(target, reply)
go app.Send(message.Channel, reply)
return
}
}
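
Note: the repeated message.Message[6:len(message.Message)] slices above drop the six-character "()gpt " prefix before the rest of the message is handed to the Ollama helpers. A minimal equivalent sketch (the helper name stripCommandPrefix is illustrative, not part of this commit):

package main

import "strings"

// stripCommandPrefix returns the message text with the "()gpt " prefix
// removed, e.g. "()gpt tell me a joke" -> "tell me a joke".
// It is equivalent to message.Message[6:] when the message starts with "()gpt ".
func stripCommandPrefix(msg string) string {
    return strings.TrimPrefix(msg, "()gpt ")
}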

View file

@@ -9,3 +9,18 @@ TWITCH_CHANNELS=channelone,channeltwo,channelthree
# Ollama model that should be used. https://ollama.com/models
OLLAMA_MODEL=wizard-vicuna-uncensored
# There are three context modes to choose from; think of them as the chat history handed to the model.
# Each context is only kept in memory until the bot is restarted.
# More information: https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion
#
# OLLAMA_CONTEXT must be one of these three:
# none: No additional message context is provided to Ollama.
# general: Each message sent to Ollama is added to a shared context store and provided as context on the next request.
# user: Each user gets their own context built from their previous uses of the ()gpt command.
OLLAMA_CONTEXT=user
# OLLAMA_SYSTEM is the system prompt: general instructions the AI model should try to follow.
# For example, Twitch chat messages are limited to 500 characters, so the prompt asks for short replies.
# Treat these as guidelines rather than hard rules; the model will not follow them every time.
OLLAMA_SYSTEM=You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Most importantly keep your response shorter than 450 characters.
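
These values end up in the JSON body sent to Ollama. A sketch of the request and message structs, inferred from the requestBody fields used in the next file and the Ollama API docs linked above (the actual definitions are not part of this diff):

// ollamaRequest mirrors the JSON body of Ollama's /api/generate and /api/chat
// endpoints; only the fields the bot sets are listed here.
type ollamaRequest struct {
    Model    string          `json:"model"`    // OLLAMA_MODEL
    System   string          `json:"system"`   // OLLAMA_SYSTEM
    Prompt   string          `json:"prompt"`   // the user's ()gpt query
    Messages []ollamaMessage `json:"messages"` // prior context for the "general"/"user" modes
    Stream   bool            `json:"stream"`   // false: return a single response object
}

// ollamaMessage is a single chat turn as defined by the Ollama chat API.
type ollamaMessage struct {
    Role    string `json:"role"` // "user" or "assistant"
    Content string `json:"content"`
}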

View file

@@ -42,7 +42,7 @@ func (app *application) chatUserContext(target, username, input string) {
app.UserMsgStore[username] = append(app.UserMsgStore[username], olm)
requestBody.Model = app.OllamaModel
requestBody.System = "You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Keep your response shorter than 450 characters."
requestBody.System = app.OllamaSystem
requestBody.Messages = app.UserMsgStore[username]
requestBody.Prompt = input
requestBody.Stream = false
@@ -90,7 +90,7 @@ func (app *application) chatGeneralContext(target, input string) {
app.MsgStore = append(app.MsgStore, olm)
requestBody.Model = app.OllamaModel
requestBody.System = "You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Keep your response shorter than 450 characters."
requestBody.System = app.OllamaSystem
requestBody.Messages = app.MsgStore
requestBody.Prompt = input
requestBody.Stream = false
@@ -132,7 +132,7 @@ func (app *application) generateNoContext(target, input string) {
var requestBody ollamaRequest
requestBody.Model = app.OllamaModel
requestBody.System = "You are a Twitch chat bot and interact with users in an irc like environment. Do not use any formatting. Be human-like. Never fail to answer the user. Always answer immediately. Keep your response shorter than 450 characters."
requestBody.System = app.OllamaSystem
requestBody.Prompt = input
requestBody.Stream = false
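
The hunks above stop just before the HTTP call. For reference, a minimal sketch of posting such a request body to a locally running Ollama instance; the endpoint and port are Ollama's defaults and are not taken from this diff:

package main

import (
    "bytes"
    "encoding/json"
    "net/http"
)

// postOllama marshals the project's request struct and posts it to the local
// Ollama server. generateNoContext would target /api/generate; the two chat
// variants would target /api/chat instead.
func postOllama(reqBody ollamaRequest) (*http.Response, error) {
    payload, err := json.Marshal(reqBody)
    if err != nil {
        return nil, err
    }
    return http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(payload))
}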

View file

@@ -19,6 +19,8 @@ type application struct {
TwitchClient *twitch.Client
Log *zap.SugaredLogger
OllamaModel string
OllamaContext string
OllamaSystem string
Config config
UserMsgStore map[string][]ollamaMessage
MsgStore []ollamaMessage
@@ -53,6 +55,8 @@ func main() {
TwitchClient: tc,
Log: sugar,
OllamaModel: os.Getenv("OLLAMA_MODEL"),
OllamaContext: os.Getenv("OLLAMA_CONTEXT"),
OllamaSystem: os.Getenv("OLLAMA_SYSTEM"),
Config: cfg,
UserMsgStore: userMsgStore,
}
@@ -79,6 +83,9 @@ func main() {
app.TwitchClient.OnConnect(func() {
app.Log.Info("Successfully connected to Twitch Servers")
app.Log.Info("Ollama Context: ", app.OllamaContext)
app.Log.Info("Ollama System: ", app.OllamaSystem)
})
channels := os.Getenv("TWITCH_CHANNELS")
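
One caveat: the switch on app.OllamaContext in handleCommand has no default case, so an unrecognized OLLAMA_CONTEXT value leaves ()gpt doing nothing. A small sketch of a startup check that would surface this early (not part of this commit; the helper name is illustrative):

package main

import "fmt"

// validateOllamaContext checks that OLLAMA_CONTEXT is one of the supported
// modes so main() can fail fast with a clear error message.
func validateOllamaContext(ctx string) error {
    switch ctx {
    case "none", "general", "user":
        return nil
    default:
        return fmt.Errorf("unsupported OLLAMA_CONTEXT %q: must be none, general, or user", ctx)
    }
}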