summaryrefslogtreecommitdiff
path: root/lib/lsg_irc
diff options
context:
space:
mode:
authorJordan Bracco <href@random.sh>2022-12-09 17:33:00 +0000
committerJordan Bracco <href@random.sh>2022-12-11 02:03:36 +0000
commitc88f2454f1cedb497131e6f1fa4429bd3a38185f (patch)
treefac2b19146e959bd2bd35d9f4388edf0cd0a226f /lib/lsg_irc
parentnew plugin: openai gpt (diff)
gpt improvements
Diffstat (limited to 'lib/lsg_irc')
-rw-r--r--lib/lsg_irc/gpt_plugin.ex195
1 file changed, 170 insertions, 25 deletions
diff --git a/lib/lsg_irc/gpt_plugin.ex b/lib/lsg_irc/gpt_plugin.ex
index f628f8d..bf615fb 100644
--- a/lib/lsg_irc/gpt_plugin.ex
+++ b/lib/lsg_irc/gpt_plugin.ex
@@ -1,55 +1,94 @@
defmodule LSG.IRC.GptPlugin do
require Logger
+ import Irc.Plugin.TempRef
def irc_doc() do
"""
# OpenAI GPT
- * **!gpt** list GPT tasks
- * **!gpt `[task]` `<task args>`** run a task
- * **?offensive `<content>`** is content offensive
+ Uses OpenAI's GPT-3 API to bring natural language prompts to your IRC channel.
+
+ _prompts_ are pre-defined prompts and parameters defined in the bot's CouchDB.
+
+ _Runs_ (results of the inference of a _prompt_) are also stored in CouchDB and
+ may be resumed.
+
+ * **!gpt** list GPT prompts
+ * **!gpt `[prompt]` `<prompt or args>`** run a prompt
+ * **+gpt `[short ref|run id]` `<prompt or args>`** continue a prompt
+ * **?gpt offensive `<content>`** is content offensive ?
+ * **?gpt show `[short ref|run id]`** run information and web link
+ * **?gpt `[prompt]`** prompt information and web link
"""
end
@couch_db "bot-plugin-openai-prompts"
+ @couch_run_db "bot-plugin-gpt-history"
@trigger "gpt"
def start_link() do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
+ defstruct [:temprefs]
+
+ def get_result(id) do
+ Couch.get(@couch_run_db, id)
+ end
+
+ def get_prompt(id) do
+ Couch.get(@couch_db, id)
+ end
+
def init(_) do
regopts = [plugin: __MODULE__]
{:ok, _} = Registry.register(IRC.PubSub, "trigger:#{@trigger}", regopts)
- {:ok, _} = Registry.register(IRC.PubSub, "trigger:offensive", regopts)
- {:ok, nil}
+ {:ok, %__MODULE__{temprefs: new_temp_refs()}}
end
- def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :bang, args: [task | args]}}}, state) do
- case Couch.get(@couch_db, task) do
- {:ok, task} -> task(m, task, Enum.join(args, " "))
- {:error, :not_found} -> m.replyfun.("gpt: no such task: #{task}")
+ def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :bang, args: [prompt | args]}}}, state) do
+ case Couch.get(@couch_db, prompt) do
+ {:ok, prompt} -> {:noreply, prompt(m, prompt, Enum.join(args, " "), state)}
+ {:error, :not_found} ->
+ m.replyfun.("gpt: no such prompt: #{prompt}")
+ {:noreply, state}
error ->
- Logger.info("gpt: task load error: #{inspect error}")
+ Logger.info("gpt: prompt load error: #{inspect error}")
m.replyfun.("gpt: database error")
+ {:noreply, state}
end
- {:noreply, state}
end
def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :bang, args: []}}}, state) do
case Couch.get(@couch_db, "_all_docs") do
- {:ok, %{"rows" => []}} -> m.replyfun.("gpt: no tasks available")
- {:ok, %{"rows" => tasks}} ->
- tasks = tasks |> Enum.map(fn(task) -> Map.get(task, "id") end) |> Enum.join(", ")
- m.replyfun.("gpt: tasks: #{tasks}")
+ {:ok, %{"rows" => []}} -> m.replyfun.("gpt: no prompts available")
+ {:ok, %{"rows" => prompts}} ->
+ prompts = prompts |> Enum.map(fn(prompt) -> Map.get(prompt, "id") end) |> Enum.join(", ")
+ m.replyfun.("gpt: prompts: #{prompts}")
error ->
- Logger.info("gpt: task load error: #{inspect error}")
+ Logger.info("gpt: prompt load error: #{inspect error}")
m.replyfun.("gpt: database error")
end
{:noreply, state}
end
- def handle_info({:irc, :trigger, "offensive", m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: text}}}, state) do
+ def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :plus, args: [ref_or_id | args]}}}, state) do
+ id = lookup_temp_ref(ref_or_id, state.temprefs, ref_or_id)
+ case Couch.get(@couch_run_db, id) do
+ {:ok, run} ->
+ Logger.debug("+gpt run: #{inspect run}")
+ {:noreply, continue_prompt(m, run, Enum.join(args, " "), state)}
+ {:error, :not_found} ->
+ m.replyfun.("gpt: ref or id not found or expired: #{inspect ref_or_id} (if using short ref, try using full id)")
+ {:noreply, state}
+ error ->
+ Logger.info("+gpt: run load error: #{inspect error}")
+ m.replyfun.("gpt: database error")
+ {:noreply, state}
+ end
+ end
+
+ def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: ["offensive" | text]}}}, state) do
text = Enum.join(text, " ")
{moderate?, moderation} = moderation(text, m.account.id)
reply = cond do
@@ -61,26 +100,132 @@ defmodule LSG.IRC.GptPlugin do
{:noreply, state}
end
- def handle_info(_, state) do
+ def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: ["show", ref_or_id]}}}, state) do
+ id = lookup_temp_ref(ref_or_id, state.temprefs, ref_or_id) || ref_or_id
+ url = if m.channel do
+ LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :result, m.network, LSGWeb.format_chan(m.channel), id)
+ else
+ LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :result, id)
+ end
+ m.replyfun.("→ #{url}")
+ {:noreply, state}
+ end
+
+ def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: [prompt]}}}, state) do
+ url = if m.channel do
+ LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :prompt, m.network, LSGWeb.format_chan(m.channel), prompt)
+ else
+ LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :prompt, prompt)
+ end
+ m.replyfun.("→ #{url}")
+ {:noreply, state}
+ end
+
+ def handle_info(info, state) do
+ Logger.debug("gpt: unhandled info: #{inspect info}")
{:noreply, state}
end
- defp task(msg, task = %{"type" => "completions", "prompt" => prompt}, content) do
- prompt = Tmpl.render(prompt, msg, %{"content" => content})
- args = Map.get(task, "openai_params")
+ defp continue_prompt(msg, run, content, state) do
+ prompt_id = Map.get(run, "prompt_id")
+ prompt_rev = Map.get(run, "prompt_rev")
+
+ original_prompt = case Couch.get(@couch_db, Map.get(run, "prompt_id")) do
+ {:ok, prompt} -> prompt
+ _ -> nil
+ end
+
+ continue_prompt = %{"_id" => Map.get(run, "prompt_id"),
+ "_rev" => Map.get(original_prompt, "_rev"),
+ "type" => Map.get(run, "type"),
+ "parent_run_id" => Map.get(run, "_id"),
+ "openai_params" => Map.get(run, "request") |> Map.delete("prompt")}
+
+ continue_prompt = if prompt_string = Map.get(original_prompt, "continue_prompt") do
+ full_text = get_in(run, ~w(request prompt)) <> "\n" <> Map.get(run, "response")
+ continue_prompt
+ |> Map.put("prompt", prompt_string)
+ |> Map.put("prompt_format", "liquid")
+ |> Map.put("prompt_liquid_variables", %{"previous" => full_text})
+ else
+ prompt_content_tag = if content != "", do: " {{content}}", else: ""
+ string = get_in(run, ~w(request prompt)) <> "\n" <> Map.get(run, "response") <> prompt_content_tag
+ continue_prompt
+ |> Map.put("prompt", string)
+ |> Map.put("prompt_format", "liquid")
+ end
+
+ prompt(msg, continue_prompt, content, state)
+ end
+
+ defp prompt(msg, prompt = %{"type" => "completions", "prompt" => prompt}, content, state) do
+ Logger.debug("gpt_plugin:prompt/4 #{inspect prompt}")
+ prompt = case Map.get(prompt, "prompt_format", "liquid") do
+ "liquid" -> Tmpl.render(prompt, msg, Map.merge(Map.get(prompt, "prompt_liquid_variables", %{}), %{"content" => content}))
+ "norender" -> prompt
+ end
+
+ args = Map.get(prompt, "openai_params")
|> Map.put("prompt", prompt)
|> Map.put("user", msg.account.id)
+
{moderate?, moderation} = moderation(content, msg.account.id)
if moderate?, do: msg.replyfun.("⚠️ offensive input: #{Enum.join(moderation, ", ")}")
+
Logger.debug("GPT: request #{inspect args}")
case OpenAi.post("/v1/completions", args) do
- {:ok, %{"choices" => [%{"text" => text} | _]}} ->
- {moderate?, moderation} = moderation(text, msg.account.id)
- if moderate?, do: msg.replyfun.("🚨 offensive output: #{Enum.join(moderation, ", ")}")
- msg.replyfun.(String.trim(text))
+ {:ok, %{"choices" => [%{"text" => text, "finish_reason" => finish_reason} | _], "usage" => usage, "id" => gpt_id, "created" => created}} ->
+ text = String.trim(text)
+ {o_moderate?, o_moderation} = moderation(text, msg.account.id)
+ if o_moderate?, do: msg.replyfun.("🚨 offensive output: #{Enum.join(o_moderation, ", ")}")
+ msg.replyfun.(text)
+ doc = %{"prompt_id" => Map.get(prompt, "_id"),
+ "prompt_rev" => Map.get(prompt, "_rev"),
+ "network" => msg.network,
+ "channel" => msg.channel,
+ "nick" => msg.sender.nick,
+ "account_id" => (if msg.account, do: msg.account.id),
+ "request" => args,
+ "response" => text,
+ "message_at" => msg.at,
+ "reply_at" => DateTime.utc_now(),
+ "gpt_id" => gpt_id,
+ "gpt_at" => created,
+ "gpt_usage" => usage,
+ "type" => "completions",
+ "parent_run_id" => Map.get(prompt, "parent_run_id"),
+ "moderation" => %{"input" => %{flagged: moderate?, categories: moderation},
+ "output" => %{flagged: o_moderate?, categories: o_moderation}
+ }
+ }
+ Logger.debug("Saving result to couch: #{inspect doc}")
+ {id, ref, temprefs} = case Couch.post(@couch_run_db, doc) do
+ {:ok, id, _rev} ->
+ {ref, temprefs} = put_temp_ref(id, state.temprefs)
+ {id, ref, temprefs}
+ error ->
+ Logger.error("Failed to save to Couch: #{inspect error}")
+ {nil, nil, state.temprefs}
+ end
+ stop = cond do
+ finish_reason == "stop" -> "s"
+ finish_reason == "length" -> " — truncated"
+ true -> " — #{finish_reason}"
+ end
+ msg.replyfun.(" ↪ #{ref}" <>
+ stop <>
+ " — #{Map.get(usage, "total_tokens", 0)}" <>
+ " (#{Map.get(usage, "prompt_tokens", 0)}/#{Map.get(usage, "completion_tokens", 0)}) tokens" <>
+ " — #{id || "save failed"}")
+ %__MODULE__{state | temprefs: temprefs}
+ {:error, atom} when is_atom(atom) ->
+ Logger.error("gpt error: #{inspect atom}")
+ msg.replyfun.("gpt: ☠️ #{to_string(atom)}")
+ state
error ->
Logger.error("gpt error: #{inspect error}")
msg.replyfun.("gpt: ☠️ ")
+ state
end
end