defmodule LSG.IRC.GptPlugin do require Logger import Irc.Plugin.TempRef def irc_doc() do """ # OpenAI GPT Uses OpenAI's GPT-3 API to bring natural language prompts to your IRC channel. _prompts_ are pre-defined prompts and parameters defined in the bot' CouchDB. _Runs_ (results of the inference of a _prompt_) are also stored in CouchDB and may be resumed. * **!gpt** list GPT prompts * **!gpt `[prompt]` ``** run a prompt * **+gpt `[short ref|run id]` ``** continue a prompt * **?gpt offensive ``** is content offensive ? * **?gpt show `[short ref|run id]`** run information and web link * **?gpt `[prompt]`** prompt information and web link """ end @couch_db "bot-plugin-openai-prompts" @couch_run_db "bot-plugin-gpt-history" @trigger "gpt" def start_link() do GenServer.start_link(__MODULE__, [], name: __MODULE__) end defstruct [:temprefs] def get_result(id) do Couch.get(@couch_run_db, id) end def get_prompt(id) do Couch.get(@couch_db, id) end def init(_) do regopts = [plugin: __MODULE__] {:ok, _} = Registry.register(IRC.PubSub, "trigger:#{@trigger}", regopts) {:ok, %__MODULE__{temprefs: new_temp_refs()}} end def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :bang, args: [prompt | args]}}}, state) do case Couch.get(@couch_db, prompt) do {:ok, prompt} -> {:noreply, prompt(m, prompt, Enum.join(args, " "), state)} {:error, :not_found} -> m.replyfun.("gpt: no such prompt: #{prompt}") {:noreply, state} error -> Logger.info("gpt: prompt load error: #{inspect error}") m.replyfun.("gpt: database error") {:noreply, state} end end def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :bang, args: []}}}, state) do case Couch.get(@couch_db, "_all_docs") do {:ok, %{"rows" => []}} -> m.replyfun.("gpt: no prompts available") {:ok, %{"rows" => prompts}} -> prompts = prompts |> Enum.map(fn(prompt) -> Map.get(prompt, "id") end) |> Enum.join(", ") m.replyfun.("gpt: prompts: #{prompts}") error -> Logger.info("gpt: prompt load error: #{inspect error}") m.replyfun.("gpt: database error") end {:noreply, state} end def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :plus, args: [ref_or_id | args]}}}, state) do id = lookup_temp_ref(ref_or_id, state.temprefs, ref_or_id) case Couch.get(@couch_run_db, id) do {:ok, run} -> Logger.debug("+gpt run: #{inspect run}") {:noreply, continue_prompt(m, run, Enum.join(args, " "), state)} {:error, :not_found} -> m.replyfun.("gpt: ref or id not found or expired: #{inspect ref_or_id} (if using short ref, try using full id)") {:noreply, state} error -> Logger.info("+gpt: run load error: #{inspect error}") m.replyfun.("gpt: database error") {:noreply, state} end end def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: ["offensive" | text]}}}, state) do text = Enum.join(text, " ") {moderate?, moderation} = moderation(text, m.account.id) reply = cond do moderate? -> "⚠️ #{Enum.join(moderation, ", ")}" !moderate? && moderation -> "👍" !moderate? -> "☠️ error" end m.replyfun.(reply) {:noreply, state} end def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: ["show", ref_or_id]}}}, state) do id = lookup_temp_ref(ref_or_id, state.temprefs, ref_or_id) || ref_or_id url = if m.channel do LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :result, m.network, LSGWeb.format_chan(m.channel), id) else LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :result, id) end m.replyfun.("→ #{url}") {:noreply, state} end def handle_info({:irc, :trigger, @trigger, m = %IRC.Message{trigger: %IRC.Trigger{type: :query, args: [prompt]}}}, state) do url = if m.channel do LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :prompt, m.network, LSGWeb.format_chan(m.channel), prompt) else LSGWeb.Router.Helpers.gpt_url(LSGWeb.Endpoint, :prompt, prompt) end m.replyfun.("→ #{url}") {:noreply, state} end def handle_info(info, state) do Logger.debug("gpt: unhandled info: #{inspect info}") {:noreply, state} end defp continue_prompt(msg, run, content, state) do prompt_id = Map.get(run, "prompt_id") prompt_rev = Map.get(run, "prompt_rev") original_prompt = case Couch.get(@couch_db, Map.get(run, "prompt_id")) do {:ok, prompt} -> prompt _ -> nil end continue_prompt = %{"_id" => Map.get(run, "prompt_id"), "_rev" => Map.get(original_prompt, "_rev"), "type" => Map.get(run, "type"), "parent_run_id" => Map.get(run, "_id"), "openai_params" => Map.get(run, "request") |> Map.delete("prompt")} continue_prompt = if prompt_string = Map.get(original_prompt, "continue_prompt") do full_text = get_in(run, ~w(request prompt)) <> "\n" <> Map.get(run, "response") continue_prompt |> Map.put("prompt", prompt_string) |> Map.put("prompt_format", "liquid") |> Map.put("prompt_liquid_variables", %{"previous" => full_text}) else prompt_content_tag = if content != "", do: " {{content}}", else: "" string = get_in(run, ~w(request prompt)) <> "\n" <> Map.get(run, "response") <> prompt_content_tag continue_prompt |> Map.put("prompt", string) |> Map.put("prompt_format", "liquid") end prompt(msg, continue_prompt, content, state) end defp prompt(msg, prompt = %{"type" => "completions", "prompt" => prompt}, content, state) do Logger.debug("gpt_plugin:prompt/4 #{inspect prompt}") prompt = case Map.get(prompt, "prompt_format", "liquid") do "liquid" -> Tmpl.render(prompt, msg, Map.merge(Map.get(prompt, "prompt_liquid_variables", %{}), %{"content" => content})) "norender" -> prompt end args = Map.get(prompt, "openai_params") |> Map.put("prompt", prompt) |> Map.put("user", msg.account.id) {moderate?, moderation} = moderation(content, msg.account.id) if moderate?, do: msg.replyfun.("⚠️ offensive input: #{Enum.join(moderation, ", ")}") Logger.debug("GPT: request #{inspect args}") case OpenAi.post("/v1/completions", args) do {:ok, %{"choices" => [%{"text" => text, "finish_reason" => finish_reason} | _], "usage" => usage, "id" => gpt_id, "created" => created}} -> text = String.trim(text) {o_moderate?, o_moderation} = moderation(text, msg.account.id) if o_moderate?, do: msg.replyfun.("🚨 offensive output: #{Enum.join(o_moderation, ", ")}") msg.replyfun.(text) doc = %{"prompt_id" => Map.get(prompt, "_id"), "prompt_rev" => Map.get(prompt, "_rev"), "network" => msg.network, "channel" => msg.channel, "nick" => msg.sender.nick, "account_id" => (if msg.account, do: msg.account.id), "request" => args, "response" => text, "message_at" => msg.at, "reply_at" => DateTime.utc_now(), "gpt_id" => gpt_id, "gpt_at" => created, "gpt_usage" => usage, "type" => "completions", "parent_run_id" => Map.get(prompt, "parent_run_id"), "moderation" => %{"input" => %{flagged: moderate?, categories: moderation}, "output" => %{flagged: o_moderate?, categories: o_moderation} } } Logger.debug("Saving result to couch: #{inspect doc}") {id, ref, temprefs} = case Couch.post(@couch_run_db, doc) do {:ok, id, _rev} -> {ref, temprefs} = put_temp_ref(id, state.temprefs) {id, ref, temprefs} error -> Logger.error("Failed to save to Couch: #{inspect error}") {nil, nil, state.temprefs} end stop = cond do finish_reason == "stop" -> "s" finish_reason == "length" -> " — truncated" true -> " — #{finish_reason}" end msg.replyfun.(" ↪ #{ref}" <> stop <> " — #{Map.get(usage, "total_tokens", 0)}" <> " (#{Map.get(usage, "prompt_tokens", 0)}/#{Map.get(usage, "completion_tokens", 0)}) tokens" <> " — #{id || "save failed"}") %__MODULE__{state | temprefs: temprefs} {:error, atom} when is_atom(atom) -> Logger.error("gpt error: #{inspect atom}") msg.replyfun.("gpt: ☠️ #{to_string(atom)}") state error -> Logger.error("gpt error: #{inspect error}") msg.replyfun.("gpt: ☠️ ") state end end defp moderation(content, user_id) do case OpenAi.post("/v1/moderations", %{"input" => content, "user" => user_id}) do {:ok, %{"results" => [%{"flagged" => true, "categories" => categories} | _]}} -> cat = categories |> Enum.filter(fn({_key, value}) -> value end) |> Enum.map(fn({key, _}) -> key end) {true, cat} {:ok, moderation} -> Logger.debug("gpt: moderation: not flagged, #{inspect moderation}") {false, true} error -> Logger.error("gpt: moderation error: #{inspect error}") {false, false} end end end