summaryrefslogtreecommitdiff
path: root/llm-chat.el
diff options
context:
space:
mode:
Diffstat (limited to 'llm-chat.el')
-rw-r--r--  llm-chat.el  93
1 file changed, 86 insertions, 7 deletions
diff --git a/llm-chat.el b/llm-chat.el
index 57b5288..ea3e92e 100644
--- a/llm-chat.el
+++ b/llm-chat.el
@@ -25,30 +25,109 @@
;; along with this program. If not, see <https://www.gnu.org/licenses/>.
;;; Code:
-
(require 'transient)
+(require 'llm-chat-api)
+(require 'llm-chat-replacements)
+(require 'llm-chat-gemini)
+(require 'llm-chat-claude)
+(require 'llm-chat-openai)
+(require 'llm-chat-deepseek)
(transient-define-prefix llm-chat-prompt-menu ()
+  "Transient menu for the prompt-insertion commands."
["llm-chat prompt menu"
- ("u" "user prompt" ignore)
- ("s" "system prompt" ignore)])
+ ("u" "user prompt" llm-chat-insert-user-prompt)
+ ("y" "youtube summary" llm-chat-youtube-transcript-prompt)
+ ("s" "system prompt" llm-chat-insert-system-prompt)])
(transient-define-prefix llm-chat-settings-menu ()
+  "Transient menu for llm-chat request settings (stream is still a stub)."
["llm-chat settings menu"
- ("t" "temperature" ignore)
+ ("t" "temperature" llm-chat-set-temperature)
("s" "stream" ignore)])
+(transient-define-prefix llm-chat-model-menu ()
+  "Transient menu for selecting the active LLM model.
+Each entry calls `llm-chat-set-model' with the provider's model name."
+  ;; Title fixed: was a copy-paste of "llm-chat settings menu".
+  ["llm-chat model menu"
+   ("dc" "deepseek-chat" (lambda () (interactive) (llm-chat-set-model "deepseek-chat")))
+   ("dk" "deepseek-coder" (lambda () (interactive) (llm-chat-set-model "deepseek-coder")))
+   ("dr" "deepseek-reasoner" (lambda () (interactive) (llm-chat-set-model "deepseek-reasoner")))
+   ("g4" "gpt-4.1" (lambda () (interactive) (llm-chat-set-model "gpt-4.1")))
+   ("g5" "gpt-5" (lambda () (interactive) (llm-chat-set-model "gpt-5")))
+   ("ch" "claude-3-5-haiku-latest" (lambda () (interactive) (llm-chat-set-model "claude-3-5-haiku-latest")))])
+
(transient-define-prefix llm-chat-menu ()
+  "Top-level transient menu for llm-chat.
+History save/load are still stubs (`ignore')."
["llm-chat menu"
- ("p" "prompt evaluate" ignore)
+ ("p" "prompt evaluate" llm-chat-capture-prompt)
("i" "insert prompt" llm-chat-prompt-menu)
- ("m" "model select" ignore)
+ ("m" "model select" llm-chat-model-menu)
("hs" "history save" ignore)
("hl" "history load" ignore)
- ("hc" "history clear" ignore)
+ ("hc" "history clear" llm-chat-reset-history)
("s" "settings" llm-chat-settings-menu)
("q" "close menu" ignore)])
+(defun llm-chat-resolve-backend ()
+  "Return a backend object matching the currently selected model.
+Probes each supported provider with `llm-chat-resolve-model-backend';
+returns nil (after messaging the user) when no provider matches."
+  ;; Hoist the model lookup: the original called `llm-chat-get-model'
+  ;; once per provider probe.
+  (let ((model (llm-chat-get-model)))
+    (cond ((llm-chat-resolve-model-backend 'deepseek model)
+           (llm-chat-make-deepseek-backend))
+          ((llm-chat-resolve-model-backend 'gemini model)
+           (llm-chat-make-gemini-backend))
+          ((llm-chat-resolve-model-backend 'claude model)
+           (llm-chat-make-claude-backend))
+          ((llm-chat-resolve-model-backend 'openai model)
+           (llm-chat-make-openai-backend))
+          (t
+           ;; `cond' bodies are implicit progns; no `progn' wrapper needed.
+           (message "Supported model not selected; cannot resolve backend")
+           nil))))
+
+(defun llm-chat-capture-prompt ()
+  "Evaluate the prompt that precedes point.
+Searches backward for the nearest \"(AI):\", \"(USR):\" or \"(SYS):\"
+marker.  A USR prompt is sent to the resolved backend, a SYS prompt is
+installed as the system prompt; anything else reports a message."
+  (interactive)
+  ;; `if-let*' short-circuits: a failed backward search, or a marker
+  ;; other than USR/SYS (i.e. AI), falls through to the `message' call.
+  (if-let* ((end-pos (point))
+            (search (re-search-backward "(\\(AI\\|USR\\|SYS\\)):" nil t))
+            (match (car (member (match-string 1) '("USR" "SYS"))))
+            ;; NOTE(review): 6 = length of "(USR):"/"(SYS):", so the
+            ;; space after the colon is included in PROMPT -- confirm
+            ;; that is intended.
+            (start-pos (+ (point) 6))
+            (prompt (buffer-substring-no-properties start-pos end-pos)))
+      (cond ((string= match "USR")
+             (when-let (backend (llm-chat-resolve-backend))
+               ;; Mirror the prompt into *LLM Chat* when invoked from
+               ;; some other buffer.
+               (if (not (string= (buffer-name) "*LLM Chat*"))
+                   (with-current-buffer (get-buffer-create "*LLM Chat*")
+                     (goto-char (point-max))
+                     (insert prompt)))
+               (llm-chat-request backend
+                                 (llm-chat-bulk-replacements-with-dict prompt (llm-chat-get-replacements-dict)))
+               ;; Restore point, which the backward search moved.
+               (goto-char end-pos)))
+            ((string= match "SYS")
+             (progn
+               (llm-chat-set-system-prompt prompt)
+               (goto-char end-pos))))
+    ;; NOTE(review): on this branch point may be left at a matched
+    ;; "(AI):" marker instead of restored to END-POS -- confirm.
+    (message "could not process prompt, no (USR) or (SYS) prompt")))
+
+(defun llm-chat-youtube-transcript-prompt ()
+  "Append a YouTube-summary USR prompt plus a transcript file's contents
+to the *LLM Chat* buffer.  Prompts for a transcript file; silently does
+nothing when the chosen file does not exist."
+  (interactive)
+  ;; NOTE(review): consider passing MUSTMATCH to `read-file-name' so a
+  ;; nonexistent selection signals an error instead of no-op'ing.
+  (let ((file (read-file-name "Select YouTube transcript file: ")))
+    (when (file-exists-p file)
+      (with-current-buffer (get-buffer-create "*LLM Chat*")
+        (goto-char (point-max))
+        (insert "(USR): You are an expert summarizer tasked with creating a concise, structured summary of a YouTube video based on its subtitles. Follow these rules:
+
+1. **Identify the Core Topic**:
+   - What is the main subject of the video? (1 sentence)
+
+2. **Key Points**:
+   - Extract 3–5 central arguments, steps, or insights. Use bullet points.
+   - Ignore filler words, ads, or off-topic tangents.
+
+3. **Conclusion/Takeaway**:
+   - What should the viewer remember? (1–2 sentences)
+
+4. **Style**:
+   - Neutral tone, avoid opinionated language.
+   - Use plain English (no jargon unless necessary).
+   - Keep the summary under 200 words.
+
+Here are the subtitles:\n")
+        ;; `insert-file-contents' inserts at point and leaves point
+        ;; before the inserted text, so move to the end afterwards.
+        (insert-file-contents file)
+        (goto-char (point-max))
+        ))))
+
(provide 'llm-chat)
;;; llm-chat.el ends here