Bug in model options serialization #168

Closed
s-kostyaev opened this issue Mar 9, 2025 · 1 comment · Fixed by #169

@s-kostyaev
Contributor

Steps to reproduce:

(require 'llm-ollama)
(llm-chat (make-llm-ollama
	   :chat-model "qwen2.5:3b"
	   :default-chat-temperature 0.15
	   :default-chat-non-standard-params
	   '(("num_ctx" . 32768)
	     ("keep_alive" . 0)))
	  (llm-make-chat-prompt "hi"))

Result:

Debugger entered--Lisp error: (wrong-type-argument consp nil)
  json-serialize((:messages [(:role "user" :content "hi")] :model "qwen2.5:3b" :stream :false :keep_alive 0 :options (:temperature 0.15 (:num_ctx . 32768))))
  (encode-coding-string (json-serialize data) 'utf-8)
  (progn (encode-coding-string (json-serialize data) 'utf-8))
  (if data (progn (encode-coding-string (json-serialize data) 'utf-8)))
  (plz-media-type-request 'post url :as (list 'media-types plz-media-types) :body (if data (progn (encode-coding-string (json-serialize data) 'utf-8))) :connect-timeout llm-request-plz-connect-timeout :headers (append headers '(("Content-Type" . "application/json"))) :timeout (or timeout llm-request-plz-timeout))
  (let ((resp (plz-media-type-request 'post url :as (list 'media-types plz-media-types) :body (if data (progn (encode-coding-string (json-serialize data) 'utf-8))) :connect-timeout llm-request-plz-connect-timeout :headers (append headers '(("Content-Type" . "application/json"))) :timeout (or timeout llm-request-plz-timeout)))) (if (llm-request-success (let* ((cl-x resp)) (progn (or (let* (...) (progn ...)) (signal 'wrong-type-argument (list ... cl-x))) (aref cl-x 2)))) (let* ((cl-x resp)) (progn (or (let* ((cl-x cl-x)) (progn (and ... t))) (signal 'wrong-type-argument (list 'plz-response cl-x))) (aref cl-x 4))) (signal 'plz-http-error resp)))
  (condition-case error (let ((resp (plz-media-type-request 'post url :as (list 'media-types plz-media-types) :body (if data (progn (encode-coding-string ... ...))) :connect-timeout llm-request-plz-connect-timeout :headers (append headers '(...)) :timeout (or timeout llm-request-plz-timeout)))) (if (llm-request-success (let* ((cl-x resp)) (progn (or (let* ... ...) (signal ... ...)) (aref cl-x 2)))) (let* ((cl-x resp)) (progn (or (let* (...) (progn ...)) (signal 'wrong-type-argument (list ... cl-x))) (aref cl-x 4))) (signal 'plz-http-error resp))) (plz-error (progn (ignore (seqp error)) (let* ((x503 (seq--elt-safe error 2)) (x504 (seq--elt-safe error 1)) (x505 (seq--elt-safe error 0))) (let ((data x503) (message x504) (error-sym x505)) (cond ((eq ... error-sym) (let ... ...)) ((and ... ...) (error "LLM request timed out")) (t (signal error-sym ...))))))))
  (progn (let ((--cl-keys-- --cl-rest--)) (while --cl-keys-- (cond ((memq (car --cl-keys--) '(:headers :data :timeout :allow-other-keys)) (if (cdr --cl-keys--) nil (error "Missing argument for %s" (car --cl-keys--))) (setq --cl-keys-- (cdr (cdr --cl-keys--)))) ((car (cdr (memq ... --cl-rest--))) (setq --cl-keys-- nil)) (t (error "Keyword argument %s not one of (:headers :data :timeout)" (car --cl-keys--)))))) (condition-case error (let ((resp (plz-media-type-request 'post url :as (list 'media-types plz-media-types) :body (if data (progn ...)) :connect-timeout llm-request-plz-connect-timeout :headers (append headers '...) :timeout (or timeout llm-request-plz-timeout)))) (if (llm-request-success (let* ((cl-x resp)) (progn (or ... ...) (aref cl-x 2)))) (let* ((cl-x resp)) (progn (or (let* ... ...) (signal ... ...)) (aref cl-x 4))) (signal 'plz-http-error resp))) (plz-error (progn (ignore (seqp error)) (let* ((x503 (seq--elt-safe error 2)) (x504 (seq--elt-safe error 1)) (x505 (seq--elt-safe error 0))) (let ((data x503) (message x504) (error-sym x505)) (cond (... ...) (... ...) (t ...))))))))
  (let* ((headers (car (cdr (plist-member --cl-rest-- ':headers)))) (data (car (cdr (plist-member --cl-rest-- ':data)))) (timeout (car (cdr (plist-member --cl-rest-- ':timeout))))) (progn (let ((--cl-keys-- --cl-rest--)) (while --cl-keys-- (cond ((memq (car --cl-keys--) '...) (if (cdr --cl-keys--) nil (error "Missing argument for %s" ...)) (setq --cl-keys-- (cdr ...))) ((car (cdr ...)) (setq --cl-keys-- nil)) (t (error "Keyword argument %s not one of (:headers :data :timeout)" (car --cl-keys--)))))) (condition-case error (let ((resp (plz-media-type-request 'post url :as (list ... plz-media-types) :body (if data ...) :connect-timeout llm-request-plz-connect-timeout :headers (append headers ...) :timeout (or timeout llm-request-plz-timeout)))) (if (llm-request-success (let* (...) (progn ... ...))) (let* ((cl-x resp)) (progn (or ... ...) (aref cl-x 4))) (signal 'plz-http-error resp))) (plz-error (progn (ignore (seqp error)) (let* ((x503 ...) (x504 ...) (x505 ...)) (let (... ... ...) (cond ... ... ...))))))))
  llm-request-plz-sync("http://localhost:11434/api/chat" :headers nil :data (:messages [(:role "user" :content "hi")] :model "qwen2.5:3b" :stream :false :keep_alive 0 :options (:temperature 0.15 (:num_ctx . 32768))))
  #f(compiled-function (provider prompt &optional multi-output) #<bytecode 0x15a1f3b485c01575>)(#s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0))))
  apply(#f(compiled-function (provider prompt &optional multi-output) #<bytecode 0x15a1f3b485c01575>) (#s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)))))
  #f(compiled-function (&rest args) #<bytecode -0x4803e75d59adf41>)(#s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0))))
  apply(#f(compiled-function (&rest args) #<bytecode -0x4803e75d59adf41>) (#s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)))))
  #f(compiled-function (&rest args) #<bytecode -0xcb995ddbfd8af4b>)()
  #f(compiled-function (cl--cnm provider prompt &optional _) #<bytecode -0x1f31e375b7c74599>)(#f(compiled-function (&rest args) #<bytecode -0xcb995ddbfd8af4b>) #s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0))))
  apply(#f(compiled-function (cl--cnm provider prompt &optional _) #<bytecode -0x1f31e375b7c74599>) #f(compiled-function (&rest args) #<bytecode -0xcb995ddbfd8af4b>) (#s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)))))
  #f(compiled-function (provider prompt &optional _) "Log the input to llm-chat." #<bytecode -0x1b7195f7e20309d1>)(#s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0))))
  apply(#f(compiled-function (provider prompt &optional _) "Log the input to llm-chat." #<bytecode -0x1b7195f7e20309d1>) #s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0))))
  llm-chat(#s(llm-ollama :default-chat-temperature 0.15 :default-chat-max-tokens nil :default-chat-non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0)) :scheme "http" :host "localhost" :port 11434 :chat-model "qwen2.5:3b" :embedding-model nil) #s(llm-chat-prompt :context nil :examples nil :interactions (#s(llm-chat-prompt-interaction :role user :content "hi" :tool-results nil)) :tools nil :temperature 0.15 :max-tokens nil :response-format nil :non-standard-params (("num_ctx" . 32768) ("keep_alive" . 0))))
  (progn (llm-chat (let ((default-chat-temperature 0.15) (default-chat-max-tokens nil) (default-chat-non-standard-params '(("num_ctx" . 32768) ("keep_alive" . 0))) (scheme "http") (host "localhost") (port 11434) (chat-model "qwen2.5:3b") (embedding-model nil)) (progn (record 'llm-ollama default-chat-temperature default-chat-max-tokens default-chat-non-standard-params scheme host port chat-model embedding-model))) (llm-make-chat-prompt "hi")))
  eval((progn (llm-chat (let ((default-chat-temperature 0.15) (default-chat-max-tokens nil) (default-chat-non-standard-params '(... ...)) (scheme "http") (host "localhost") (port 11434) (chat-model "qwen2.5:3b") (embedding-model nil)) (progn (record 'llm-ollama default-chat-temperature default-chat-max-tokens default-chat-non-standard-params scheme host port chat-model embedding-model))) (llm-make-chat-prompt "hi"))) t)
  elisp--eval-last-sexp(nil)
  #f(compiled-function () #<bytecode 0x1866e462707a>)()
  eval-last-sexp(nil)
  funcall-interactively(eval-last-sexp nil)
  command-execute(eval-last-sexp)

Without :default-chat-temperature 0.15 or ("keep_alive" . 0), it works fine.
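
The backtrace pins the failure to the :options value, which mixes plist and alist shapes: (:temperature 0.15 (:num_ctx . 32768)). Emacs's native json-serialize expects a flat plist of alternating keywords and values, so after consuming :temperature 0.15 it treats the stray cons cell as the next key, finds no value slot after it, and signals (wrong-type-argument consp nil). A minimal sketch reproducing the error outside llm, assuming only the data shapes visible in the backtrace:

;; Malformed, as in the backtrace: an alist entry embedded in a plist.
;; json-serialize takes (:num_ctx . 32768) as the next key and the
;; missing value slot triggers the error seen above.
(json-serialize '(:temperature 0.15 (:num_ctx . 32768)))
;; => error: (wrong-type-argument consp nil)

;; Well-formed: a flat plist serializes cleanly.
(json-serialize '(:temperature 0.15 :num_ctx 32768))
;; => "{\"temperature\":0.15,\"num_ctx\":32768}"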

@s-kostyaev
Contributor Author

llm-0.24.0

ahyatt added a commit that referenced this issue Mar 10, 2025
This was introduced in the recent change to handle the `keep-alive` option
separately.

This fixes #168.
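
The request in the backtrace already hoists keep_alive to the top level (:keep_alive 0); the regression is in folding the remaining non-standard params into :options, where each ("name" . value) alist pair needs to become a :name value plist pair rather than being appended as a raw cons cell. A hypothetical sketch of that conversion (my/ollama-options-plist is illustrative only, not the actual patch in #169):

;; Hypothetical helper: fold an alist of non-standard params into a
;; flat plist for the Ollama :options field, skipping keep_alive
;; (which is sent at the top level of the request).
(defun my/ollama-options-plist (temperature params)
  "Build a flat :options plist from TEMPERATURE and the alist PARAMS."
  (append
   (when temperature (list :temperature temperature))
   (mapcan (lambda (pair)
             (unless (equal (car pair) "keep_alive")
               (list (intern (concat ":" (car pair))) (cdr pair))))
           params)))

(my/ollama-options-plist 0.15 '(("num_ctx" . 32768) ("keep_alive" . 0)))
;; => (:temperature 0.15 :num_ctx 32768)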