Markdown renderer support #12
Comments
I just tried an experiment using Rich and a new --rich option; here's the result. Annoyingly I don't think I can support --stream with it. Should I add Rich as a dependency just for this feature? Maybe... especially if I might use Rich to add more features in the future. |
Here's the code for that prototype:
diff --git a/llm/cli.py b/llm/cli.py
index 2ed4d8b..1a2c92c 100644
--- a/llm/cli.py
+++ b/llm/cli.py
@@ -4,6 +4,8 @@ import datetime
 import json
 import openai
 import os
+from rich.console import Console
+from rich.markdown import Markdown
 import sqlite_utils
 import sys
 import warnings
@@ -50,7 +52,10 @@ def cli():
     type=int,
 )
 @click.option("--code", is_flag=True, help="System prompt to optimize for code output")
-def chatgpt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id):
+@click.option("--rich", "-r", is_flag=True, help="Format Markdown output as rich text")
+def chatgpt(
+    prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id, rich
+):
     "Execute prompt against ChatGPT"
     if prompt is None:
         # Read from stdin instead
@@ -62,6 +67,8 @@ def chatgpt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_i
         raise click.ClickException("Cannot use --code and --system together")
     if code:
         system = CODE_SYSTEM_PROMPT
+    if rich and stream:
+        raise click.ClickException("Cannot use --rich and --stream together")
     messages = []
     if _continue:
         _continue = -1
@@ -107,7 +114,12 @@ def chatgpt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_i
         log(no_log, "chatgpt", system, prompt, content, model, chat_id)
         if code:
             content = unwrap_markdown(content)
-        print(content)
+        if rich:
+            console = Console()
+            markdown = Markdown(content)
+            console.print(markdown)
+        else:
+            print(content)
     except openai.error.OpenAIError as ex:
         raise click.ClickException(str(ex)) |
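For reference, the non-streaming --rich path above boils down to this minimal standalone sketch (assuming Rich is installed; the content string here is just a stand-in for a model response):
from rich.console import Console
from rich.markdown import Markdown

content = "# Example\n\nSome **bold** text and a `code` span."  # stand-in for the model response
console = Console()
console.print(Markdown(content))  # renders the Markdown as styled terminal output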
Here's that updated prototype:
diff --git a/llm/cli.py b/llm/cli.py
index 2ed4d8b..5b74959 100644
--- a/llm/cli.py
+++ b/llm/cli.py
@@ -4,6 +4,9 @@ import datetime
 import json
 import openai
 import os
+from rich.console import Console
+from rich.markdown import Markdown
+from rich.table import Table
 import sqlite_utils
 import sys
 import warnings
@@ -50,7 +53,10 @@ def cli():
     type=int,
 )
 @click.option("--code", is_flag=True, help="System prompt to optimize for code output")
-def chatgpt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id):
+@click.option("--rich", "-r", is_flag=True, help="Format Markdown output as rich text")
+def chatgpt(
+    prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id, rich
+):
     "Execute prompt against ChatGPT"
     if prompt is None:
         # Read from stdin instead
@@ -62,6 +68,8 @@ def chatgpt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_i
         raise click.ClickException("Cannot use --code and --system together")
     if code:
         system = CODE_SYSTEM_PROMPT
+    if rich and stream:
+        raise click.ClickException("Cannot use --rich and --stream together")
     messages = []
     if _continue:
         _continue = -1
@@ -107,7 +115,12 @@ def chatgpt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_i
         log(no_log, "chatgpt", system, prompt, content, model, chat_id)
         if code:
             content = unwrap_markdown(content)
-        print(content)
+        if rich:
+            console = Console()
+            markdown = Markdown(content)
+            console.print(markdown)
+        else:
+            print(content)
     except openai.error.OpenAIError as ex:
         raise click.ClickException(str(ex))
@@ -150,7 +163,30 @@ def logs(count, path, truncate):
     for row in rows:
         row["prompt"] = _truncate_string(row["prompt"])
         row["response"] = _truncate_string(row["response"])
-    click.echo(json.dumps(list(rows), indent=2))
+
+    # JSON: click.echo(json.dumps(list(rows), indent=2))
+    table = Table()
+    table.add_column("rowid")
+    table.add_column("provider")
+    table.add_column("system")
+    table.add_column("prompt")
+    table.add_column("response")
+    table.add_column("model")
+    table.add_column("timestamp")
+    table.add_column("chat_id")
+    for row in rows:
+        table.add_row(
+            str(row["rowid"]),
+            row["provider"],
+            row["system"],
+            row["prompt"],
+            row["response"],
+            row["model"],
+            row["timestamp"],
+            row["chat_id"],
+        )
+    console = Console()
+    console.print(table)
 def _truncate_string(s, max_length=100): |
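As a rough standalone illustration of the Table part of that prototype (the column names come from the diff above; the sample row is invented):
from rich.console import Console
from rich.table import Table

table = Table()
for name in ("rowid", "provider", "system", "prompt", "response", "model", "timestamp", "chat_id"):
    table.add_column(name)
# one made-up row, just to show the shape of the data
table.add_row("1", "chatgpt", "", "Say hi", "Hi!", "gpt-3.5-turbo", "2023-04-24 00:00:00", "")
Console().print(table)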
If this is still a problem, note that I solved this issue in my project ShellGenius. In particular, see the function rich_markdown_callback. Here's the basic structure of that function:
def rich_markdown_callback(chunk: str) -> None:
    """
    Update the live markdown display with the received chunk of text from the API.

    Args:
        chunk (str): A chunk of text received from the API.
    """
    global live_markdown, live_markdown_text
    live_markdown_text += chunk
    live_markdown = Markdown(live_markdown_text)
    live.update(live_markdown)
This function is then used in the request to the ChatGPT API (live, live_markdown and live_markdown_text are module-level globals in that file). I wanted to expand this project to create something very similar to llm. |
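A self-contained sketch of the same idea, for anyone who wants to try it without ShellGenius (the fake_stream generator below is a stand-in for the chunks that would arrive from a streaming API):
import time
from rich.live import Live
from rich.markdown import Markdown

def fake_stream():
    # stand-in for chunks arriving from a streaming API
    for chunk in ["# Title\n\n", "Some ", "**bold** ", "streamed ", "text."]:
        time.sleep(0.3)
        yield chunk

text = ""
with Live(Markdown(text)) as live:
    for chunk in fake_stream():
        text += chunk
        live.update(Markdown(text))  # re-render the accumulated Markdown on every chunk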
Huh! That's really cool - yeah, I think I could support streaming Markdown output using that trick. Found your code for that here: https://github.com/sderev/shellgenius/blob/v0.1.8/shellgenius/cli.py
Relevant Rich docs: https://rich.readthedocs.io/en/stable/live.html |
This is a shame: I demonstrated that to myself with:
|
Got an animated Live demo working with this code:
diff --git a/llm/cli.py b/llm/cli.py
index 37dd9ed..c6ed001 100644
--- a/llm/cli.py
+++ b/llm/cli.py
@@ -4,6 +4,9 @@ import datetime
 import json
 import openai
 import os
+from rich.console import Console
+from rich.live import Live
+from rich.markdown import Markdown
 import sqlite_utils
 import sys
 import warnings
@@ -86,18 +89,19 @@ def openai_(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_i
     try:
         if stream:
             response = []
-            for chunk in openai.ChatCompletion.create(
-                model=model,
-                messages=messages,
-                stream=True,
-            ):
-                content = chunk["choices"][0].get("delta", {}).get("content")
-                if content is not None:
-                    response.append(content)
-                    print(content, end="")
-                    sys.stdout.flush()
-            print("")
-            log(no_log, "openai", system, prompt, "".join(response), model, chat_id)
+            md = Markdown("")
+            with Live(md) as live:
+                for chunk in openai.ChatCompletion.create(
+                    model=model,
+                    messages=messages,
+                    stream=True,
+                ):
+                    content = chunk["choices"][0].get("delta", {}).get("content")
+                    if content is not None:
+                        response.append(content)
+                        live.update(Markdown("".join(response)))
+            print("")
+            log(no_log, "openai", system, prompt, "".join(response), model, chat_id)
         else:
             response = openai.ChatCompletion.create(
                 model=model, |
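One refinement worth considering on top of that diff (not part of the prototype above, just a sketch): only use Live when stdout is a terminal, and fall back to plain printing when the output is being piped somewhere else.
import sys
from rich.live import Live
from rich.markdown import Markdown

def stream_chunks(chunks):
    """Render streamed text chunks with Rich Live on a TTY, plain print otherwise."""
    collected = []
    if sys.stdout.isatty():
        with Live(Markdown("")) as live:
            for chunk in chunks:
                collected.append(chunk)
                live.update(Markdown("".join(collected)))
    else:
        for chunk in chunks:
            collected.append(chunk)
            print(chunk, end="", flush=True)
        print("")
    return "".join(collected)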
What I ended up doing is adding this simple bash function to my shell config:
function gpt {
    local input="$*"
    llm -m gpt-4-1106-preview -s 'Answer as short and concise as possible' "${input}" | glow
}
Not ideal - streaming of the LLM output doesn't work, so you have to wait for the model to finish generating the response - but good enough for now. |
I'm not an expert regarding plugin architecture, but I was wondering if the code that deals with streaming output could be exposed as a plugin hook. Basically this part:
# llm/cli.py L278-280, L443-445
for chunk in response:
    print(chunk, end="")
    sys.stdout.flush()
Markdown formatting of the streamed output could then be handled by a plugin. It could also enable other use cases. For example I could imagine a plugin that allows streaming output directly to files in the current workspace. |
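Purely to illustrate the idea (llm's plugin system is built on pluggy, but this particular hook name and signature are hypothetical, not something that exists in the codebase), a sketch of what such a hook could look like:
import pluggy

hookspec = pluggy.HookspecMarker("llm")
hookimpl = pluggy.HookimplMarker("llm")

class OutputHookSpec:
    @hookspec
    def render_stream(self, chunks):
        """Consume an iterable of text chunks and render them; return True if handled."""

class RichMarkdownRenderer:
    # hypothetical plugin that renders the stream as Markdown using Rich
    @hookimpl
    def render_stream(self, chunks):
        from rich.live import Live
        from rich.markdown import Markdown
        text = ""
        with Live(Markdown(text)) as live:
            for chunk in chunks:
                text += chunk
                live.update(Markdown(text))
        return True

pm = pluggy.PluginManager("llm")
pm.add_hookspecs(OutputHookSpec)
pm.register(RichMarkdownRenderer())
pm.hook.render_stream(chunks=["Hello ", "**world**", "!"])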
If anyone is interested, I was able to get this to work outside of llm with a small script:
# render_streamed_markdown.py
import sys
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

def main():
    console = Console()
    md = ""
    with Live(Markdown(""), console=console, refresh_per_second=10) as live:
        while True:
            chunk = sys.stdin.read(1)
            if not chunk:
                break
            md += chunk
            live.update(Markdown(md))

if __name__ == "__main__":
    main()
Run it like this:
llm "showcase a few key features of markdown. keep it short." | python render_streamed_markdown.py
There is one drawback though: it only streams until the terminal height is reached and then displays ellipses. One workaround could be to fall back to regular print output instead of Live. |
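Another thing that may help with the truncation (a hedged suggestion; behaviour can vary by terminal): Live accepts a vertical_overflow parameter, and setting it to "visible" lets the rendered output grow past the screen height instead of being cropped with an ellipsis, though it can introduce flicker or leave stale frames behind while scrolling:
import sys
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

md = ""
# vertical_overflow="visible" keeps rendering past the screen height instead of cropping
with Live(Markdown(""), console=Console(), refresh_per_second=10,
          vertical_overflow="visible") as live:
    while chunk := sys.stdin.read(1):
        md += chunk
        live.update(Markdown(md))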
@jimmybutton thank you for the script! Very useful. Another approach, using Marked 2 on macOS:
touch tmp.md; open -ga "Marked 2" tmp.md
llm "showcase a few key features of markdown. keep it short. include the blocks explaining the format and actual examples without blocks" | tee tmp.md |
Just for fun, I squeezed your script down to a few lines:
import sys
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

with Live(md := "", console=Console(), refresh_per_second=10) as live:
    while chunk := sys.stdin.read(1):
        live.update(Markdown(md := md + chunk)) |
Is it possible to pipe the output of the chat command to rich? |
Got it working with this script:
#!/usr/bin/python3
import sys
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

def main():
    console = Console()
    md = ""
    with Live(Markdown(""), console=console, refresh_per_second=10) as live:
        while True:
            chunk = sys.stdin.read(1)
            if not chunk:
                break
            md += chunk
            live.update(Markdown(md))

if __name__ == "__main__":
    main()
Just place it somewhere in a directory on your PATH and make it executable, then pipe llm's output to it. It does it in realtime! |
Yahhh, that's great code to render Markdown with streaming. I'm still an analog person. (Screenshots: first session, next session.) Come to think of it, the --rich option seems to have disappeared... |
Would be interesting to add a built-in Markdown output preview. For now I'm using it with glow (https://github.com/charmbracelet/glow) to do the same.