fix: improved --help output
ErikBjare committed Oct 27, 2023
commit 3e6d4fb (parent: b21870a)
Showing 3 changed files with 10 additions and 18 deletions.
README.md: 9 changes (6 additions, 3 deletions)
@@ -130,8 +130,11 @@ Usage: gptme [OPTIONS] [PROMPTS]...
   /undo       Undo the last action.
   /log        Show the conversation log.
   /edit       Edit previous messages.
+  /rename     Rename the conversation.
+  /fork       Create a copy of the conversation with a new name.
   /summarize  Summarize the conversation so far.
   /load       Load a file.
+  /save       Save the most recent code block to a file.
   /shell      Execute a shell command.
   /python     Execute a Python command.
   /replay     Re-execute past commands in the conversation (does not store output in log).
@@ -145,9 +148,8 @@ Options:
   --name TEXT             Name of conversation. Defaults to generating
                           a random name. Pass 'ask' to be prompted for
                           a name.
-  --llm [openai|llama]    LLM to use.
-  --model [gpt-4|gpt-3.5-turbo|wizardcoder-...]
-                          Model to use (gpt-3.5 not recommended)
+  --llm [openai|local]    LLM to use.
+  --model TEXT            Model to use.
   --stream / --no-stream  Stream responses
   -v, --verbose           Verbose output.
   -y, --no-confirm        Skips all confirmation prompts.
@@ -157,6 +159,7 @@ Options:
                           used in testing.
   --show-hidden           Show hidden system messages.
   --version               Show version.
+  --server                Run as server.
   --help                  Show this message and exit.
 ```

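Since the README's help section mirrors the actual `gptme --help` output, drift between the two is easy to catch with click's built-in test runner. A minimal sketch, assuming the click entry point is exposed as `gptme.cli.main`:

```python
from click.testing import CliRunner

from gptme.cli import main  # assumption: the click entry point is named `main`

runner = CliRunner()
result = runner.invoke(main, ["--help"])
# After this commit, the help text should advertise the renamed
# backend and the new --server flag.
assert "--llm [openai|local]" in result.output
assert "--server" in result.output
```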
gptme/cli.py: 6 changes (3 additions, 3 deletions)
@@ -51,7 +51,7 @@
 logger = logging.getLogger(__name__)
 print_builtin = __builtins__["print"]  # type: ignore
 
-LLMChoice = Literal["openai", "llama"]
+LLMChoice = Literal["openai", "local"]
 ModelChoice = Literal["gpt-3.5-turbo", "gpt4"]
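Note that the rename has to be applied twice: once in the `LLMChoice` type here and again in the `click.Choice` for `--llm` below. A pattern sketch (not the commit's code) that derives one from the other so they cannot drift apart:

```python
from typing import Literal, get_args

import click

LLMChoice = Literal["openai", "local"]

# get_args(LLMChoice) returns ("openai", "local"), so the CLI validation
# always matches the type annotation.
llm_choice = click.Choice(get_args(LLMChoice))
```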


@@ -86,12 +86,12 @@
"--llm",
default="openai",
help="LLM to use.",
type=click.Choice(["openai", "llama"]),
type=click.Choice(["openai", "local"]),
)
@click.option(
"--model",
default="gpt-4",
help="Model to use (gpt-3.5 not recommended). Can be: gpt-4, gpt-3.5-turbo, wizardcoder-..., etc.",
help="Model to use.",
)
@click.option(
"--stream/--no-stream",
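The hunk ends on `--stream/--no-stream`, click's paired boolean flag syntax, which parses into a single bool parameter. A self-contained toy example (command name hypothetical):

```python
import click


@click.command()
@click.option("--model", default="gpt-4", help="Model to use.")
@click.option("--stream/--no-stream", default=True, help="Stream responses.")
def demo(model: str, stream: bool):
    # `--stream` sets stream=True, `--no-stream` sets it to False.
    click.echo(f"model={model} stream={stream}")


if __name__ == "__main__":
    demo()
```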
gptme/llm.py: 13 changes (1 addition, 12 deletions)
@@ -34,7 +34,7 @@ def init_llm(llm: str):
             print("Error: OPENAI_API_KEY not set in env or config, see README.")
             sys.exit(1)
         openai.api_key = api_key
-    elif llm in ["local", "llama"]:
+    elif llm == "local":
         if "OPENAI_API_BASE" in os.environ:
             api_base = os.environ["OPENAI_API_BASE"]
         elif api_base := config["env"].get("OPENAI_API_BASE", None):
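The `local` backend reuses the OpenAI client but points it at a different server through `OPENAI_API_BASE`, taken from the environment or the config file as shown above. Roughly, for the pre-1.0 `openai` package (the URL below is a made-up example, not from the commit):

```python
import os

import openai

# Hypothetical OpenAI-compatible server running locally,
# e.g. llama-cpp-python's server.
os.environ.setdefault("OPENAI_API_BASE", "http://localhost:8000/v1")
openai.api_base = os.environ["OPENAI_API_BASE"]
```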
@@ -58,17 +58,6 @@ def reply(messages: list[Message], model: str, stream: bool = False) -> Message:
     return Message("assistant", response)
 
 
-def _complete(prompt: str, model: str) -> str:
-    print(prompt)
-    response = openai.Completion.create(  # type: ignore
-        model=model,
-        prompt=prompt,
-        temperature=temperature,
-        top_p=top_p,
-    )
-    return response.choices[0].text
-
-
 def _chat_complete(messages: list[Message], model: str) -> str:
     # This will generate code and such, so we need appropriate temperature and top_p params
     # top_p controls diversity, temperature controls randomness
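With the unused legacy `Completion` path removed, `_chat_complete` is the only completion entry point left. Its comments call for conservative sampling when generating code; against the pre-1.0 `openai` SDK such a call looks roughly like this (parameter values are illustrative, not the commit's):

```python
import openai

response = openai.ChatCompletion.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write a quicksort in Python."}],
    temperature=0.2,  # lower temperature -> less random output, better for code
    top_p=0.95,       # nucleus sampling: caps how diverse sampled tokens can be
)
print(response.choices[0].message.content)
```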
