Skip to content

Commit

Permalink
keep chat model up to date (#148)
Browse files Browse the repository at this point in the history
  • Loading branch information
drf7 authored Jan 29, 2025
1 parent fccd42e commit 1efd95a
Show file tree
Hide file tree
Showing 4 changed files with 15 additions and 27 deletions.
3 changes: 2 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,8 @@
## 4.4.9 (unreleased)


- Nothing changed yet.
- Keep ChatModel up to date
- Fix Nuclia NUA chat message formatting error


## 4.4.8 (2025-01-28)
Expand Down
25 changes: 0 additions & 25 deletions docs/10-activity-log.md
Original file line number Diff line number Diff line change
Expand Up @@ -95,31 +95,6 @@ The `audit_metadata` field is a customizable dictionary. Use the `key` operator
}
}
```
### Special Field: `audit_metadata`
The `audit_metadata` field is a customizable dictionary. Use the `key` operator to target specific keys within the dictionary.

#### Example to filter by `audit_metadata`:

```json
{
"year_month": "2024-10",
"show": ["audit_metadata.environment"],
"filters": {
"audit_metadata": [
{
"key": "environment",
"eq": "prod"
}
]
},
"pagination": {
"limit": 10
}
}
```




### Download Examples

Expand Down
3 changes: 2 additions & 1 deletion nuclia/lib/nua_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def _process_messages(self, messages: list[dict[str, str]]) -> tuple[str, str]:

if role == "system":
system_messages.append(content)
elif role == "user":
else:
user_messages.append(content)

formatted_system = "\n".join(system_messages) if system_messages else ""
Expand Down Expand Up @@ -110,6 +110,7 @@ def completion(
system=system_prompt,
user_prompt=UserPrompt(prompt=user_prompt),
query_context={},
format_prompt=False,
)
response: GenerativeFullResponse = self.predict_sync.generate(
text=body,
Expand Down
11 changes: 11 additions & 0 deletions nuclia/lib/nua_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,13 +84,24 @@ class ChatModel(BaseModel):
truncate: Optional[bool] = False
user_prompt: Optional[UserPrompt] = None
citations: Optional[bool] = False
citation_threshold: Optional[float] = Field(
default=None,
description="If citations is set to True, this will be the similarity threshold. Value between 0 and 1, lower values will produce more citations. If not set, it will be set to the optimized threshold found by Nuclia.",
ge=0.0,
le=1.0,
)
generative_model: Optional[str] = None
max_tokens: Optional[int] = None
query_context_images: Union[
List[Image], Dict[str, Image]
] = {} # base64.b64encode(image_file.read()).decode('utf-8')
prefer_markdown: Optional[bool] = None
json_schema: Optional[Dict[str, Any]] = None
format_prompt: bool = True
rerank_context: bool = Field(
default=False,
description="Whether to reorder the query context based on a reranker. This option will also make it so the first response will contain the scores given for each context piece.",
)

@model_validator(mode="after")
def validate_model(self) -> Self:
Expand Down

0 comments on commit 1efd95a

Please sign in to comment.