From 6a6af3350eecaae30231e95224c0575fc659a023 Mon Sep 17 00:00:00 2001
From: Anish Nag
Date: Fri, 8 Dec 2023 11:50:12 -0800
Subject: [PATCH 1/2] add custom output key

---
 .../langchain_experimental/smart_llm/base.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/libs/experimental/langchain_experimental/smart_llm/base.py b/libs/experimental/langchain_experimental/smart_llm/base.py
index 8301c5df5345c..758711b6131fc 100644
--- a/libs/experimental/langchain_experimental/smart_llm/base.py
+++ b/libs/experimental/langchain_experimental/smart_llm/base.py
@@ -66,6 +66,7 @@ def resolve_prompt_inputs(self) -> Dict[str, Any]:
 
     prompt: BasePromptTemplate
     """Prompt object to use."""
+    output_key: str = "resolution"
     ideation_llm: Optional[BaseLanguageModel] = None
     """LLM to use in ideation step. If None given, 'llm' will be used."""
     critique_llm: Optional[BaseLanguageModel] = None
@@ -132,8 +133,8 @@ def input_keys(self) -> List[str]:
     def output_keys(self) -> List[str]:
         """Defines the output keys."""
         if self.return_intermediate_steps:
-            return ["ideas", "critique", "resolution"]
-        return ["resolution"]
+            return ["ideas", "critique", self.output_key]
+        return [self.output_key]
 
     def prep_prompts(
         self,
@@ -169,8 +170,8 @@ def _call(
         self.history.critique = critique
         resolution = self._resolve(stop, run_manager)
         if self.return_intermediate_steps:
-            return {"ideas": ideas, "critique": critique, "resolution": resolution}
-        return {"resolution": resolution}
+            return {"ideas": ideas, "critique": critique, self.ouput_key: resolution}
+        return {self.output_key: resolution}
 
     def _get_text_from_llm_result(self, result: LLMResult, step: str) -> str:
         """Between steps, only the LLM result text is passed, not the LLMResult object.

From 82b4017edbdf8ac11f5a2b06f3a572869f1a80a7 Mon Sep 17 00:00:00 2001
From: Erick Friis
Date: Fri, 8 Dec 2023 13:48:28 -0800
Subject: [PATCH 2/2] typo

---
 libs/experimental/langchain_experimental/smart_llm/base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/experimental/langchain_experimental/smart_llm/base.py b/libs/experimental/langchain_experimental/smart_llm/base.py
index 758711b6131fc..d9d8929cb07cb 100644
--- a/libs/experimental/langchain_experimental/smart_llm/base.py
+++ b/libs/experimental/langchain_experimental/smart_llm/base.py
@@ -170,7 +170,7 @@ def _call(
         self.history.critique = critique
         resolution = self._resolve(stop, run_manager)
         if self.return_intermediate_steps:
-            return {"ideas": ideas, "critique": critique, self.ouput_key: resolution}
+            return {"ideas": ideas, "critique": critique, self.output_key: resolution}
         return {self.output_key: resolution}
 
     def _get_text_from_llm_result(self, result: LLMResult, step: str) -> str:
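
For context, a minimal usage sketch of the new field follows, assuming the patched langchain_experimental package is installed and ChatOpenAI is available as the LLM. The prompt text and the "final_answer" key are illustrative choices, not taken from the patch.

from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain_experimental.smart_llm import SmartLLMChain

# Prompt and key name below are illustrative assumptions, not from the patch.
prompt = PromptTemplate.from_template(
    "What is a good name for a company that makes {product}?"
)
llm = ChatOpenAI(temperature=0)

# Before this patch, the final result was always returned under the hardcoded
# "resolution" key; output_key now makes that configurable.
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2, output_key="final_answer")

result = chain({"product": "colorful socks"})
print(result["final_answer"])  # the resolution now lands under the custom key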