---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[10], line 22
19 qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch, return_source_documents=False)
21 q = input("input your query:")
---> 22 result = qa.run(query=q)
24 print(result["result"])
25 #print(result["source_documents"])
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:261, in Chain.run(self, callbacks, *args, **kwargs)
258 return self(args[0], callbacks=callbacks)[self.output_keys[0]]
260 if kwargs and not args:
---> 261 return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
263 if not kwargs and not args:
264 raise ValueError(
265 "`run` supported with either positional arguments or keyword arguments,"
266 " but none were provided."
267 )
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:147, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
145 except (KeyboardInterrupt, Exception) as e:
146 run_manager.on_chain_error(e)
---> 147 raise e
148 run_manager.on_chain_end(outputs)
149 final_outputs: Dict[str, Any] = self.prep_outputs(
150 inputs, outputs, return_only_outputs
151 )
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:141, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
135 run_manager = callback_manager.on_chain_start(
136 dumpd(self),
137 inputs,
138 )
139 try:
140 outputs = (
---> 141 self._call(inputs, run_manager=run_manager)
142 if new_arg_supported
143 else self._call(inputs)
144 )
145 except (KeyboardInterrupt, Exception) as e:
146 run_manager.on_chain_error(e)
File /opt/conda/lib/python3.10/site-packages/langchain/chains/retrieval_qa/base.py:120, in BaseRetrievalQA._call(self, inputs, run_manager)
117 question = inputs[self.input_key]
119 docs = self._get_docs(question)
---> 120 answer = self.combine_documents_chain.run(
121 input_documents=docs, question=question, callbacks=_run_manager.get_child()
122 )
124 if self.return_source_documents:
125 return {self.output_key: answer, "source_documents": docs}
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:261, in Chain.run(self, callbacks, *args, **kwargs)
258 return self(args[0], callbacks=callbacks)[self.output_keys[0]]
260 if kwargs and not args:
---> 261 return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
263 if not kwargs and not args:
264 raise ValueError(
265 "`run` supported with either positional arguments or keyword arguments,"
266 " but none were provided."
267 )
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:147, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
145 except (KeyboardInterrupt, Exception) as e:
146 run_manager.on_chain_error(e)
---> 147 raise e
148 run_manager.on_chain_end(outputs)
149 final_outputs: Dict[str, Any] = self.prep_outputs(
150 inputs, outputs, return_only_outputs
151 )
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:141, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
135 run_manager = callback_manager.on_chain_start(
136 dumpd(self),
137 inputs,
138 )
139 try:
140 outputs = (
---> 141 self._call(inputs, run_manager=run_manager)
142 if new_arg_supported
143 else self._call(inputs)
144 )
145 except (KeyboardInterrupt, Exception) as e:
146 run_manager.on_chain_error(e)
File /opt/conda/lib/python3.10/site-packages/langchain/chains/combine_documents/base.py:84, in BaseCombineDocumentsChain._call(self, inputs, run_manager)
82 # Other keys are assumed to be needed for LLM prediction
83 other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
---> 84 output, extra_return_dict = self.combine_docs(
85 docs, callbacks=_run_manager.get_child(), **other_keys
86 )
87 extra_return_dict[self.output_key] = output
88 return extra_return_dict
File /opt/conda/lib/python3.10/site-packages/langchain/chains/combine_documents/stuff.py:87, in StuffDocumentsChain.combine_docs(self, docs, callbacks, **kwargs)
85 inputs = self._get_inputs(docs, **kwargs)
86 # Call predict on the LLM.
---> 87 return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:218, in LLMChain.predict(self, callbacks, **kwargs)
203 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
204 """Format prompt with kwargs and pass to LLM.
205
206 Args:
(...)
216 completion = llm.predict(adjective="funny")
217 """
---> 218 return self(kwargs, callbacks=callbacks)[self.output_key]
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:147, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
145 except (KeyboardInterrupt, Exception) as e:
146 run_manager.on_chain_error(e)
---> 147 raise e
148 run_manager.on_chain_end(outputs)
149 final_outputs: Dict[str, Any] = self.prep_outputs(
150 inputs, outputs, return_only_outputs
151 )
File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:141, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
135 run_manager = callback_manager.on_chain_start(
136 dumpd(self),
137 inputs,
138 )
139 try:
140 outputs = (
---> 141 self._call(inputs, run_manager=run_manager)
142 if new_arg_supported
143 else self._call(inputs)
144 )
145 except (KeyboardInterrupt, Exception) as e:
146 run_manager.on_chain_error(e)
File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:74, in LLMChain._call(self, inputs, run_manager)
69 def _call(
70 self,
71 inputs: Dict[str, Any],
72 run_manager: Optional[CallbackManagerForChainRun] = None,
73 ) -> Dict[str, str]:
---> 74 response = self.generate([inputs], run_manager=run_manager)
75 return self.create_outputs(response)[0]
File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:84, in LLMChain.generate(self, input_list, run_manager)
82 """Generate LLM result from inputs."""
83 prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
---> 84 return self.llm.generate_prompt(
85 prompts, stop, callbacks=run_manager.get_child() if run_manager else None
86 )
File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:139, in BaseLLM.generate_prompt(self, prompts, stop, callbacks, **kwargs)
131 def generate_prompt(
132 self,
133 prompts: List[PromptValue],
(...)
136 **kwargs: Any,
137 ) -> LLMResult:
138 prompt_strings = [p.to_string() for p in prompts]
---> 139 return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:203, in BaseLLM.generate(self, prompts, stop, callbacks, **kwargs)
201 except (KeyboardInterrupt, Exception) as e:
202 run_manager.on_llm_error(e)
---> 203 raise e
204 run_manager.on_llm_end(output)
205 if run_manager:
File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:195, in BaseLLM.generate(self, prompts, stop, callbacks, **kwargs)
190 run_manager = callback_manager.on_llm_start(
191 dumpd(self), prompts, invocation_params=params, options=options
192 )
193 try:
194 output = (
---> 195 self._generate(
196 prompts, stop=stop, run_manager=run_manager, **kwargs
197 )
198 if new_arg_supported
199 else self._generate(prompts, stop=stop, **kwargs)
200 )
201 except (KeyboardInterrupt, Exception) as e:
202 run_manager.on_llm_error(e)
File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:493, in LLM._generate(self, prompts, stop, run_manager, **kwargs)
490 new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
491 for prompt in prompts:
492 text = (
---> 493 self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
494 if new_arg_supported
495 else self._call(prompt, stop=stop, **kwargs)
496 )
497 generations.append([Generation(text=text)])
498 return LLMResult(generations=generations)
File /opt/conda/lib/python3.10/site-packages/langchain/llms/huggingface_hub.py:114, in HuggingFaceHub._call(self, prompt, stop, run_manager, **kwargs)
112 response = self.client(inputs=prompt, params=params)
113 if "error" in response:
---> 114 raise ValueError(f"Error raised by inference API: {response['error']}")
115 if self.client.task == "text-generation":
116 # Text generation return includes the starter text.
117 text = response[0]["generated_text"][len(prompt) :]
ValueError: Error raised by inference API: Pipeline cannot infer suitable model classes from lucas0/empath-llama-7b
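
What the traceback means: the exception is raised in langchain's HuggingFaceHub wrapper (huggingface_hub.py:114) after the hosted Hugging Face Inference API returned an error payload. The hosted API could not work out which transformers pipeline class to build for the lucas0/empath-llama-7b repo; that typically happens when a repo lacks task/pipeline metadata in its config, or contains only adapter weights rather than a full model. Nothing in the RetrievalQA stack above is at fault; the chain fails only when the LLM itself is finally called.

A common workaround is to bypass the hosted API and run the model through a local transformers pipeline via HuggingFacePipeline. The sketch below is illustrative, not a verified fix for this exact repo: it assumes the weights load with AutoModelForCausalLM, reuses the docsearch retriever from the paste, and matches the langchain 0.0.x API visible in the traceback paths.

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA

model_id = "lucas0/empath-llama-7b"

# Build the text-generation pipeline explicitly, so nothing has to be
# inferred server-side by the Inference API.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)  # assumption: repo holds full causal-LM weights
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)

llm = HuggingFacePipeline(pipeline=pipe)

qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=docsearch,  # the retriever already built earlier in the notebook
    return_source_documents=True,  # needed if result["source_documents"] is wanted
)

# Call the chain with a dict to get a dict back. qa.run(query=q) returns a
# bare string, so the original result["result"] lookup would have raised a
# TypeError even after the inference-API error was fixed.
result = qa({"query": input("input your query:")})
print(result["result"])
print(result["source_documents"])

If the hosted API has to be used, the HuggingFaceHub wrapper also accepts an explicit task argument (e.g. task="text-generation"); whether that is enough depends on what metadata the repo actually exposes, so the local pipeline route is the safer bet.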