*Diff summary: 1 file changed, +8 −4 lines (hunk `@@ -845,25 +845,29 @@`).*
将 Hugging Face 模型直接拉到本地使用
846
846
847
847
``` python
848
+ from langchain import PromptTemplate, LLMChain
848
849
from langchain.llms import HuggingFacePipeline
849
850
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSeq2SeqLM
850
851
851
852
model_id = ' google/flan-t5-large'
852
853
tokenizer = AutoTokenizer.from_pretrained(model_id)
853
- model = AutoModelForSeq2SeqLM.from_pretrained(model_id, load_in_8bit = True )
854
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_id) # load_in_8bit=True, # , device_map='auto'
854
855
855
856
pipe = pipeline(
856
857
" text2text-generation" ,
857
- model = model,
858
- tokenizer = tokenizer,
858
+ model = model,
859
+ tokenizer = tokenizer,
859
860
max_length = 100
860
861
)
861
862
862
863
local_llm = HuggingFacePipeline(pipeline = pipe)
863
864
print (local_llm(' What is the capital of France? ' ))
864
865
865
866
866
- llm_chain = LLMChain(prompt = prompt, llm = local_llm)
867
+ template = """ Question: {question} Answer: Let's think step by step."""
868
+ prompt = PromptTemplate(template = template, input_variables = [" question" ])
869
+
870
+ llm_chain = LLMChain(prompt = prompt, llm = local_llm)
867
871
question = " What is the capital of England?"
868
872
print (llm_chain.run(question))
869
873
```
You can’t perform that action at this time.
0 commit comments