Prompt engineering example using LangChain and Google Gemini:
!pip install langchain-google-genai langchain-core
import langchain_google_genai
import langchain_core
# Quick sanity check that both packages imported correctly
# print(langchain_google_genai.__version__)
print(langchain_core.__version__)
# Output:
# 1.1.1
# --- A. Install Required Libraries ---
# !pip install langchain-google-genai langchain-core
import os
from getpass import getpass
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# --- B. Set Your API Key ---
# The LangChain integration automatically looks for the GOOGLE_API_KEY environment variable.
os.environ["GOOGLE_API_KEY"] = getpass("Enter your API key: ")
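# If you prefer not to use environment variables, the integration also accepts
# the key directly via the constructor (sketch, shown commented out since the
# env var above already covers it; google_api_key is the integration's parameter):
# llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash", google_api_key="YOUR_API_KEY")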
# --- C. Load the Model ---
# Use the ChatGoogleGenerativeAI class for the Gemini model integration
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    temperature=0.3,  # low temperature for more focused, deterministic answers
)
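# Optional sanity check (assumes the key above is valid): invoking the model
# directly returns an AIMessage object; its text lives in the .content attribute.
# reply = llm.invoke("Say hello in one short sentence.")
# print(reply.content)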
# --- D. Create a Prompt Template ---
# LangChain uses ChatPromptTemplate for modern chat models
template = """
Question: {question}
Answer: Let's think step by step.
"""
prompt = ChatPromptTemplate.from_template(template)
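# Alternative sketch (not used below): from_messages gives finer control, e.g.
# a separate system message; the system text here is purely illustrative.
# prompt = ChatPromptTemplate.from_messages([
#     ("system", "You are a careful tutor who reasons step by step."),
#     ("human", "Question: {question}"),
# ])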
# --- E. Create the Chain (LCEL) ---
# A chain connects the prompt, the model, and an optional output parser.
chain = prompt | llm | StrOutputParser() # StrOutputParser converts the AIMessage object to a plain string
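# Because the chain is a Runnable, it also exposes .batch and .stream with the
# same input shape (the example questions below are illustrative):
# answers = chain.batch([{"question": "What is a qubit?"},
#                        {"question": "What is superposition?"}])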
# --- F. Test and Run ---
question = "Quantum Computing"
print("Running Gemini Model Chain ")
response = chain.invoke({"question": question})
print(f"\nQuestion: {question}")
print(f"Answer: {response.strip()}")