!pip install -U "langchain[google-genai]"
import os
from getpass import getpass
from langchain_google_genai import ChatGoogleGenerativeAI
os.environ["GOOGLE_API_KEY"] = getpass("Enter your Google API key: ")
model = ChatGoogleGenerativeAI(model="gemini-2.5-flash-lite")
response = model.invoke("Why do parrots talk?")
print(response)          # the full AIMessage object, including response metadata
print(response.content)  # just the generated text
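# invoke() returns the whole answer at once; for incremental output you can
# stream instead. A minimal sketch reusing the same model object (the repeated
# question is just for illustration):
for chunk in model.stream("Why do parrots talk?"):
    print(chunk.content, end="", flush=True)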
!pip install langchain-google-genai langchain-core
import langchain_google_genai
import langchain_core
# langchain_google_genai may not expose __version__ at the top level, so this stays commented out:
# print(langchain_google_genai.__version__)
print(langchain_core.__version__)
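# A more portable version check uses the standard library's importlib.metadata
# (Python 3.8+), which reads installed package metadata and works whether or
# not the package defines __version__:
from importlib.metadata import version
print(version("langchain-google-genai"))
print(version("langchain-core"))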
# --- A. Install Required Libraries ---
!pip install langchain-google-genai langchain-core
import os
from getpass import getpass
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# --- B. Set Your API Key ---
# The LangChain integration automatically looks for the GOOGLE_API_KEY environment variable.
os.environ["GOOGLE_API_KEY"] = getpass("Enter your Google API key: ")
# --- C. Load the Model ---
# Use the ChatGoogleGenerativeAI class for the Gemini model integration
llm = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    temperature=0.3,
)
# --- D. Create a Prompt Template ---
# LangChain uses ChatPromptTemplate for modern chat models
template = """
Question: {question}
Answer: Let's think step by step.
"""
prompt = ChatPromptTemplate.from_template(template)
# --- E. Create the Chain (LCEL) ---
# A chain connects the prompt, the model, and an optional output parser.
chain = prompt | llm | StrOutputParser() # StrOutputParser converts the AIMessage object to a plain string
# --- F. Test and Run ---
question = "What is quantum computing?"
print("Running Gemini model chain...")
response = chain.invoke({"question": question})
print(f"\nQuestion: {question}")
print(f"Answer: {response.strip()}")
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage
import os
from getpass import getpass
os.environ["GOOGLE_API_KEY"] = getpass("Enter your Google API key: ")
# Initialize the model
model = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    temperature=0.3,
)
# Zero-shot prompt: the model gets only the instruction, with no examples to imitate
prompt = "Explain quantum computing in simple terms."
response = model.invoke([HumanMessage(content=prompt)])
print(response.content)
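# Zero-shot behavior can be steered with a system message instead of rewording
# the question. A minimal sketch using langchain_core's SystemMessage (the
# persona text is an assumption, chosen only for illustration):
from langchain_core.messages import SystemMessage
response = model.invoke([
    SystemMessage(content="You are a patient physics teacher. Keep answers to two sentences."),
    HumanMessage(content=prompt),
])
print(response.content)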
# Few-Shot Prompting Technique
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage, AIMessage
model = ChatGoogleGenerativeAI(
    model="gemini-2.5-flash",
    temperature=0.3,
)
# Alternating Human/AI messages act as worked examples the model continues
messages = [
    HumanMessage(content="Classify the sentiment: 'I love this product!'"),
    AIMessage(content="Positive"),
    HumanMessage(content="Classify the sentiment: 'This is terrible.'"),
    AIMessage(content="Negative"),
    HumanMessage(content="Classify the sentiment: 'It's okay, nothing special.'"),
]
response = model.invoke(messages)
print(response.content)
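# For reusable few-shot setups, langchain_core also provides
# FewShotChatMessagePromptTemplate. A minimal sketch of the same classifier as
# a template-driven chain (the {text}/{label} variable names are assumptions):
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
example_prompt = ChatPromptTemplate.from_messages([
    ("human", "Classify the sentiment: '{text}'"),
    ("ai", "{label}"),
])
few_shot_prompt = FewShotChatMessagePromptTemplate(
    example_prompt=example_prompt,
    examples=[
        {"text": "I love this product!", "label": "Positive"},
        {"text": "This is terrible.", "label": "Negative"},
    ],
)
final_prompt = ChatPromptTemplate.from_messages([
    few_shot_prompt,
    ("human", "Classify the sentiment: '{text}'"),
])
chain = final_prompt | model
print(chain.invoke({"text": "It's okay, nothing special."}).content)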