Skip to main content
Version: Latest

LangChain Integration

Use AI Foundation Services with LangChain for RAG, chains, and agent workflows. Since AIFS is OpenAI-compatible, you can use the langchain-openai package directly.


Setup

pip install langchain langchain-openai

Initialize LLM

"""Initialise a streaming chat LLM against the AIFS OpenAI-compatible endpoint."""
import os

from langchain_openai import ChatOpenAI

# Credentials and endpoint are read from the environment, never hard-coded.
api_key = os.getenv("OPENAI_API_KEY")
base_url = os.getenv("OPENAI_BASE_URL")

llm = ChatOpenAI(
    openai_api_key=api_key,
    openai_api_base=base_url,
    model_name="Llama-3.3-70B-Instruct",
    streaming=True,  # yield tokens incrementally instead of one final message
)

# Test: print tokens to stdout as they arrive from the stream.
prompt = "Write me a short poem about cloud computing."
for chunk in llm.stream(prompt):
    if chunk.content:
        print(chunk.content, end="", flush=True)

Initialize Embeddings

from typing import List
from langchain_openai import OpenAIEmbeddings
import os

class AIFSEmbeddings(OpenAIEmbeddings):
    """OpenAIEmbeddings subclass wired to the AIFS embeddings endpoint.

    The batch methods call the underlying OpenAI client directly; the
    single-query methods are derived from the batch methods so all traffic
    goes through one code path.
    """

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents with a single API call."""
        response = self.client.create(input=texts, model=self.model)
        return [item.embedding for item in response.data]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Asynchronous variant of ``embed_documents``."""
        response = await self.async_client.create(input=texts, model=self.model)
        return [item.embedding for item in response.data]

    def embed_query(self, text: str) -> List[float]:
        """Embed one query string by delegating to the batch method."""
        return self.embed_documents([text])[0]

    async def aembed_query(self, text: str) -> List[float]:
        """Asynchronous variant of ``embed_query``."""
        vectors = await self.aembed_documents([text])
        return vectors[0]

# Point the custom embeddings class at the AIFS endpoint; credentials come
# from the environment, same as the chat model.
embed_model = AIFSEmbeddings(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    openai_api_base=os.getenv("OPENAI_BASE_URL"),
    model="text-embedding-bge-m3",
)

# Test: embed two short strings and report the vector dimensionality.
vectors = embed_model.embed_documents(["Hello!", "World!"])
print(f"Dimensions: {len(vectors[0])}")

Next Steps

© Deutsche Telekom AG