




from zenml import pipeline, step
@step
def load_data():
    """Load and preprocess the dataset, returning (train, test) splits.

    Returns:
        tuple: ``(train_data, test_data)`` — the split datasets consumed
        by the downstream training and evaluation steps.
    """
    # Load and preprocess your data
    # NOTE(review): placeholder body — real logic must define
    # ``train_data`` and ``test_data`` before the return.
    ...
    return train_data, test_data
@step
def train_model(train_data):
    """Train a model on ``train_data`` using any ML framework.

    Args:
        train_data: Training split produced by ``load_data``.

    Returns:
        The fitted model object, passed on to ``evaluate``.
    """
    # Train using ANY ML framework
    # NOTE(review): placeholder body — real logic must define ``model``.
    ...
    return model
@step
def evaluate(model, test_data):
    """Evaluate ``model`` on ``test_data`` and log metrics.

    Args:
        model: Fitted model produced by ``train_model``.
        test_data: Held-out split produced by ``load_data``.

    Returns:
        The computed evaluation metrics.
    """
    # Evaluate and log metrics
    # NOTE(review): placeholder body — real logic must define ``metrics``.
    ...
    return metrics
@pipeline
def ml_pipeline():
    """Wire the steps into a ZenML pipeline: load -> train -> evaluate.

    Step outputs are passed as inputs to downstream steps; ZenML builds
    the DAG from these data dependencies.
    """
    train, test = load_data()
    model = train_model(train)
    evaluate(model, test)
from langfuse import observe, get_client
from langfuse.openai import openai
langfuse = get_client()
@observe()
def solve(question: str) -> str:
    """Answer *question* with an instrumented OpenAI chat completion.

    Fetches the "calculator" prompt from Langfuse, issues a traced
    OpenAI call linked to that prompt version, scores the resulting
    trace, and returns the model's answer text.

    Args:
        question: The user question to send to the model.

    Returns:
        The assistant message content from the first choice.
    """
    prompt = langfuse.get_prompt("calculator")
    # Instrumented OpenAI call; links prompt version to output
    resp = openai.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": prompt.compile(base=10)},
            {"role": "user", "content": question},
        ],
        langfuse_prompt=prompt,
    )
    answer = resp.choices[0].message.content
    # Attach an evaluation score to the current trace
    # NOTE(review): hard-coded value=1 marks every answer correct —
    # presumably a demo stand-in for a real evaluator; confirm.
    langfuse.score_current_trace(name="is_correct", value=1)
    return answer
# Run the instrumented example, then flush buffered telemetry so all
# trace/score events reach the Langfuse backend before the process exits.
print(solve("1 + 1 = ?"))
langfuse.flush()


