Simple code examples for Python, Node.js, curl, and Go. Compatible with OpenAI, Anthropic, and all major AI models.
Get started in seconds with copy-paste code snippets for Python and Node.js.
import openai
import requests
# Agent Ledger credentials — replace the placeholders with your own values.
# Keep real keys out of source control (e.g. load from environment variables).
AGENT_LEDGER_API_KEY = "your_api_key_here"
AGENT_LEDGER_AGENT_ID = "your_agent_id_here"
def log_decision(prompt, response, risk_score=None):
    """Log an LLM completion to Agent Ledger.

    Args:
        prompt: The prompt text sent to the model.
        response: The model's response text.
        risk_score: Optional risk score; defaults to 50 when None.
    """
    requests.post(
        f"https://agentledgerhq.com/api/v1/agents/{AGENT_LEDGER_AGENT_ID}/decisions",
        headers={"Authorization": f"Bearer {AGENT_LEDGER_API_KEY}"},
        json={
            "decision_type": "llm_completion",
            "input_data": {"prompt": prompt},
            "output_data": {"response": response},
            # `risk_score or 50` would also replace a legitimate score of 0;
            # only fall back to the default when the caller passed None.
            "risk_score": 50 if risk_score is None else risk_score,
        },
        timeout=10,  # don't hang the caller indefinitely on network stalls
    )
# Usage: name the prompt once so the completion call and the audit log
# are guaranteed to record the same text.
prompt = "your prompt here"
client = openai.OpenAI()
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": prompt}],
)
log_decision(prompt, response.choices[0].message.content)
const OpenAI = require('openai');
const fetch = require('node-fetch');
// Agent Ledger credentials — replace the placeholders with your own values.
// Keep real keys out of source control (e.g. read from process.env).
const AGENT_LEDGER_API_KEY = 'your_api_key_here';
const AGENT_LEDGER_AGENT_ID = 'your_agent_id_here';
/**
 * Log an LLM completion to Agent Ledger.
 * @param {string} prompt - The prompt text sent to the model.
 * @param {string} response - The model's response text.
 * @param {number} [riskScore=50] - Risk score for the decision.
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function logDecision(prompt, response, riskScore = 50) {
  const res = await fetch(`https://agentledgerhq.com/api/v1/agents/${AGENT_LEDGER_AGENT_ID}/decisions`, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${AGENT_LEDGER_API_KEY}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      decision_type: 'llm_completion',
      input_data: { prompt },
      output_data: { response },
      risk_score: riskScore
    })
  });
  // fetch only rejects on network failure; HTTP errors were silently
  // discarded before. Surface them so callers know the log was dropped.
  if (!res.ok) {
    throw new Error(`Agent Ledger logging failed: HTTP ${res.status}`);
  }
}
// Usage: keep the prompt in one variable so the completion request and
// the audit log record exactly the same text.
const prompt = 'your prompt here';
const client = new OpenAI();
const completion = await client.chat.completions.create({
  model: 'gpt-4',
  messages: [{ role: 'user', content: prompt }]
});
await logDecision(prompt, completion.choices[0].message.content);
from openai import AzureOpenAI
import requests
# Agent Ledger credentials — replace the placeholders with your own values.
AGENT_LEDGER_API_KEY = "your_api_key_here"
AGENT_LEDGER_AGENT_ID = "your_agent_id_here"
# Azure configuration
AZURE_API_KEY = "your_azure_api_key"
AZURE_ENDPOINT = "https://your-instance.openai.azure.com/"
# AZURE_DEPLOYMENT is the name you gave the deployment in the Azure portal.
AZURE_DEPLOYMENT = "gpt-4-deployment"
def log_decision(prompt, response, risk_score=None):
    """Log an LLM completion to Agent Ledger.

    Args:
        prompt: The prompt text sent to the model.
        response: The model's response text.
        risk_score: Optional risk score; defaults to 50 when None.
    """
    requests.post(
        f"https://agentledgerhq.com/api/v1/agents/{AGENT_LEDGER_AGENT_ID}/decisions",
        headers={"Authorization": f"Bearer {AGENT_LEDGER_API_KEY}"},
        json={
            "decision_type": "llm_completion",
            "input_data": {"prompt": prompt},
            "output_data": {"response": response},
            # `risk_score or 50` would also replace a legitimate score of 0;
            # only fall back to the default when the caller passed None.
            "risk_score": 50 if risk_score is None else risk_score,
        },
        timeout=10,  # don't hang the caller indefinitely on network stalls
    )
# Usage with Azure OpenAI:
client = AzureOpenAI(
    api_key=AZURE_API_KEY,
    api_version="2024-02-15-preview",
    azure_endpoint=AZURE_ENDPOINT,
)
response = client.chat.completions.create(
    # The v1 SDK's AzureOpenAI client takes the deployment name via `model=`;
    # `deployment_id=` belongs to the legacy 0.x SDK and raises TypeError here.
    model=AZURE_DEPLOYMENT,
    messages=[{"role": "user", "content": "your prompt here"}],
)
log_decision("your prompt here", response.choices[0].message.content)
Log AI agent decisions directly from your Python application.
import requests
import json
from datetime import datetime
# Initialize Agent Ledger client
# Replace with your own key; in production, load it from the environment
# rather than hard-coding it in source.
api_key = "your_api_key_here"
base_url = "https://api.agentledgerhq.com"
def log_agent_decision(agent_id, action, output, risk_level="medium",
                       model="gpt-4", tokens_used=1250):
    """Log an AI agent decision to Agent Ledger.

    Args:
        agent_id: Identifier of the agent that made the decision.
        action: Short description of what the agent did.
        output: The agent's output/result text.
        risk_level: Risk level label (default "medium").
        model: Name of the model that produced the output
            (was hard-coded to "gpt-4"; now a parameter).
        tokens_used: Token count for the call
            (was hard-coded to 1250; now a parameter).

    Returns:
        The decoded JSON response from the Agent Ledger API.

    Raises:
        requests.HTTPError: If the API responds with an error status.
    """
    from datetime import timezone  # local import: keeps snippet self-contained

    payload = {
        "agent_id": agent_id,
        "action": action,
        "output": output,
        "risk_level": risk_level,
        # datetime.utcnow() is deprecated and returns a *naive* datetime;
        # emit an explicit UTC-aware ISO-8601 timestamp instead.
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "model": model,
        "tokens_used": tokens_used,
    }
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
    response = requests.post(
        f"{base_url}/api/v1/decisions",
        json=payload,
        headers=headers,
        timeout=10,  # don't hang the caller indefinitely on network stalls
    )
    # Fail loudly on API errors instead of trying to decode an error body.
    response.raise_for_status()
    return response.json()
# Example: Log a compliance decision
result = log_agent_decision(
    "sales-agent-01",
    "Generated contract proposal",
    "Contract for customer XYZ Inc.",
    risk_level="medium",
)
print(f"Decision logged: {result['id']}")
Log AI agent decisions from your Node.js/Express application.
const axios = require('axios');
// Initialize Agent Ledger client
// Initialize Agent Ledger client
// The key is read from the environment so it never lands in source control.
const apiKey = process.env.AGENT_LEDGER_API_KEY;
const baseUrl = 'https://api.agentledgerhq.com';
/**
 * Log an AI agent decision to Agent Ledger.
 * @param {string} agentId - Identifier of the agent that made the decision.
 * @param {string} action - Short description of what the agent did.
 * @param {string} output - The agent's output/result text.
 * @param {string} [riskLevel='medium'] - Risk level label.
 * @param {object} [options] - Optional metadata (previously hard-coded).
 * @param {string} [options.model='gpt-4'] - Model that produced the output.
 * @param {number} [options.tokensUsed=1250] - Token count for the call.
 * @returns {Promise<object>} The API's JSON response body.
 * @throws {Error} Rethrows any request failure after logging it.
 */
async function logAgentDecision(agentId, action, output, riskLevel = 'medium',
                                { model = 'gpt-4', tokensUsed = 1250 } = {}) {
  try {
    const payload = {
      agent_id: agentId,
      action,
      output,
      risk_level: riskLevel,
      timestamp: new Date().toISOString(),
      model,
      tokens_used: tokensUsed
    };
    const response = await axios.post(
      `${baseUrl}/api/v1/decisions`,
      payload,
      {
        headers: {
          'Authorization': `Bearer ${apiKey}`,
          'Content-Type': 'application/json'
        }
      }
    );
    return response.data;
  } catch (error) {
    // Log for visibility, then rethrow so the caller can decide how to react.
    console.error('Failed to log decision:', error.message);
    throw error;
  }
}
// Example: Log a credit decision
const creditDecision = await logAgentDecision(
  'finance-agent-01',
  'Evaluated credit application',
  'Credit approved for $50,000',
  'high'
);
console.log(`Decision logged: ${creditDecision.id}`);
Log decisions directly via HTTP requests.
# Log a decision with a single POST. The $(date -u ...) command substitution
# embeds the current UTC time, ISO-8601 formatted, into the JSON payload
# (requires a POSIX shell; note the quote dance around the substitution).
curl -X POST https://api.agentledgerhq.com/api/v1/decisions \
-H "Authorization: Bearer your_api_key_here" \
-H "Content-Type: application/json" \
-d '{
"agent_id": "hiring-agent-01",
"action": "Screened job candidate",
"output": "Candidate approved for technical interview",
"risk_level": "high",
"timestamp": "'$(date -u +%Y-%m-%dT%H:%M:%SZ)'",
"model": "gpt-4",
"tokens_used": 980
}'
# Response:
# {
#   "id": "dec_1a2b3c4d",
#   "status": "logged",
#   "risk_score": 67,
#   "eu_ai_act_category": "high-risk"
# }
Log AI decisions from your Go microservices.
package main
import (
"bytes"
"encoding/json"
"net/http"
"time"
)
// Decision is the JSON payload posted to the Agent Ledger decisions endpoint.
type Decision struct {
	AgentID    string `json:"agent_id"`    // identifier of the deciding agent
	Action     string `json:"action"`      // short description of what the agent did
	Output     string `json:"output"`      // the agent's output/result text
	RiskLevel  string `json:"risk_level"`  // risk level label, e.g. "medium"
	Timestamp  string `json:"timestamp"`   // RFC 3339 UTC timestamp
	Model      string `json:"model"`       // model that produced the output
	TokensUsed int    `json:"tokens_used"` // token count for the call
}
// logAgentDecision posts a Decision to the Agent Ledger API, returning any
// error from building or executing the HTTP request.
func logAgentDecision(apiKey string, decision Decision) error {
	payload, err := json.Marshal(decision)
	if err != nil {
		return err // previously discarded with `_`
	}
	req, err := http.NewRequest(
		"POST",
		"https://api.agentledgerhq.com/api/v1/decisions",
		bytes.NewBuffer(payload),
	)
	if err != nil {
		return err // previously discarded with `_`
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		// The original deferred resp.Body.Close() *before* checking err;
		// when Do fails resp is nil and the deferred Close panics.
		return err
	}
	defer resp.Body.Close()
	return nil
}
// Example: Log a compliance decision
// NOTE(review): short variable declarations and bare calls are only legal
// inside a function body — wrap this snippet in main() before compiling.
decision := Decision{
	AgentID:    "compliance-agent-01",
	Action:     "Reviewed document for GDPR compliance",
	Output:     "Document approved with 2 minor updates",
	RiskLevel:  "medium",
	Timestamp:  time.Now().UTC().Format(time.RFC3339), // RFC 3339 UTC timestamp
	Model:      "gpt-4",
	TokensUsed: 1200,
}
// NOTE(review): the returned error is ignored here; real code should check it.
logAgentDecision("your_api_key", decision)
Agent Ledger works seamlessly with all major AI models and frameworks. No proprietary lock-in.
Start with our free plan. No credit card required. Integrate in minutes.
Start free →