# Create a new annotation
Source: https://docs.avidoai.com/api-reference/annotations/create-a-new-annotation
openapi.json post /v0/annotations
Creates a new annotation.
# Get a single annotation by ID
Source: https://docs.avidoai.com/api-reference/annotations/get-a-single-annotation-by-id
openapi.json get /v0/annotations/{id}
Retrieves detailed information about a specific annotation.
# List annotations
Source: https://docs.avidoai.com/api-reference/annotations/list-annotations
openapi.json get /v0/annotations
Retrieves a paginated list of annotations with optional filtering.
# Create a new application
Source: https://docs.avidoai.com/api-reference/applications/create-a-new-application
openapi.json post /v0/applications
Creates a new application configuration.
# Get a single application by ID
Source: https://docs.avidoai.com/api-reference/applications/get-a-single-application-by-id
openapi.json get /v0/applications/{id}
Retrieves detailed information about a specific application.
# List applications
Source: https://docs.avidoai.com/api-reference/applications/list-applications
openapi.json get /v0/applications
Retrieves a paginated list of applications with optional filtering.
# List document chunks
Source: https://docs.avidoai.com/api-reference/document-chunks/list-document-chunks
openapi.json get /v0/documents/chunked
Retrieves a paginated list of document chunks with optional filtering by document ID.
# Create a new document
Source: https://docs.avidoai.com/api-reference/documents/create-a-new-document
openapi.json post /v0/documents
Creates a new document with the provided information.
# Delete a document
Source: https://docs.avidoai.com/api-reference/documents/delete-a-document
openapi.json delete /v0/documents/{id}
Deletes a document by ID. Note: This will also affect any child documents that reference this document as a parent.
# Get a single document by ID
Source: https://docs.avidoai.com/api-reference/documents/get-a-single-document-by-id
openapi.json get /v0/documents/{id}
Retrieves detailed information about a specific document, including its parent-child relationships.
# List documents
Source: https://docs.avidoai.com/api-reference/documents/list-documents
openapi.json get /v0/documents
Retrieves a paginated list of documents with optional filtering by status, assignee, parent, and other criteria.
# Update an existing document
Source: https://docs.avidoai.com/api-reference/documents/update-an-existing-document
openapi.json put /v0/documents/{id}
Updates an existing document with the provided information.
# Get a single evaluation by ID
Source: https://docs.avidoai.com/api-reference/evals/get-a-single-evaluation-by-id
openapi.json get /v0/evals/{id}
Retrieves detailed information about a specific evaluation.
# List evaluations
Source: https://docs.avidoai.com/api-reference/evals/list-evaluations
openapi.json get /v0/evals
Retrieves a paginated list of evaluations with optional filtering.
# List tests
Source: https://docs.avidoai.com/api-reference/evals/list-tests
openapi.json get /v0/tests
Retrieves a paginated list of tests with optional filtering.
# Ingest events
Source: https://docs.avidoai.com/api-reference/ingestion/ingest-events
openapi.json post /v0/ingest
Ingest an array of events (threads or traces) to store and process.
# Get a single run by ID
Source: https://docs.avidoai.com/api-reference/runs/get-a-single-run-by-id
openapi.json get /v0/runs/{id}
Retrieves detailed information about a specific run.
# List runs
Source: https://docs.avidoai.com/api-reference/runs/list-runs
openapi.json get /v0/runs
Retrieves a paginated list of runs with optional filtering.
# Create a new style guide
Source: https://docs.avidoai.com/api-reference/style-guides/create-a-new-style-guide
openapi.json post /v0/style-guides
Creates a new style guide.
# Get a single style guide by ID
Source: https://docs.avidoai.com/api-reference/style-guides/get-a-single-style-guide-by-id
openapi.json get /v0/style-guides/{id}
Retrieves detailed information about a specific style guide.
# List style guides
Source: https://docs.avidoai.com/api-reference/style-guides/list-style-guides
openapi.json get /v0/style-guides
Retrieves a paginated list of style guides with optional filtering.
# Create a new task
Source: https://docs.avidoai.com/api-reference/tasks/create-a-new-task
openapi.json post /v0/tasks
Creates a new task.
# Get a single task by ID
Source: https://docs.avidoai.com/api-reference/tasks/get-a-single-task-by-id
openapi.json get /v0/tasks/{id}
Retrieves detailed information about a specific task.
# List tasks
Source: https://docs.avidoai.com/api-reference/tasks/list-tasks
openapi.json get /v0/tasks
Retrieves a paginated list of tasks with optional filtering.
# Run a task
Source: https://docs.avidoai.com/api-reference/tasks/run-a-task
openapi.json post /v0/tasks/trigger
Triggers the execution of a task.
# Update an existing task
Source: https://docs.avidoai.com/api-reference/tasks/update-an-existing-task
openapi.json put /v0/tasks/{id}
Updates an existing task with the provided information.
# Get a single test by ID
Source: https://docs.avidoai.com/api-reference/tests/get-a-single-test-by-id
openapi.json get /v0/tests/{id}
Retrieves detailed information about a specific test.
# Get a single trace by ID
Source: https://docs.avidoai.com/api-reference/threads/get-a-single-trace-by-id
openapi.json get /v0/traces/{id}
Retrieves detailed information about a specific trace.
# List Traces
Source: https://docs.avidoai.com/api-reference/threads/list-traces
openapi.json get /v0/traces
Retrieve threads with associated traces, filtered by application ID and optional date parameters.
# Create a new topic
Source: https://docs.avidoai.com/api-reference/topics/create-a-new-topic
openapi.json post /v0/topics
Creates a new topic.
# Get a single topic by ID
Source: https://docs.avidoai.com/api-reference/topics/get-a-single-topic-by-id
openapi.json get /v0/topics/{id}
Retrieves detailed information about a specific topic.
# List topics
Source: https://docs.avidoai.com/api-reference/topics/list-topics
openapi.json get /v0/topics
Retrieves a paginated list of topics with optional filtering.
# Validate an incoming webhook request
Source: https://docs.avidoai.com/api-reference/webhook/validate-an-incoming-webhook-request
openapi.json post /v0/validate-webhook
Checks the body (including timestamp and signature) against the configured webhook secret. Returns `{ valid: true }` if the signature is valid.
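For reference, a minimal sketch of calling this endpoint over plain HTTP – the header and field names mirror the cURL example on the Webhooks page, and the helper name is illustrative:
```ts
// Illustrative helper – forwards the webhook's signature, timestamp, and body to Avido for validation.
async function isValidAvidoWebhook(
  signature: string,
  timestamp: number,
  body: unknown
): Promise<boolean> {
  const res = await fetch('https://api.avidoai.com/v0/validate-webhook', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': process.env.AVIDO_API_KEY ?? '',
    },
    body: JSON.stringify({ signature, timestamp, body }),
  });
  const data = await res.json();
  return data.valid === true; // the endpoint returns { valid: true } for a valid signature
}
```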
# Changelog
Source: https://docs.avidoai.com/changelog
Track product releases and improvements across Avido versions.

Optimise your articles for RAG straight in Avido – use our best practices to process your original knowledge base, help site, or similar source into split, optimised articles with proper metadata, ready to ingest into RAG. Much more to come!
### 📖 Recall (RAG Evaluation)
The **Recall** feature in Avido provides a comprehensive way to assess how well your AI application's Retrieval-Augmented Generation (RAG) system is performing.
* Measure key aspects of quality, correctness, and relevancy within your RAG workflow.
* **No-code interface** empowers both technical and non-technical stakeholders to interpret metrics.
* Ensure systems meet required quality standards before production.
### 🛠️ SDK & Trace Improvements
* Microsecond precision when ingesting data.
* Group traces to visualise workflow structure at a glance.
### ☁️ OpenAI on Azure Support
* EU customers can now run all inference on models hosted in Europe.
* Regardless of geography, neither we nor any of our providers ever train on your data.
### 🐞 Bug Fixes & Polishing
Lots of improvements and paper-cut fixes to make your experience with Avido even smoother, faster, and more enjoyable.

We're excited to announce the latest product updates for Avido. Our newest features make it easier and safer to deploy Generative AI, providing peace-of-mind for critical applications.
### 🔍 Enhanced Test View
* Easily dive into each evaluation to pinpoint exactly what's working—and what needs improvement.
* Clearly understand AI performance to rapidly iterate and optimize.
### 📌 Annotations View
* Track application changes seamlessly and visualize how these updates impact individual eval performance.
* Stay informed and make confident deployment decisions with clear version tracking.
### 🔐 Single Sign-On (SSO)
* Support for all major identity providers, making it even easier to roll out Avido in enterprises.
### ⚙️ Custom Evaluations
* Create custom evals directly from our UI or via API.
* Test specific business logic, compliance requirements, brand-specific wording, and other critical aspects of your application, ensuring unmatched accuracy and reliability.
With these updates, Avido continues to empower financial services by ensuring safe, transparent, and high-quality deployment of Generative AI.

We're thrilled to share our first public changelog, marking a step forward in our commitment to enhancing the understanding of AI applications and helping enterprises maximize the value of AI with Avido.
### 🚀 Quickstart Workflow
* Upload existing outputs via CSV to automatically generate evaluation cases
* Smart AI-powered categorization of topics and tasks
* Interactive review interface for selecting benchmark outputs
* Automated evaluation criteria generation based on selected examples
### 📊 Improved Scoring System
* Simplified scoring scale (1-5) for more intuitive evaluation
* Updated benchmarking system for better quality assessment
* Refined evaluation criteria for clearer quality metrics
### 🤖 Smart Analysis
* Automatic topic detection from output patterns
* Task identification based on user intentions
* Intelligent grouping of similar outputs
* Automated quality scoring of historical outputs
### 💡 Enhanced Review Experience
* Visual topic distribution analysis
* Side-by-side conversation comparison
* Guided selection of benchmark outputs
* Contextual feedback collection for evaluation criteria
# Welcome to Avido Docs 🚀
Source: https://docs.avidoai.com/introduction
Avido helps financial service teams ship reliable AI experiences – without slowing down innovation.
* **Automated evaluations** – Continuous monitoring gives you confidence in your performance and catches regressions as they happen
* **No-code interface** – SMEs can run tests directly from Avido's UI, placing responsibility where it belongs
* **Synthetic data testing** – Reduces time to value significantly, allowing for faster roll-out to production
* **Powerful add-ons** – Quickstart templates and RAG Optimizer save you time when building applications
Whether you're building a support assistant, an autonomous agent, or a RAG pipeline, Avido gives you the safety net you need to move fast **and** sleep at night.
***
## How Avido fits into your app

1. **Avido sends a webhook** – When a test is triggered, Avido sends a POST request to your endpoint with synthetic input and a testId.
2. **You validate the request** – Verify the webhook signature to ensure it's really from Avido.
3. **Run your AI workflow** – Process the synthetic input through your normal application flow.
4. **Log events along the way** – Capture LLM calls, tool usage, retrievals, and other key steps.
5. **Send the trace to Avido** – When your workflow completes, send the full event trace back to Avido.
6. **View evaluation results** – Avido runs your configured evaluations and displays results in the dashboard.
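Put together, a webhook handler covering these six steps might look roughly like this – a sketch only, where `runMyWorkflow` stands in for your own application logic and the SDK calls mirror the examples on the Webhooks and Tracing pages:
```ts
import express from 'express';
import { Avido } from '@avidoai/sdk-node';

const app = express();
app.use(express.json());
const client = new Avido({ apiKey: process.env.AVIDO_API_KEY! });

app.post('/avido/webhook', async (req, res) => {
  // 2. Validate that the request really came from Avido
  const { valid } = await client.validateWebhook({
    signature: req.get('x-avido-signature'),
    timestamp: req.get('x-avido-timestamp'),
    body: req.body,
  });
  if (!valid) return res.status(401).send('Invalid webhook');

  // 3–4. Run your AI workflow and collect events along the way (your own logic)
  const events = await runMyWorkflow(req.body.prompt, req.body.testId);

  // 5. Send the full event trace back to Avido
  await client.ingest.create({ events });

  // 6. Avido runs your evaluations and shows the results in the dashboard
  return res.status(200).send('OK');
});
```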
***
## Getting Started
1. **Install an SDK**
```bash
npm i @avidoai/sdk-node # Node
pip install avido # Python
```
2. **Set up a webhook endpoint** in your application [Learn more](/webhooks)
3. **Start tracing events** in your application [Learn more](/traces)
```ts
client.ingest.create({ events })
```
4. **Upload existing data** to auto-populate tasks [Learn more](/quickstart)
5. **Review your baseline performance** in the dashboard
> Prefer pure HTTP? All endpoints are [documented here](/api-reference).
***
## Core concepts
| Concept | TL;DR |
| --------------- | ------------------------------------------------------------------------------------------- |
| **Tests** | Automated runs of your workflow using synthetic input. |
| **Webhooks** | Avido triggers a test by POSTing to your endpoint, so tests can run automatically or from the UI. |
| **Traces** | Ordered lists of events that reconstruct a conversation / agent run. |
| **Events** | Atomic pieces of work (`llm`, `tool`, `retriever`, `log`). |
| **Evaluations** | Rules & metrics (naturalness, recall etc.) applied to traces. |
Dive deeper with the sidebar or jump straight to **[Traces](/traces)** to see how instrumentation works.
***
## Need help?
* **Slack** – join via the *? Help* menu in Avido.
* **Email** – [support@avidoai.com](mailto:support@avidoai.com)
Happy building! ✨
# Tracing
Source: https://docs.avidoai.com/traces
Capture every step of your LLM workflow and send it to Avido for replay, evaluation, and monitoring.
When your chatbot conversation or agent run is in flight, **every action becomes an *event***.\
Bundled together they form a **trace** – a structured replay of what happened, step‑by‑step.
| Event | When to use |
| ----------- | ------------------------------------------------------------------------------- |
| `trace` | The root container for a whole conversation / agent run. |
| `llm` | Start and end of every LLM call. |
| `tool` | Calls to a function / external tool invoked by the model. |
| `retriever` | RAG queries and the chunks they return. |
| `log` | Anything else worth seeing while debugging (system prompts, branches, errors…). |
The full schema lives in API ▸ Ingestion.
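For example, a single chatbot turn might produce events like these (field names are taken from the ingestion examples below; `retriever` and `log` events carry their own fields, described in the Ingestion schema):
```ts
const events = [
  {
    type: 'trace',
    timestamp: '2025-05-15T12:34:56.123455Z',
    referenceId: '123e4567-e89b-12d3-a456-426614174000', // your own conversation / run ID
    metadata: { source: 'chatbot' },
  },
  {
    type: 'llm',
    event: 'start',
    traceId: '123e4567-e89b-12d3-a456-426614174000',
    timestamp: '2025-05-15T12:34:56.123456Z',
    modelId: 'gpt-4o-2024-08-06',
    params: { temperature: 1.2 },
    input: [{ role: 'user', content: 'Tell me a joke.' }],
  },
  {
    type: 'tool',
    timestamp: '2025-05-15T12:34:56.123457Z',
    toolInput: 'the input to a tool call',
    toolOutput: 'the output from the tool call',
  },
];
```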
***
## Recommended workflow
1. **Collect events in memory** as they happen.
2. **Flush** once at the end (or on fatal error).
3. **Add a `log` event** describing the error if things blow up.
4. **Keep tracing async** – never block your user.
5. **Evaluation‑only mode?** Only ingest when the run came from an Avido test → check for `testId` from the [Webhook](/webhooks).
6. **LLM events** should contain the *raw* prompt & completion – strip provider JSON wrappers.
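A minimal sketch of that workflow – the `TraceCollector` helper and its field choices are illustrative, not part of the SDK:
```ts
import { Avido } from '@avidoai/sdk-node';

// Illustrative helper: collects events in memory and flushes them once at the end of a run.
class TraceCollector {
  private events: Record<string, unknown>[] = [];

  constructor(private client: Avido, private testId?: string) {}

  add(event: Record<string, unknown>) {
    // 1. Collect events in memory as they happen
    this.events.push({ timestamp: new Date().toISOString(), ...event });
  }

  logError(err: unknown) {
    // 3. Add a `log` event describing the error (field name illustrative)
    this.add({ type: 'log', message: String(err) });
  }

  flush() {
    // 5. Evaluation-only mode: skip ingestion unless the run came from an Avido test
    if (!this.testId) return;
    // 2 & 4. Flush once, fire-and-forget – never block your user on tracing
    this.client.ingest
      .create({ events: this.events })
      .catch((err) => console.error('Avido ingest failed', err));
  }
}
```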
***
## Ingesting events
You can send events:
* **Directly via HTTP**
* **Via our SDKs** (`avido`)
```bash cURL (default)
curl --request POST \
--url https://api.avidoai.com/v0/ingest \
--header 'Content-Type: application/json' \
--header 'x-api-key: ' \
--data '{
"events": [
{
"type": "trace",
"timestamp": "2025-05-15T12:34:56.123455Z",
"referenceId": "123e4567-e89b-12d3-a456-426614174000",
"metadata": {
"source": "chatbot"
}
},
{
"type": "llm",
"event": "start",
"traceId": "123e4567-e89b-12d3-a456-426614174000",
"timestamp": "2025-05-15T12:34:56.123456Z",
"modelId": "gpt-4o-2024-08-06",
"params": {
"temperature": 1.2
},
"input": [
{
"role": "user",
"content": "Tell me a joke."
}
]
}
]
}'
```
```ts Node
import Avido from 'avido';
const client = new Avido({
applicationId: 'My Application ID',
apiKey: process.env['AVIDO_API_KEY'], // optional – defaults to env
});
const ingest = await client.ingest.create({
events: [
{
timestamp: '2025-05-15T12:34:56.123456Z',
type: 'trace',
testId: 'INSERT UUID'
},
{
timestamp: '2025-05-15T12:34:56.123456Z',
type: 'tool',
toolInput: 'the input to a tool call',
toolOutput: 'the output from the tool call'
}
],
});
console.log(ingest.data);
```
```python Python
import os
from avido import Avido
client = Avido(
api_key=os.environ.get("AVIDO_API_KEY"), # optional – defaults to env
)
ingest = client.ingest.create(
events=[
{
"timestamp": "2025-05-15T12:34:56.123456Z",
"type": "trace",
},
{
"timestamp": "2025-05-15T12:34:56.123456Z",
"type": "trace",
},
],
)
print(ingest.data)
```
***
## Tip: map your IDs
If you already track a conversation / run in your own DB, pass that same ID as `referenceId`.\
It makes mapping records between your system and Avido effortless.
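For example (a sketch – `conversation.id` stands in for whatever identifier your own system already uses):
```ts
const events = [
  {
    type: 'trace',
    timestamp: new Date().toISOString(),
    referenceId: conversation.id, // the ID you already store in your own DB
  },
  // ...the llm / tool / retriever / log events for this run
];
await client.ingest.create({ events });
```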
***
## Next steps
* Inspect traces in **Traces** inside the dashboard.
Need more examples or have a tricky edge case? [Contact us](mailto:support@avidoai.com) and we’ll expand the docs! 🎯
# Webhooks
Source: https://docs.avidoai.com/webhooks
Trigger automated Avido tests from outside your code and validate payloads securely.
## Why webhook‑triggered tests?
**Part of Avido's secret sauce is that you can kick off a test *without touching your code*.**\
Instead of waiting for CI or redeploys, Avido sends an HTTP `POST` to an endpoint that **you** control.
| Benefit | What it unlocks |
| ----------------------- | ------------------------------------------------------------- |
| **Continuous coverage** | Run tests on prod/staging as often as you like, fully automated. |
| **SME‑friendly** | Non‑developers trigger & tweak tasks from the Avido UI. |
***
## How it works
1. **A test is triggered** in the dashboard or automatically.
2. **Avido POSTs** to your configured endpoint.
3. **Validate** the `signature` + `timestamp` with our API/SDK.
4. **Run your LLM flow** using `prompt` from the payload.
5. **Emit a trace** that includes `testId` to connect results in Avido.
6. **Return `200 OK`** – any other status tells Avido the test failed.
### Payload example
```json Webhook payload
{
"testId": "123e4567-e89b-12d3-a456-426614174000",
"prompt": "Write a concise onboarding email for new users."
}
```
Headers:
| Header | Purpose |
| ------------------- | ---------------------------------------- |
| `x-avido-signature` | HMAC signature of the payload |
| `x-avido-timestamp` | Unix ms timestamp the request was signed |
***
## Verification flow
```mermaid
sequenceDiagram
Avido->>Your Endpoint: POST payload + headers
Your Endpoint->>Avido API: /v0/validate-webhook
Avido API-->>Your Endpoint: { valid: true }
Your Endpoint->>LLM: run(prompt)
Your Endpoint->>Avido API: POST /v0/ingest (trace incl. testId)
```
If validation fails, respond **401** (or other 4xx/5xx). Avido marks the test as **failed**.
***
## Code examples
```bash cURL (default)
curl --request POST \
  --url https://api.avidoai.com/v0/validate-webhook \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: ' \
  --data '{
"signature": "abc123signature",
"timestamp": 1687802842609,
"body": {
"testId": "123e4567-e89b-12d3-a456-426614174000",
"prompt": "Write a concise onboarding email for new users."
}
}'
```
```ts Node
import express from 'express';
import { Avido } from '@avidoai/sdk-node';
const app = express();
app.use(express.json());
const client = new Avido({ apiKey: process.env.AVIDO_API_KEY! });
app.post('/avido/webhook', async (req, res) => {
const signature = req.get('x-avido-signature');
const timestamp = req.get('x-avido-timestamp');
const body = req.body;
try {
const { valid } = await client.validateWebhook({
signature,
timestamp,
body
});
if (!valid) return res.status(401).send('Invalid webhook');
const result = await runAgent(body.prompt); // 🤖 your LLM call
await client.traces.create({
testId: body.testId,
input: body.prompt,
output: result
});
return res.status(200).send('OK');
} catch (err) {
console.error(err);
return res.status(500).send('Internal error');
}
});
```
```python Python
import os
from flask import Flask, request, jsonify
from avido import Avido
app = Flask(__name__)
client = Avido(api_key=os.environ["AVIDO_API_KEY"])
@app.route("/avido/webhook", methods=["POST"])
def handle_webhook():
body = request.get_json(force=True) or {}
signature = request.headers.get("x-avido-signature")
timestamp = request.headers.get("x-avido-timestamp")
if not signature or not timestamp:
return jsonify({"error": "Missing signature or timestamp"}), 400
try:
resp = client.validate_webhook.validate(
signature=signature,
timestamp=timestamp,
body=body
)
if not resp.valid:
return jsonify({"error": "Invalid webhook signature"}), 401
except Exception as e:
return jsonify({"error": str(e)}), 401
result = run_agent(body.get("prompt")) # your LLM pipeline
client.traces.create(
test_id=body.get("testId"),
input=body.get("prompt"),
output=result
)
return jsonify({"status": "ok"}), 200
```
***
## Next steps
* Send us [Trace events](/traces).
* Schedule or manually trigger tasks from **Tasks** in the dashboard.
* Invite teammates so they can craft evals and eyeball results directly in Avido.
***