Managing Provider Items
Provider Items are files and folders stored in your connected providers. This guide covers all CRUD operations on these items. To discover your providers first, see Browsing Providers.
Operating on provider items (uploading, creating folders, renaming, deleting) does not trigger document indexation. To index a file, you must explicitly create an EliseFile from it.
The Key Concept
All provider item operations identify files and folders using a single key string — the full path within the provider:
| Item type | Key format | Example |
|---|---|---|
| File | "path/to/file.pdf" | "reports/research-paper.pdf" |
| Folder | "path/to/folder/" (trailing /) | "reports/2024/" |
| Root | "" (empty string) | "" |
Listing Items
Browse the contents of a provider directory. Results use cursor-based pagination, which is different from the page-based pagination used by other endpoints.
- cURL
- Python (httpx)
- R
- SDK
# List up to 50 items under the "reports/" directory of the provider.
curl -s "https://<api-domain>/api/core/providers/${PROVIDER_ID}/items?key=reports/&limit=50" \
  -H "Authorization: Bearer <your-pat>"
from pydantic import BaseModel, Field
class ProviderItem(BaseModel):
    """A file or folder stored in a connected provider.

    Field aliases map the API's camelCase JSON keys to snake_case attributes.
    """

    provider_id: str = Field(alias="providerId")
    # Full path within the provider; folder keys end with "/" (see key table above).
    key: str
    type: str
    # Size in bytes; None when not reported (presumably for folders — confirm).
    size: int | None = None
    extension: str | None = None
    media_type: str | None = Field(default=None, alias="mediaType")
    last_modified: int | None = Field(default=None, alias="lastModified")
class ListItemsResponse(BaseModel):
    """One page of a provider-item listing (cursor-based pagination)."""

    items: list[ProviderItem]
    # Cursor for the next page; None when no more items are available.
    next_cursor: str | None = Field(default=None, alias="nextCursor")
# Example provider id; replace with one obtained from the providers listing.
provider_id = "550e8400-e29b-41d4-a716-446655440000"

# Fetch the first page of items under the "reports/" directory.
response = client.get(
    f"/providers/{provider_id}/items",
    params={"key": "reports/", "limit": 50},
)
response.raise_for_status()
result = ListItemsResponse.model_validate(response.json())

for item in result.items:
    # Compare against None explicitly so zero-byte files still show "(0 bytes)".
    size_info = f" ({item.size} bytes)" if item.size is not None else ""
    print(f"[{item.type}] {item.key}{size_info}")
# List the first page of items under the "reports/" directory.
provider_id <- "550e8400-e29b-41d4-a716-446655440000"

listing <- base_req |>
  req_url_path_append("providers", provider_id, "items") |>
  req_url_query(key = "reports/", limit = 50) |>
  req_perform() |>
  resp_body_json()

for (entry in listing$items) {
  cat(sprintf("[%s] %s\n", entry$type, entry$key))
}
import asyncio

from biolevate import BiolevateClient


async def main():
    # The client is an async context manager: it opens an HTTP session on
    # entry and closes it on exit.
    async with BiolevateClient(
        base_url="https://<api-domain>",
        token="<your-pat>",
    ) as client:
        provider_id = "550e8400-e29b-41d4-a716-446655440000"
        # First page of items under "reports/"; pagination not shown here.
        result = await client.items.list(provider_id, key="reports/")
        for item in result.items:
            # `type_` carries the item type; the trailing underscore avoids
            # shadowing the Python builtin `type`.
            print(f"[{item.type_}] {item.key}")


asyncio.run(main())
Query Parameters
| Parameter | Type | Default | Description |
|---|---|---|---|
| key | string | "" | Directory key to list (must end with / or be empty) |
| q | string | -- | Filter items by name |
| cursor | string | -- | Pagination cursor from a previous response |
| limit | integer | 50 | Maximum number of items to return |
Paginating Through Results
When nextCursor is non-null, more items are available. Pass it as the cursor parameter to fetch the next page.
- Python (httpx)
- R
def list_all_items(provider_id: str, key: str = "") -> list[ProviderItem]:
    """Collect every item under *key* by following pagination cursors."""
    collected: list[ProviderItem] = []
    cursor: str | None = None

    while True:
        query: dict = {"key": key, "limit": 50}
        if cursor:
            query["cursor"] = cursor

        resp = client.get(f"/providers/{provider_id}/items", params=query)
        resp.raise_for_status()
        page = ListItemsResponse.model_validate(resp.json())

        collected.extend(page.items)
        cursor = page.next_cursor
        if not cursor:
            return collected
# Collect every item under `key` by following pagination cursors.
list_all_items <- function(provider_id, key = "") {
  collected <- list()
  cursor <- NULL

  repeat {
    page_req <- base_req |>
      req_url_path_append("providers", provider_id, "items") |>
      req_url_query(key = key, limit = 50)
    if (!is.null(cursor)) {
      page_req <- page_req |> req_url_query(cursor = cursor)
    }

    page <- page_req |>
      req_perform() |>
      resp_body_json()

    collected <- c(collected, page$items)
    cursor <- page$nextCursor
    if (is.null(cursor)) break
  }

  collected
}
Uploading a File
Upload a file to a provider directory using multipart form data. This stores the file in the provider but does not trigger indexation.
- cURL
- Python (httpx)
- R
- SDK
# Upload document.pdf as multipart form data into the "reports/" directory.
curl -X POST "https://<api-domain>/api/core/providers/${PROVIDER_ID}/items?key=reports/" \
  -H "Authorization: Bearer <your-pat>" \
  -F "file=@document.pdf"
from pathlib import Path

# Stream the file from disk as a multipart upload into "reports/".
source = Path("document.pdf")
with source.open("rb") as handle:
    response = client.post(
        f"/providers/{provider_id}/items",
        params={"key": "reports/"},
        files={"file": (source.name, handle, "application/pdf")},
    )
    response.raise_for_status()

uploaded = ProviderItem.model_validate(response.json())
print(f"Uploaded: {uploaded.key}")
# Multipart upload of document.pdf into the "reports/" directory.
uploaded <- base_req |>
  req_url_path_append("providers", provider_id, "items") |>
  req_url_query(key = "reports/") |>
  req_body_multipart(file = curl::form_file("document.pdf", "application/pdf")) |>
  req_perform() |>
  resp_body_json()

cat(sprintf("Uploaded: %s\n", uploaded$key))
import asyncio

from biolevate import BiolevateClient


async def main():
    async with BiolevateClient(
        base_url="https://<api-domain>",
        token="<your-pat>",
    ) as client:
        provider_id = "550e8400-e29b-41d4-a716-446655440000"
        # Pass an open binary file handle; the SDK builds the multipart body.
        with open("document.pdf", "rb") as f:
            item = await client.items.upload(
                provider_id=provider_id,
                key="reports/",
                file=f,
                file_name="document.pdf",
                mime_type="application/pdf",
            )
        print(f"Uploaded: {item.key}")


asyncio.run(main())
For files larger than a few megabytes, use the presigned upload flow instead. See Common Patterns -- Large File Upload.
Creating a Folder
- cURL
- Python (httpx)
- R
- SDK
# Create a folder; the trailing "/" in the key marks it as a folder.
curl -X POST "https://<api-domain>/api/core/providers/${PROVIDER_ID}/items" \
  -H "Authorization: Bearer <your-pat>" \
  -H "Content-Type: application/json" \
  -d '{"type": "FOLDER", "key": "reports/new-project/"}'
# A trailing "/" in the key marks the new item as a folder.
create_resp = client.post(
    f"/providers/{provider_id}/items",
    json={"type": "FOLDER", "key": "reports/new-project/"},
)
create_resp.raise_for_status()

folder = ProviderItem.model_validate(create_resp.json())
print(f"Created folder: {folder.key}")
# POST a JSON body describing the folder; adding a body with req_body_json()
# makes httr2 send the request as POST by default.
folder <- base_req |>
  req_url_path_append("providers", provider_id, "items") |>
  req_body_json(list(type = "FOLDER", key = "reports/new-project/")) |>
  req_perform() |>
  resp_body_json()

cat(sprintf("Created folder: %s\n", folder$key))
import asyncio

from biolevate import BiolevateClient


async def main():
    async with BiolevateClient(
        base_url="https://<api-domain>",
        token="<your-pat>",
    ) as client:
        provider_id = "550e8400-e29b-41d4-a716-446655440000"
        # The trailing "/" in the key marks the new item as a folder.
        folder = await client.items.create_folder(
            provider_id=provider_id,
            key="reports/new-project/",
        )
        print(f"Created folder: {folder.key}")


asyncio.run(main())
Renaming an Item
- cURL
- Python (httpx)
- R
- SDK
# Rename an item: the new name goes in the query string, the JSON body
# identifies which item to rename.
curl -X PATCH "https://<api-domain>/api/core/providers/${PROVIDER_ID}/items?newName=renamed-file.pdf" \
  -H "Authorization: Bearer <your-pat>" \
  -H "Content-Type: application/json" \
  -d '{"key": "reports/old-file.pdf", "type": "FILE"}'
# The new name goes in the query string; the body identifies the item to rename.
rename_resp = client.patch(
    f"/providers/{provider_id}/items",
    params={"newName": "renamed-file.pdf"},
    json={"key": "reports/old-file.pdf", "type": "FILE"},
)
rename_resp.raise_for_status()

renamed = ProviderItem.model_validate(rename_resp.json())
print(f"Renamed to: {renamed.key}")
# PATCH with the new name as a query parameter and the item key in the body.
renamed <- base_req |>
  req_url_path_append("providers", provider_id, "items") |>
  req_url_query(newName = "renamed-file.pdf") |>
  req_method("PATCH") |>
  req_body_json(list(key = "reports/old-file.pdf", type = "FILE")) |>
  req_perform() |>
  resp_body_json()

cat(sprintf("Renamed to: %s\n", renamed$key))
import asyncio

from biolevate import BiolevateClient


async def main():
    async with BiolevateClient(
        base_url="https://<api-domain>",
        token="<your-pat>",
    ) as client:
        provider_id = "550e8400-e29b-41d4-a716-446655440000"
        # Rename keeps the item in place; only the final path segment changes.
        item = await client.items.rename(
            provider_id=provider_id,
            key="reports/old-file.pdf",
            new_name="renamed-file.pdf",
        )
        print(f"Renamed to: {item.key}")


asyncio.run(main())
Deleting an Item
- cURL
- Python (httpx)
- R
- SDK
# Delete an item; the JSON body identifies which item to remove.
curl -X DELETE "https://<api-domain>/api/core/providers/${PROVIDER_ID}/items" \
  -H "Authorization: Bearer <your-pat>" \
  -H "Content-Type: application/json" \
  -d '{"key": "reports/old-file.pdf", "type": "FILE"}'
# httpx's .delete() helper does not accept a JSON body, so issue the DELETE
# through the generic request() method instead.
delete_resp = client.request(
    "DELETE",
    f"/providers/{provider_id}/items",
    json={"key": "reports/old-file.pdf", "type": "FILE"},
)
delete_resp.raise_for_status()
print("Item deleted")
# Build the DELETE request first, then perform it; the response body is unused.
delete_req <- base_req |>
  req_url_path_append("providers", provider_id, "items") |>
  req_method("DELETE") |>
  req_body_json(list(key = "reports/old-file.pdf", type = "FILE"))

req_perform(delete_req)
cat("Item deleted\n")
import asyncio

from biolevate import BiolevateClient


async def main():
    async with BiolevateClient(
        base_url="https://<api-domain>",
        token="<your-pat>",
    ) as client:
        provider_id = "550e8400-e29b-41d4-a716-446655440000"
        # delete() returns nothing on success; errors surface as exceptions.
        await client.items.delete(
            provider_id=provider_id,
            key="reports/old-file.pdf",
        )
        print("Item deleted")


asyncio.run(main())
Downloading a File
There are two ways to download a file depending on whether the provider supports presigned URLs.
Getting a Download URL
Request a download URL for a file. If the provider supports presigned URLs (S3, Azure, GCS), you get a time-limited direct link. Otherwise, the API returns a proxy URL that streams the content through the server.
| Parameter | Required | Description |
|---|---|---|
| key | Yes | Full file key |
| expirationMinutes | No | Custom expiry duration in minutes for presigned URLs (provider default if omitted) |
- cURL
- Python (httpx)
- R
- SDK
# Request a download URL valid for 60 minutes (presigned URLs only).
curl -s "https://<api-domain>/api/core/providers/${PROVIDER_ID}/items/download-url?key=reports/document.pdf&expirationMinutes=60" \
  -H "Authorization: Bearer <your-pat>"
from pydantic import BaseModel, Field
class DownloadUrlResponse(BaseModel):
    """Response of the download-url endpoint."""

    url: str
    # Seconds until a presigned URL expires; a non-positive value indicates a
    # proxy URL that streams the content through the API server.
    expires_in_seconds: int = Field(alias="expiresInSeconds")
download_resp = client.get(
    f"/providers/{provider_id}/items/download-url",
    params={"key": "reports/document.pdf", "expirationMinutes": 60},
)
download_resp.raise_for_status()
download = DownloadUrlResponse.model_validate(download_resp.json())

# A positive expiry means the provider issued a time-limited presigned link;
# otherwise the API returned a proxy URL streamed through the server.
if download.expires_in_seconds > 0:
    print(f"Presigned URL (expires in {download.expires_in_seconds}s): {download.url}")
else:
    print(f"Proxy URL: {download.url}")
# Request a download URL, optionally with a custom expiry in minutes.
download <- base_req |>
  req_url_path_append("providers", provider_id, "items", "download-url") |>
  req_url_query(key = "reports/document.pdf", expirationMinutes = 60) |>
  req_perform() |>
  resp_body_json()

if (download$expiresInSeconds > 0) {
  cat(sprintf("Presigned URL (expires in %ds): %s\n", download$expiresInSeconds, download$url))
} else {
  cat(sprintf("Proxy URL: %s\n", download$url))
}
import asyncio

from biolevate import BiolevateClient


async def main():
    async with BiolevateClient(
        base_url="https://<api-domain>",
        token="<your-pat>",
    ) as client:
        provider_id = "550e8400-e29b-41d4-a716-446655440000"
        # expiration_minutes applies to presigned URLs; providers without
        # presigned support return a proxy URL instead.
        download = await client.items.get_download_url(
            provider_id=provider_id,
            key="reports/document.pdf",
            expiration_minutes=60,
        )
        print(f"Download URL: {download.url}")


asyncio.run(main())
Direct Content Download
For providers that do not support presigned URLs, you can also stream file content directly through the API proxy endpoint:
# Stream the file content through the API proxy and save it locally.
curl -s "https://<api-domain>/api/core/providers/${PROVIDER_ID}/items/content?key=reports/document.pdf" \
  -H "Authorization: Bearer <your-pat>" \
  -o document.pdf
Next Steps
Now that you can manage files in your storage providers:
- Index files to create EliseFiles and trigger AI-powered analysis
- Organize into collections for structured workflows
- Large file uploads for efficient upload of big files
See the Provider Items endpoints in the API Reference for complete request/response schemas.