Documentation Index
Fetch the complete documentation index at: https://docs.amberdata.io/llms.txt
Use this file to discover all available pages before exploring further.
The Amberdata API uses cursor pagination to handle large datasets efficiently. The API automatically generates the URL for the next page of results when applicable.
For a 200 - OK HTTP response, the URL for the next page is found under the payload.metadata.next property:
{
"status": 200,
"title": "OK",
"description": "Successful request",
"payload": {
"metadata": {
"next": "https://api.amberdata.com/api/v2/defi/lending/compoundv2/assets/ZRX?cursor=N4IgRgNg9gxg1jAFgQwJYDsCSATEAuEARhgE5SA2EgBnIFYAzGbY%2BkAGhAAcAnKAF1hQIOfCBhQAtpygBXdNgBuAJnYgJybnACmffOhkQIHDDAgzsWgIKGAKt2ToAzshh9UUdAFEFW9H0f49MgQjlocbhJaAGJQ3Oq6BIgy6ugASlrI2MiQWqrYqNxaru7oopYAygDCqo58GnwAIsh8uXiE5OSEACwkXYQA7AAcVCMcvthNLfjtnSTUXSOLHI6oAF6thKMgFj7QnFrcALJQFoHBoRz0sfGiAFaOHqqcyADmGxyxqC8YweV13I1mhs6CQAMyEJSLJYgT7fdDBTzySbA2bzKFUDjIRyhPgiAhUAAeWj62CUXUGJFo-UI2FBSiUhApZPIWSCtC0YAshHoXUoJB5JEGqggWL4NlQkVqyCkohm3V6AyFHBFtQAMlAXph5FoCdNwQBfIA"
},
...
}
}
If the full payload is returned in the first request, the payload.metadata.next property will be null.
Accessing Next Pages
To access the next page of results:
- Retrieve the URL from
payload.metadata.next
- Copy your request headers from your initial API call (e.g.,
x-api-key, etc.)
- Make an HTTP GET request with the next page URL and the copied headers
Page Parameter
Some endpoints contain column names in the metadata instead of a next field. For these endpoints, use the page parameter to loop through all pages of data, beginning at page 0.
Query parameter: page
Options: 0 - ∞
Querying Long Timeframes
API endpoints have a maximum supported range for the query parameters endDate and startDate.
Note: Amberdata reserves the right to increase the supported range for endDate and startDate for any endpoint. An increase in the supported range is fully backwards compatible.
Example: Getting 1 Year of Data
The following example demonstrates how to get 1 year of data for the DEX - Trades endpoint, which has a maximum range of 30 days.
Warning: The code below has been verified and tested, but is intended for demonstration purposes only.
Main Script (rest_endpoints.py)
import os
from datetime import datetime
from dataclasses import dataclass, field
from endpoint_timerange_handler import EndpointTimeRangeHandler, Endpoint
from endpoint_caller_v2 import EndpointCaller
def http_ok_next_page_url_extractor(page):
    """
    Extract the next-page URL from the current page of data.

    The URL, when present, lives at payload.metadata.next in the parsed
    JSON response of a 200 - OK request.

    Parameters
    ----------
    page: dict
        Parsed JSON body of the previous response.

    Returns
    -------
    next_page_url: str
        The URL of the next page, or "" when there is no further page
        (key missing anywhere along the path, or its value is None).
    """
    # `or {}` collapses both "key absent" and "value is None" into an empty
    # dict, replacing the original deeply nested membership checks.
    payload = page.get('payload') or {}
    metadata = payload.get('metadata') or {}
    return metadata.get('next') or ""
@dataclass(kw_only=True)
class DEXTradesHistorical(Endpoint):
    """
    DEX - Trades historical endpoint.

    The pool address is interpolated into the request path by
    `format_path`; the endpoint supports at most 30 days per request.
    """
    # Address of the liquidity pool whose trades are requested.
    poolAddress: str
    # Request path with a placeholder for the pool address.
    path_template: str = '/market/defi/trades/{}/historical'
    # Maximum supported endDate - startDate: 30 days, in seconds.
    max_interval_in_seconds: int = 30 * 24 * 60 * 60

    def format_path(self) -> str:
        """Return the request path for this pool."""
        return self.path_template.format(self.poolAddress)
def get_api_responses(start_date: datetime, end_date: datetime, endpoint: DEXTradesHistorical) -> None:
    """
    Fetch all pages of data between `start_date` and `end_date` and print
    the timestamp of the first entry of each page.

    The API key is read from the PRODUCTION_API_KEY environment variable.
    """
    caller = EndpointCaller(os.getenv('PRODUCTION_API_KEY'))
    handler = EndpointTimeRangeHandler(caller)
    pages = handler.get_data_for_timerange(
        start_date,
        end_date,
        endpoint,
        http_ok_next_page_url_extractor,
    )
    for page in pages:
        body = page.data
        # Each data row is a list; index 1 holds the entry's timestamp
        # — presumably per the DEX Trades response schema; confirm there.
        first_entry = body['payload']['data'][0]
        print(f"Timestamp of first entry in the page: {first_entry[1]}")
def call_dex_trades_historical() -> None:
    """
    Example configuration of an endpoint to be called.

    Requests one year of trades for the WBTC/ETH 0.3% pool on Uniswap v3.
    """
    start = datetime.fromisoformat("2022-01-01T00:00:00").replace(microsecond=0)
    end = datetime.fromisoformat("2023-01-01T00:00:00").replace(microsecond=0)
    endpoint = DEXTradesHistorical(
        poolAddress='0xcbcdf9626bc03e24f779434178a73a0b4bad62ed'  # WBTC/ETH 0.3%
    )
    endpoint.add_query_parameter('exchange', 'uniswapv3')
    get_api_responses(start, end, endpoint)
if __name__ == "__main__":
call_dex_trades_historical()
Endpoint Timerange Handler (endpoint_timerange_handler.py)
from datetime import datetime, timedelta
from dataclasses import dataclass, field
from endpoint_caller_v2 import EndpointCaller
@dataclass(kw_only=True)
class Endpoint:
"""
This is a parent class that should be inherited and implemented for specific endpoints.
"""
query: dict = field(default_factory=dict)
headers: dict = field(default_factory=dict)
def add_query_parameter(self, parameter_name: str, parameter_value) -> None:
if parameter_name is not None and len(parameter_name) > 0:
self.query[parameter_name] = parameter_value
def format_path(self) -> str:
"""
Child classes must implement this function.
"""
return ""
def add_header(self, header_name: str, header_value: str) -> None:
if header_name is not None and len(header_name) > 0:
self.headers[header_name] = header_value
class EndpointTimeRangeHandler:
    """
    Splits an arbitrarily large time range into endpoint-sized chunks and
    retrieves every page of data for each chunk.
    """

    def __init__(self, endpoint_caller: EndpointCaller) -> None:
        # Object that performs the actual HTTP calls (endpoint_caller_v2).
        self.endpoint_caller = endpoint_caller

    def get_data_for_timerange(self, start_date: datetime, end_date: datetime,
                               endpoint: Endpoint, http_ok_next_page_url_extractor):
        """
        Given an arbitrarily large timerange, this function will call the endpoint
        continuously by breaking up the requested time range into chunks.
        The chunks do not exceed the single request maximum range (endDate - startDate)
        for the specific endpoint.

        Yields every page of data returned for each chunk, in chronological
        chunk order.
        """
        duration = end_date - start_date
        intervals = duration.total_seconds()/endpoint.max_interval_in_seconds
        hours = duration.total_seconds()/3600
        print(f"Getting {hours} hours of data (# of intervals: {intervals})")
        max_chunk = timedelta(seconds=endpoint.max_interval_in_seconds)
        chunks = []
        cursor = start_date
        while cursor + max_chunk <= end_date:
            chunks.append((cursor, cursor + max_chunk))
            cursor += max_chunk
        # Bug fix: the original unconditionally appended a final chunk, so a
        # range that divides evenly into max_interval_in_seconds produced a
        # zero-length (end_date, end_date) chunk and a redundant API call.
        # A degenerate start == end request is still honored when it is the
        # only chunk.
        if cursor < end_date or not chunks:
            chunks.append((cursor, end_date))
        for chunk_start, chunk_end in chunks:
            print(f"Retrieving data from {str(chunk_start)} to {str(chunk_end)}")
            # get_data pages through one chunk — defined outside this view;
            # NOTE(review): confirm it exists on this class in the full file.
            yield from self.get_data(chunk_start, chunk_end, endpoint,
                                     http_ok_next_page_url_extractor)
This approach allows you to efficiently retrieve data for timeframes longer than the maximum supported range by automatically chunking requests and handling pagination across multiple time periods.