Skip to main content

Get your API key

Generate an API key on our API page.

Add Balance

If you haven’t deposited yet, add some funds to your balance. Minimum deposit is just $1, or $0.10 when using crypto.

API usage examples

Here’s a simple example using our OpenAI-compatible chat completions endpoint:
import requests
import json

# Base URL for the OpenAI-compatible API.
BASE_URL = "https://nano-gpt.com/api/v1"
API_KEY = "YOUR_API_KEY"  # Replace with your API key

# Shared headers for every request in this example.
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
    "Accept": "text/event-stream"  # Required for SSE streaming
}

def stream_chat_completion(messages, model="chatgpt-4o-latest"):
    """
    Stream a chat completion from the OpenAI-compatible endpoint.

    Yields each piece of assistant content as it arrives over the
    server-sent-events (SSE) stream.
    """
    payload = {
        "model": model,
        "messages": messages,
        "stream": True  # Enable streaming
    }

    response = requests.post(
        f"{BASE_URL}/chat/completions",
        headers=headers,
        json=payload,
        stream=True
    )

    if response.status_code != 200:
        raise Exception(f"Error: {response.status_code}")

    for raw in response.iter_lines():
        if not raw:
            continue  # skip SSE keep-alive blank lines
        text = raw.decode('utf-8')
        # SSE data lines are prefixed with "data: "; strip it before parsing.
        if text.startswith('data: '):
            text = text[6:]
        if text == '[DONE]':
            break  # server signals end of stream
        try:
            parsed = json.loads(text)
        except json.JSONDecodeError:
            continue  # ignore non-JSON lines (e.g. SSE comments)
        delta = parsed['choices'][0]['delta']
        if delta.get('content'):
            yield delta['content']

# Example usage: stream a completion and print tokens as they arrive.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Please explain the concept of artificial intelligence."}
]

try:
    print("Assistant's Response:")
    # Print each streamed fragment immediately, without a trailing newline.
    for content_chunk in stream_chat_completion(messages):
        print(content_chunk, end='', flush=True)
    print("")  # final newline once the stream completes
except Exception as e:
    print(f"Error: {str(e)}")
For more detailed examples and other text generation endpoints, check out our Text Generation Guide.

OpenAI-Compatible Endpoint (v1/images/generations)

You can also generate images using our OpenAI-compatible endpoint:
# Request a single 1024x1024 image from the "hidream" model via the
# OpenAI-compatible images endpoint. Replace YOUR_API_KEY with your key.
curl https://nano-gpt.com/v1/images/generations \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "hidream",
    "prompt": "A serene landscape at sunset",
    "n": 1,
    "size": "1024x1024"
  }'
Here’s an example using our image generation endpoint with the Recraft model:
import requests
import json
import base64
from PIL import Image
import io

# Note: this example uses the /api base path (not /api/v1).
BASE_URL = "https://nano-gpt.com/api"
API_KEY = "YOUR_API_KEY"  # Replace with your API key

# This endpoint authenticates with an x-api-key header rather than a
# Bearer token.
headers = {
    "x-api-key": API_KEY,
    "Content-Type": "application/json"
}

def generate_image(prompt, model="recraft-v3", width=1024, height=1024,
                   output_path="generated_image.png"):
    """
    Generate an image and save it to disk.

    Args:
        prompt: Text description of the desired image.
        model: Model identifier (defaults to the Recraft model).
        width: Output image width in pixels.
        height: Output image height in pixels.
        output_path: File the decoded image is written to.

    Returns:
        The decoded JSON response from the API.

    Raises:
        Exception: If the API responds with a non-200 status code.
    """
    data = {
        "prompt": prompt,
        "model": model,
        "width": width,
        "height": height,
        "negative_prompt": "blurry, bad quality, distorted, deformed",
        "nImages": 1,
        "num_steps": 30,
        # Keep resolution consistent with the requested width/height
        # (previously hard-coded to "1024x1024", which contradicted
        # non-default width/height arguments).
        "resolution": f"{width}x{height}",
        "sampler_name": "DPM++ 2M Karras",
        "scale": 7.5
    }

    response = requests.post(
        f"{BASE_URL}/generate-image",
        headers=headers,
        json=data
    )

    if response.status_code != 200:
        raise Exception(f"Error: {response.status_code}")

    result = response.json()

    # The API returns the image base64-encoded; decode and save it.
    image_data = base64.b64decode(result['image'])
    image = Image.open(io.BytesIO(image_data))
    image.save(output_path)

    return result

# Example usage: generate an image and report the outcome.
prompt = "A serene landscape with mountains and a lake at sunset, digital art style"
try:
    result = generate_image(prompt)
    print("Image generated successfully!")
    # 'cost' may be absent from the response; fall back to a placeholder.
    print("Cost:", result.get('cost', 'N/A'))
    print("Image saved as 'generated_image.png'")
except Exception as e:
    print(f"Error: {str(e)}")
For more detailed examples and other image generation options, check out our Image Generation Guide.
NanoGPT supports TEE-backed models for verifiable privacy. You can fetch attestation reports and signatures for chat completions made with these models. Here’s how to fetch an attestation report:
# Fetch the TEE attestation report for a specific model.
curl "https://nano-gpt.com/api/v1/tee/attestation?model=TEE/hermes-3-llama-3.1-70b" \
  -H "Authorization: Bearer YOUR_API_KEY"
After making a chat request with a TEE model, you can get its signature:
# First, make a chat request with a TEE model (see the Text Generation
# accordion or the TEE Verification guide).
# Then request the signature using the request_id returned by that chat call:
curl "https://nano-gpt.com/api/v1/tee/signature/YOUR_CHAT_REQUEST_ID?model=TEE/hermes-3-llama-3.1-70b&signing_algo=ecdsa" \
  -H "Authorization: Bearer YOUR_API_KEY"
For a complete Python example and more details, see the TEE Verification Guide.
I