> For clean Markdown of any page, append .md to the page URL.
> For a complete documentation index, see https://developers.meshapi.ai/api-reference/mesh-api/chat/llms.txt.
> For full documentation content, see https://developers.meshapi.ai/api-reference/mesh-api/chat/llms-full.txt.

# Create a chat completion

POST https://api.meshapi.ai/v1/chat/completions
Content-Type: application/json

Generates a model response from a list of chat messages.

This endpoint is intentionally shaped like OpenAI's
`POST /v1/chat/completions` so existing SDK integrations can switch
base URLs with minimal changes.

Mesh-specific request extensions:
- `template`: resolve a stored prompt template by name or UUID
- `variables`: values used to render `{{slot}}` placeholders
- `session_id`: caller-defined grouping key for usage reporting

Streaming responses are returned as Server-Sent Events when
`stream=true`.


Reference: https://developers.meshapi.ai/api-reference/mesh-api/chat/create-chat-completion

## OpenAPI Specification

```yaml
openapi: 3.1.0
info:
  title: openapi
  version: 1.0.0
paths:
  /v1/chat/completions:
    post:
      operationId: create-chat-completion
      summary: Create a chat completion
      description: |
        Generates a model response from a list of chat messages.

        This endpoint is intentionally shaped like OpenAI's
        `POST /v1/chat/completions` so existing SDK integrations can switch
        base URLs with minimal changes.

        Mesh-specific request extensions:
        - `template`: resolve a stored prompt template by name or UUID
        - `variables`: values used to render `{{slot}}` placeholders
        - `session_id`: caller-defined grouping key for usage reporting

        Streaming responses are returned as Server-Sent Events when
        `stream=true`.
      tags:
        - subpackage_chat
      parameters:
        - name: Authorization
          in: header
          description: Bearer authentication; the header value takes the form `Bearer <token>`.
          required: true
          schema:
            type: string
      responses:
        '200':
          description: |
            Successful response.

            Non-streaming requests return a `ChatCompletionResponse` object.
            Streaming requests return `text/event-stream` chunks matching
            `ChatCompletionChunk`, followed by `data: [DONE]`.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ChatCompletionResponse'
        '401':
          description: Missing, malformed, or invalid bearer token.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '402':
          description: The authenticated key has reached its configured spend cap.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '403':
          description: The bearer token is valid but not currently active.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '404':
          description: The requested model cannot be routed.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '422':
          description: The request body failed validation.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '429':
          description: Per-minute or per-day rate limit exceeded.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '502':
          description: Upstream provider returned a non-timeout failure.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '503':
          description: The selected provider is not configured on this deployment.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
        '504':
          description: Upstream provider timed out before completing the request.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ErrorEnvelope'
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/ChatCompletionRequest'
servers:
  - url: https://api.meshapi.ai
components:
  schemas:
    ChatMessageRole:
      type: string
      enum:
        - system
        - user
        - assistant
        - tool
      title: ChatMessageRole
    TextContentPart:
      type: object
      properties:
        type:
          type: string
          enum:
            - text
        text:
          type: string
      required:
        - type
        - text
      title: TextContentPart
    ImageUrlDetail:
      type: string
      enum:
        - auto
        - low
        - high
      default: auto
      title: ImageUrlDetail
    ImageUrl:
      type: object
      properties:
        url:
          type: string
          description: Remote URL or `data:` URI.
        detail:
          $ref: '#/components/schemas/ImageUrlDetail'
      required:
        - url
      title: ImageUrl
    ImageContentPart:
      type: object
      properties:
        type:
          type: string
          enum:
            - image_url
        image_url:
          $ref: '#/components/schemas/ImageUrl'
      required:
        - type
        - image_url
      title: ImageContentPart
    ContentPart:
      oneOf:
        - $ref: '#/components/schemas/TextContentPart'
        - $ref: '#/components/schemas/ImageContentPart'
      title: ContentPart
    ChatMessageContent1:
      type: array
      items:
        $ref: '#/components/schemas/ContentPart'
      title: ChatMessageContent1
    ChatMessageContent:
      oneOf:
        - type: string
        - $ref: '#/components/schemas/ChatMessageContent1'
      title: ChatMessageContent
    ToolCallFunction:
      type: object
      properties:
        name:
          type: string
        arguments:
          type: string
          description: JSON-encoded argument object.
      required:
        - name
        - arguments
      title: ToolCallFunction
    ToolCall:
      type: object
      properties:
        id:
          type: string
        type:
          type: string
          enum:
            - function
        function:
          $ref: '#/components/schemas/ToolCallFunction'
      required:
        - id
        - type
        - function
      title: ToolCall
    ChatMessage:
      type: object
      properties:
        role:
          $ref: '#/components/schemas/ChatMessageRole'
        content:
          oneOf:
            - $ref: '#/components/schemas/ChatMessageContent'
            - type: 'null'
        name:
          type:
            - string
            - 'null'
        tool_call_id:
          type:
            - string
            - 'null'
        tool_calls:
          type:
            - array
            - 'null'
          items:
            $ref: '#/components/schemas/ToolCall'
      required:
        - role
      title: ChatMessage
    ChatCompletionRequestStop:
      oneOf:
        - type: string
        - type: array
          items:
            type: string
      description: One or more stop sequences that end generation when encountered.
      title: ChatCompletionRequestStop
    ToolFunctionParameters:
      type: object
      properties: {}
      title: ToolFunctionParameters
    ToolFunction:
      type: object
      properties:
        name:
          type: string
        description:
          type:
            - string
            - 'null'
        parameters:
          oneOf:
            - $ref: '#/components/schemas/ToolFunctionParameters'
            - type: 'null'
      required:
        - name
      title: ToolFunction
    Tool:
      type: object
      properties:
        type:
          type: string
          enum:
            - function
        function:
          $ref: '#/components/schemas/ToolFunction'
      required:
        - type
        - function
      title: Tool
    ChatCompletionRequestToolChoice0:
      type: string
      enum:
        - none
        - auto
        - required
      title: ChatCompletionRequestToolChoice0
    ToolChoiceFunction:
      type: object
      properties:
        name:
          type: string
      required:
        - name
      title: ToolChoiceFunction
    ToolChoice:
      type: object
      properties:
        type:
          type: string
          enum:
            - function
        function:
          $ref: '#/components/schemas/ToolChoiceFunction'
      required:
        - type
        - function
      title: ToolChoice
    ChatCompletionRequestToolChoice:
      oneOf:
        - $ref: '#/components/schemas/ChatCompletionRequestToolChoice0'
        - $ref: '#/components/schemas/ToolChoice'
      description: >-
        Controls whether the model can call tools automatically, must avoid
        them, must call one, or must call a specific tool.
      title: ChatCompletionRequestToolChoice
    ChatCompletionRequest:
      type: object
      properties:
        model:
          type:
            - string
            - 'null'
          default: openai/gpt-4o
          description: |
            Model identifier. If omitted, the backend resolves it from the API
            key default model or the selected template.
        messages:
          type: array
          items:
            $ref: '#/components/schemas/ChatMessage'
          description: Conversation history in OpenAI chat format.
        template:
          type:
            - string
            - 'null'
          description: Template name or UUID to expand before inference.
        variables:
          type:
            - object
            - 'null'
          additionalProperties:
            type: string
          description: Values used when rendering `{{slot}}` placeholders.
        session_id:
          type:
            - string
            - 'null'
          description: Caller-defined grouping key for usage reporting.
        stream:
          type: boolean
          default: false
          description: When true, returns SSE chunks instead of a JSON object.
        temperature:
          type:
            - number
            - 'null'
          format: double
          description: >-
            Sampling temperature used for token selection. Lower values make
            output more deterministic; higher values increase randomness and
            variation.
        max_tokens:
          type:
            - integer
            - 'null'
          description: Maximum number of completion tokens to generate.
        top_p:
          type:
            - number
            - 'null'
          format: double
          description: >-
            Nucleus sampling threshold. The model samples from the smallest set
            of tokens whose cumulative probability reaches `top_p`.
        frequency_penalty:
          type:
            - number
            - 'null'
          format: double
          description: >-
            Penalizes tokens that have already appeared in the generated output,
            reducing repeated phrasing.
        presence_penalty:
          type:
            - number
            - 'null'
          format: double
          description: >-
            Penalizes tokens that have already appeared at least once, nudging
            the model toward introducing new topics.
        stop:
          oneOf:
            - $ref: '#/components/schemas/ChatCompletionRequestStop'
            - type: 'null'
          description: One or more stop sequences that end generation when encountered.
        seed:
          type:
            - integer
            - 'null'
          description: >-
            Optional seed for best-effort deterministic sampling across repeated
            requests with the same parameters.
        tools:
          type:
            - array
            - 'null'
          items:
            $ref: '#/components/schemas/Tool'
          description: Tool definitions the model may call during the completion.
        tool_choice:
          oneOf:
            - $ref: '#/components/schemas/ChatCompletionRequestToolChoice'
            - type: 'null'
          description: >-
            Controls whether the model can call tools automatically, must avoid
            them, must call one, or must call a specific tool.
        transforms:
          type:
            - array
            - 'null'
          items:
            type: string
          description: Ordered OpenRouter transforms applied before inference.
        models:
          type:
            - array
            - 'null'
          items:
            type: string
          description: Ordered OpenRouter fallback model list.
        user:
          type:
            - string
            - 'null'
          description: End-user identifier forwarded for abuse monitoring.
      required:
        - messages
      title: ChatCompletionRequest
    ChatCompletionChoiceFinishReason:
      type: string
      enum:
        - stop
        - length
        - tool_calls
        - content_filter
        - function_call
      title: ChatCompletionChoiceFinishReason
    ChatCompletionChoiceLogprobs:
      type: object
      properties: {}
      title: ChatCompletionChoiceLogprobs
    ChatCompletionChoice:
      type: object
      properties:
        index:
          type: integer
        message:
          oneOf:
            - $ref: '#/components/schemas/ChatMessage'
            - type: 'null'
        finish_reason:
          oneOf:
            - $ref: '#/components/schemas/ChatCompletionChoiceFinishReason'
            - type: 'null'
        logprobs:
          oneOf:
            - $ref: '#/components/schemas/ChatCompletionChoiceLogprobs'
            - type: 'null'
      required:
        - index
      title: ChatCompletionChoice
    PromptTokensDetails:
      type: object
      properties:
        cached_tokens:
          type:
            - integer
            - 'null'
      title: PromptTokensDetails
    UsageInfo:
      type: object
      properties:
        prompt_tokens:
          type: integer
        completion_tokens:
          type: integer
        total_tokens:
          type: integer
        prompt_tokens_details:
          oneOf:
            - $ref: '#/components/schemas/PromptTokensDetails'
            - type: 'null'
      required:
        - prompt_tokens
        - completion_tokens
        - total_tokens
      title: UsageInfo
    ChatCompletionResponse:
      type: object
      properties:
        id:
          type: string
        object:
          type: string
          enum:
            - chat.completion
        created:
          type: integer
        model:
          type: string
        choices:
          type: array
          items:
            $ref: '#/components/schemas/ChatCompletionChoice'
        usage:
          oneOf:
            - $ref: '#/components/schemas/UsageInfo'
            - type: 'null'
        system_fingerprint:
          type:
            - string
            - 'null'
      required:
        - id
        - object
        - created
        - model
        - choices
      title: ChatCompletionResponse
    ErrorBodyCode:
      type: string
      enum:
        - unauthorized
        - forbidden
        - not_found
        - model_not_found
        - validation_error
        - unprocessable_entity
        - rate_limit_exceeded
        - spend_limit_exceeded
        - conflict
        - upstream_error
        - provider_not_available
        - gateway_timeout
      title: ErrorBodyCode
    ValidationErrorDetailLocItems:
      oneOf:
        - type: string
        - type: integer
      title: ValidationErrorDetailLocItems
    ValidationErrorDetail:
      type: object
      properties:
        loc:
          type: array
          items:
            $ref: '#/components/schemas/ValidationErrorDetailLocItems'
        msg:
          type: string
        type:
          type: string
      required:
        - loc
        - msg
        - type
      title: ValidationErrorDetail
    ProviderError:
      type: object
      properties:
        provider:
          type: string
        status:
          type:
            - integer
            - 'null'
        message:
          type:
            - string
            - 'null'
      title: ProviderError
    ErrorBody:
      type: object
      properties:
        code:
          $ref: '#/components/schemas/ErrorBodyCode'
        message:
          type: string
        param:
          type:
            - string
            - 'null'
        details:
          type:
            - array
            - 'null'
          items:
            $ref: '#/components/schemas/ValidationErrorDetail'
        provider_error:
          oneOf:
            - $ref: '#/components/schemas/ProviderError'
            - type: 'null'
        retry_after_seconds:
          type:
            - integer
            - 'null'
      required:
        - code
        - message
      title: ErrorBody
    ErrorEnvelope:
      type: object
      properties:
        error:
          $ref: '#/components/schemas/ErrorBody'
        request_id:
          type: string
      required:
        - error
        - request_id
      title: ErrorEnvelope
  securitySchemes:
    HTTPBearer:
      type: http
      scheme: bearer

```

## SDK Code Examples

```javascript JavaScript
const response = await fetch("https://api.meshapi.ai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    messages: [
      { role: "system", content: "You are a concise assistant." },
      { role: "user", content: "Summarize why async IO matters." }
    ],
    temperature: 0.2
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);

```

```python Python
import httpx

response = httpx.post(
  "https://api.meshapi.ai/v1/chat/completions",
  headers={"Authorization": "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX"},
  json={
    "model": "openai/gpt-4o-mini",
    "messages": [
      {"role": "system", "content": "You are a concise assistant."},
      {"role": "user", "content": "Summarize why async IO matters."},
    ],
    "temperature": 0.2,
  },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])

```

```python Python (SDK)
from meshapi import MeshAPI, ChatCompletionParams, ChatMessage

client = MeshAPI(
  base_url="https://api.meshapi.ai",
  token="rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
)

completion = client.chat.completions.create(
  ChatCompletionParams(
    model="openai/gpt-4o-mini",
    messages=[
      ChatMessage(role="system", content="You are a concise assistant."),
      ChatMessage(role="user", content="Summarize why async IO matters."),
    ],
    temperature=0.2,
  )
)

print(completion.choices[0].message.content)

```

```typescript TypeScript (SDK)
import { MeshAPI } from "meshapi-node-sdk";

const client = new MeshAPI({
  baseUrl: "https://api.meshapi.ai",
  token: "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
});

const completion = await client.chat.completions.create({
  model: "openai/gpt-4o-mini",
  messages: [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "Summarize why async IO matters." },
  ],
  temperature: 0.2,
});

console.log(completion.choices[0]?.message.content);

```

```go Go (SDK)
package main

import (
  "context"
  "fmt"
  "log"

  meshapi "meshapi-go-sdk"
)

func main() {
  client := meshapi.New(meshapi.Config{
    BaseURL: "https://api.meshapi.ai",
    Token:   "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  })

  model := "openai/gpt-4o-mini"
  completion, err := client.Chat.Completions.Create(context.Background(), meshapi.ChatCompletionParams{
    Model: &model,
    Messages: []meshapi.ChatMessage{
      {Role: "system", Content: "You are a concise assistant."},
      {Role: "user", Content: "Summarize why async IO matters."},
    },
    Temperature: meshapi.Float64(0.2),
  })
  if err != nil {
    log.Fatal(err)
  }

  fmt.Println(completion.Choices[0].Message.Content)
}

```

```java Java (SDK)
import com.meshapi.sdk.MeshAPI;
import com.meshapi.sdk.types.chat.ChatCompletionRequest;
import com.meshapi.sdk.types.chat.ChatCompletionResponse;
import com.meshapi.sdk.types.chat.ChatMessage;

MeshAPI client = MeshAPI.builder()
    .baseUrl("https://api.meshapi.ai")
    .token("rsk_01JXXXXXXXXXXXXXXXXXXXXXXX")
    .build();

ChatCompletionResponse completion = client.chat().completions().create(
    ChatCompletionRequest.builder()
        .model("openai/gpt-4o-mini")
        .addMessage(ChatMessage.system("You are a concise assistant."))
        .addMessage(ChatMessage.user("Summarize why async IO matters."))
        .temperature(0.2)
        .build()
);

System.out.println(completion.choices.get(0).message.content);

```

```ruby OpenAI-style completion response
require 'uri'
require 'net/http'

url = URI("https://api.meshapi.ai/v1/chat/completions")

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true

request = Net::HTTP::Post.new(url)
request["Authorization"] = 'Bearer <token>'
request["Content-Type"] = 'application/json'

response = http.request(request)
puts response.read_body
```

```php OpenAI-style completion response
<?php
require_once('vendor/autoload.php');

$client = new \GuzzleHttp\Client();

$response = $client->request('POST', 'https://api.meshapi.ai/v1/chat/completions', [
  'headers' => [
    'Authorization' => 'Bearer <token>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp OpenAI-style completion response
using RestSharp;

var client = new RestClient("https://api.meshapi.ai/v1/chat/completions");
var request = new RestRequest(Method.POST);
request.AddHeader("Authorization", "Bearer <token>");
request.AddHeader("Content-Type", "application/json");
IRestResponse response = client.Execute(request);
```

```swift OpenAI-style completion response
import Foundation

let headers = [
  "Authorization": "Bearer <token>",
  "Content-Type": "application/json"
]

let request = NSMutableURLRequest(url: NSURL(string: "https://api.meshapi.ai/v1/chat/completions")! as URL,
                                        cachePolicy: .useProtocolCachePolicy,
                                    timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers

let session = URLSession.shared
let dataTask = session.dataTask(with: request as URLRequest, completionHandler: { (data, response, error) -> Void in
  if (error != nil) {
    print(error as Any)
  } else {
    let httpResponse = response as? HTTPURLResponse
    print(httpResponse)
  }
})

dataTask.resume()
```

```javascript JavaScript
const response = await fetch("https://api.meshapi.ai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    messages: [
      { role: "system", content: "You are a concise assistant." },
      { role: "user", content: "Summarize why async IO matters." }
    ],
    temperature: 0.2
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);

```

```python Python
import httpx

response = httpx.post(
  "https://api.meshapi.ai/v1/chat/completions",
  headers={"Authorization": "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX"},
  json={
    "model": "openai/gpt-4o-mini",
    "messages": [
      {"role": "system", "content": "You are a concise assistant."},
      {"role": "user", "content": "Summarize why async IO matters."},
    ],
    "temperature": 0.2,
  },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])

```

```python Python (SDK)
from meshapi import MeshAPI, ChatCompletionParams, ChatMessage

client = MeshAPI(
  base_url="https://api.meshapi.ai",
  token="rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
)

completion = client.chat.completions.create(
  ChatCompletionParams(
    model="openai/gpt-4o-mini",
    messages=[
      ChatMessage(role="system", content="You are a concise assistant."),
      ChatMessage(role="user", content="Summarize why async IO matters."),
    ],
    temperature=0.2,
  )
)

print(completion.choices[0].message.content)

```

```typescript TypeScript (SDK)
import { MeshAPI } from "meshapi-node-sdk";

const client = new MeshAPI({
  baseUrl: "https://api.meshapi.ai",
  token: "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
});

const completion = await client.chat.completions.create({
  model: "openai/gpt-4o-mini",
  messages: [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "Summarize why async IO matters." },
  ],
  temperature: 0.2,
});

console.log(completion.choices[0]?.message.content);

```

```go Go (SDK)
package main

import (
  "context"
  "fmt"
  "log"

  meshapi "meshapi-go-sdk"
)

func main() {
  client := meshapi.New(meshapi.Config{
    BaseURL: "https://api.meshapi.ai",
    Token:   "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  })

  model := "openai/gpt-4o-mini"
  completion, err := client.Chat.Completions.Create(context.Background(), meshapi.ChatCompletionParams{
    Model: &model,
    Messages: []meshapi.ChatMessage{
      {Role: "system", Content: "You are a concise assistant."},
      {Role: "user", Content: "Summarize why async IO matters."},
    },
    Temperature: meshapi.Float64(0.2),
  })
  if err != nil {
    log.Fatal(err)
  }

  fmt.Println(completion.Choices[0].Message.Content)
}

```

```java Java (SDK)
import com.meshapi.sdk.MeshAPI;
import com.meshapi.sdk.types.chat.ChatCompletionRequest;
import com.meshapi.sdk.types.chat.ChatCompletionResponse;
import com.meshapi.sdk.types.chat.ChatMessage;

MeshAPI client = MeshAPI.builder()
    .baseUrl("https://api.meshapi.ai")
    .token("rsk_01JXXXXXXXXXXXXXXXXXXXXXXX")
    .build();

ChatCompletionResponse completion = client.chat().completions().create(
    ChatCompletionRequest.builder()
        .model("openai/gpt-4o-mini")
        .addMessage(ChatMessage.system("You are a concise assistant."))
        .addMessage(ChatMessage.user("Summarize why async IO matters."))
        .temperature(0.2)
        .build()
);

System.out.println(completion.choices.get(0).message.content);

```

```ruby Non-streaming completion
require 'uri'
require 'net/http'

url = URI("https://api.meshapi.ai/v1/chat/completions")

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true

request = Net::HTTP::Post.new(url)
request["Authorization"] = 'Bearer <token>'
request["Content-Type"] = 'application/json'
request.body = "{\n  \"messages\": [\n    {\n      \"role\": \"system\",\n      \"content\": \"You are a concise assistant.\"\n    },\n    {\n      \"role\": \"user\",\n      \"content\": \"Explain vector databases in two sentences.\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o-mini\",\n  \"temperature\": 0.2,\n  \"max_tokens\": 120\n}"

response = http.request(request)
puts response.read_body
```

```php Non-streaming completion
<?php
require_once('vendor/autoload.php');

$client = new \GuzzleHttp\Client();

$response = $client->request('POST', 'https://api.meshapi.ai/v1/chat/completions', [
  'body' => '{
  "messages": [
    {
      "role": "system",
      "content": "You are a concise assistant."
    },
    {
      "role": "user",
      "content": "Explain vector databases in two sentences."
    }
  ],
  "model": "openai/gpt-4o-mini",
  "temperature": 0.2,
  "max_tokens": 120
}',
  'headers' => [
    'Authorization' => 'Bearer <token>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp Non-streaming completion
using RestSharp;

var client = new RestClient("https://api.meshapi.ai/v1/chat/completions");
var request = new RestRequest(Method.POST);
request.AddHeader("Authorization", "Bearer <token>");
request.AddHeader("Content-Type", "application/json");
request.AddParameter("application/json", "{\n  \"messages\": [\n    {\n      \"role\": \"system\",\n      \"content\": \"You are a concise assistant.\"\n    },\n    {\n      \"role\": \"user\",\n      \"content\": \"Explain vector databases in two sentences.\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o-mini\",\n  \"temperature\": 0.2,\n  \"max_tokens\": 120\n}", ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Non-streaming completion
import Foundation

let headers = [
  "Authorization": "Bearer <token>",
  "Content-Type": "application/json"
]
let parameters = [
  "messages": [
    [
      "role": "system",
      "content": "You are a concise assistant."
    ],
    [
      "role": "user",
      "content": "Explain vector databases in two sentences."
    ]
  ],
  "model": "openai/gpt-4o-mini",
  "temperature": 0.2,
  "max_tokens": 120
] as [String : Any]

let postData = JSONSerialization.data(withJSONObject: parameters, options: [])

let request = NSMutableURLRequest(url: NSURL(string: "https://api.meshapi.ai/v1/chat/completions")! as URL,
                                        cachePolicy: .useProtocolCachePolicy,
                                    timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData as Data

let session = URLSession.shared
let dataTask = session.dataTask(with: request as URLRequest, completionHandler: { (data, response, error) -> Void in
  if (error != nil) {
    print(error as Any)
  } else {
    let httpResponse = response as? HTTPURLResponse
    print(httpResponse)
  }
})

dataTask.resume()
```

```javascript JavaScript
const response = await fetch("https://api.meshapi.ai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    messages: [
      { role: "system", content: "You are a concise assistant." },
      { role: "user", content: "Summarize why async IO matters." }
    ],
    temperature: 0.2
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);

```

```python Python
import httpx

response = httpx.post(
  "https://api.meshapi.ai/v1/chat/completions",
  headers={"Authorization": "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX"},
  json={
    "model": "openai/gpt-4o-mini",
    "messages": [
      {"role": "system", "content": "You are a concise assistant."},
      {"role": "user", "content": "Summarize why async IO matters."},
    ],
    "temperature": 0.2,
  },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])

```

```python Python (SDK)
from meshapi import MeshAPI, ChatCompletionParams, ChatMessage

client = MeshAPI(
  base_url="https://api.meshapi.ai",
  token="rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
)

completion = client.chat.completions.create(
  ChatCompletionParams(
    model="openai/gpt-4o-mini",
    messages=[
      ChatMessage(role="system", content="You are a concise assistant."),
      ChatMessage(role="user", content="Summarize why async IO matters."),
    ],
    temperature=0.2,
  )
)

print(completion.choices[0].message.content)

```

```typescript TypeScript (SDK)
import { MeshAPI } from "meshapi-node-sdk";

const client = new MeshAPI({
  baseUrl: "https://api.meshapi.ai",
  token: "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
});

const completion = await client.chat.completions.create({
  model: "openai/gpt-4o-mini",
  messages: [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "Summarize why async IO matters." },
  ],
  temperature: 0.2,
});

console.log(completion.choices[0]?.message.content);

```

```go Go (SDK)
package main

import (
  "context"
  "fmt"
  "log"

  meshapi "meshapi-go-sdk"
)

func main() {
  client := meshapi.New(meshapi.Config{
    BaseURL: "https://api.meshapi.ai",
    Token:   "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  })

  model := "openai/gpt-4o-mini"
  completion, err := client.Chat.Completions.Create(context.Background(), meshapi.ChatCompletionParams{
    Model: &model,
    Messages: []meshapi.ChatMessage{
      {Role: "system", Content: "You are a concise assistant."},
      {Role: "user", Content: "Summarize why async IO matters."},
    },
    Temperature: meshapi.Float64(0.2),
  })
  if err != nil {
    log.Fatal(err)
  }

  fmt.Println(completion.Choices[0].Message.Content)
}

```

```java Java (SDK)
import com.meshapi.sdk.MeshAPI;
import com.meshapi.sdk.types.chat.ChatCompletionRequest;
import com.meshapi.sdk.types.chat.ChatCompletionResponse;
import com.meshapi.sdk.types.chat.ChatMessage;

MeshAPI client = MeshAPI.builder()
    .baseUrl("https://api.meshapi.ai")
    .token("rsk_01JXXXXXXXXXXXXXXXXXXXXXXX")
    .build();

ChatCompletionResponse completion = client.chat().completions().create(
    ChatCompletionRequest.builder()
        .model("openai/gpt-4o-mini")
        .addMessage(ChatMessage.system("You are a concise assistant."))
        .addMessage(ChatMessage.user("Summarize why async IO matters."))
        .temperature(0.2)
        .build()
);

System.out.println(completion.choices.get(0).message.content);

```

```ruby Completion using a stored template
require 'uri'
require 'net/http'

url = URI("https://api.meshapi.ai/v1/chat/completions")

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true

request = Net::HTTP::Post.new(url)
request["Authorization"] = 'Bearer <token>'
request["Content-Type"] = 'application/json'
request.body = "{\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": \"I need help understanding my invoice.\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o-mini\",\n  \"template\": \"support-reply\",\n  \"variables\": {\n    \"company\": \"Acme Cloud\",\n    \"plan\": \"Pro\"\n  }\n}"

response = http.request(request)
puts response.read_body
```

```php Completion using a stored template
<?php
require_once('vendor/autoload.php');

$client = new \GuzzleHttp\Client();

$response = $client->request('POST', 'https://api.meshapi.ai/v1/chat/completions', [
  'body' => '{
  "messages": [
    {
      "role": "user",
      "content": "I need help understanding my invoice."
    }
  ],
  "model": "openai/gpt-4o-mini",
  "template": "support-reply",
  "variables": {
    "company": "Acme Cloud",
    "plan": "Pro"
  }
}',
  'headers' => [
    'Authorization' => 'Bearer <token>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp Completion using a stored template
using RestSharp;

var client = new RestClient("https://api.meshapi.ai/v1/chat/completions");
var request = new RestRequest(Method.POST);
request.AddHeader("Authorization", "Bearer <token>");
request.AddHeader("Content-Type", "application/json");
request.AddParameter("application/json", "{\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": \"I need help understanding my invoice.\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o-mini\",\n  \"template\": \"support-reply\",\n  \"variables\": {\n    \"company\": \"Acme Cloud\",\n    \"plan\": \"Pro\"\n  }\n}", ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Completion using a stored template
import Foundation

let headers = [
  "Authorization": "Bearer <token>",
  "Content-Type": "application/json"
]
let parameters = [
  "messages": [
    [
      "role": "user",
      "content": "I need help understanding my invoice."
    ]
  ],
  "model": "openai/gpt-4o-mini",
  "template": "support-reply",
  "variables": [
    "company": "Acme Cloud",
    "plan": "Pro"
  ]
] as [String : Any]

let postData = try! JSONSerialization.data(withJSONObject: parameters, options: [])

let request = NSMutableURLRequest(url: NSURL(string: "https://api.meshapi.ai/v1/chat/completions")! as URL,
                                        cachePolicy: .useProtocolCachePolicy,
                                    timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData as Data

let session = URLSession.shared
let dataTask = session.dataTask(with: request as URLRequest, completionHandler: { (data, response, error) -> Void in
  if (error != nil) {
    print(error as Any)
  } else {
    let httpResponse = response as? HTTPURLResponse
    print(httpResponse)
  }
})

dataTask.resume()
```

```javascript JavaScript
const response = await fetch("https://api.meshapi.ai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    messages: [
      { role: "system", content: "You are a concise assistant." },
      { role: "user", content: "Summarize why async IO matters." }
    ],
    temperature: 0.2
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);

```

```python Python
import httpx

response = httpx.post(
  "https://api.meshapi.ai/v1/chat/completions",
  headers={"Authorization": "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX"},
  json={
    "model": "openai/gpt-4o-mini",
    "messages": [
      {"role": "system", "content": "You are a concise assistant."},
      {"role": "user", "content": "Summarize why async IO matters."},
    ],
    "temperature": 0.2,
  },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])

```

```python Python (SDK)
from meshapi import MeshAPI, ChatCompletionParams, ChatMessage

client = MeshAPI(
  base_url="https://api.meshapi.ai",
  token="rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
)

completion = client.chat.completions.create(
  ChatCompletionParams(
    model="openai/gpt-4o-mini",
    messages=[
      ChatMessage(role="system", content="You are a concise assistant."),
      ChatMessage(role="user", content="Summarize why async IO matters."),
    ],
    temperature=0.2,
  )
)

print(completion.choices[0].message.content)

```

```typescript TypeScript (SDK)
import { MeshAPI } from "meshapi-node-sdk";

const client = new MeshAPI({
  baseUrl: "https://api.meshapi.ai",
  token: "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
});

const completion = await client.chat.completions.create({
  model: "openai/gpt-4o-mini",
  messages: [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "Summarize why async IO matters." },
  ],
  temperature: 0.2,
});

console.log(completion.choices[0]?.message.content);

```

```go Go (SDK)
package main

import (
  "context"
  "fmt"
  "log"

  meshapi "meshapi-go-sdk"
)

func main() {
  client := meshapi.New(meshapi.Config{
    BaseURL: "https://api.meshapi.ai",
    Token:   "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  })

  model := "openai/gpt-4o-mini"
  completion, err := client.Chat.Completions.Create(context.Background(), meshapi.ChatCompletionParams{
    Model: &model,
    Messages: []meshapi.ChatMessage{
      {Role: "system", Content: "You are a concise assistant."},
      {Role: "user", Content: "Summarize why async IO matters."},
    },
    Temperature: meshapi.Float64(0.2),
  })
  if err != nil {
    log.Fatal(err)
  }

  fmt.Println(completion.Choices[0].Message.Content)
}

```

```java Java (SDK)
import com.meshapi.sdk.MeshAPI;
import com.meshapi.sdk.types.chat.ChatCompletionRequest;
import com.meshapi.sdk.types.chat.ChatCompletionResponse;
import com.meshapi.sdk.types.chat.ChatMessage;

MeshAPI client = MeshAPI.builder()
    .baseUrl("https://api.meshapi.ai")
    .token("rsk_01JXXXXXXXXXXXXXXXXXXXXXXX")
    .build();

ChatCompletionResponse completion = client.chat().completions().create(
    ChatCompletionRequest.builder()
        .model("openai/gpt-4o-mini")
        .addMessage(ChatMessage.system("You are a concise assistant."))
        .addMessage(ChatMessage.user("Summarize why async IO matters."))
        .temperature(0.2)
        .build()
);

System.out.println(completion.choices.get(0).message.content);

```

```ruby Streaming completion
require 'uri'
require 'net/http'

url = URI("https://api.meshapi.ai/v1/chat/completions")

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true

request = Net::HTTP::Post.new(url)
request["Authorization"] = 'Bearer <token>'
request["Content-Type"] = 'application/json'
request.body = "{\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": \"Write a three-line haiku about distributed systems.\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o-mini\",\n  \"stream\": true\n}"

response = http.request(request)
puts response.read_body
```

```php Streaming completion
<?php
require_once('vendor/autoload.php');

$client = new \GuzzleHttp\Client();

$response = $client->request('POST', 'https://api.meshapi.ai/v1/chat/completions', [
  'body' => '{
  "messages": [
    {
      "role": "user",
      "content": "Write a three-line haiku about distributed systems."
    }
  ],
  "model": "openai/gpt-4o-mini",
  "stream": true
}',
  'headers' => [
    'Authorization' => 'Bearer <token>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp Streaming completion
using RestSharp;

var client = new RestClient("https://api.meshapi.ai/v1/chat/completions");
var request = new RestRequest(Method.POST);
request.AddHeader("Authorization", "Bearer <token>");
request.AddHeader("Content-Type", "application/json");
request.AddParameter("application/json", "{\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": \"Write a three-line haiku about distributed systems.\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o-mini\",\n  \"stream\": true\n}", ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Streaming completion
import Foundation

let headers = [
  "Authorization": "Bearer <token>",
  "Content-Type": "application/json"
]
let parameters = [
  "messages": [
    [
      "role": "user",
      "content": "Write a three-line haiku about distributed systems."
    ]
  ],
  "model": "openai/gpt-4o-mini",
  "stream": true
] as [String : Any]

let postData = try! JSONSerialization.data(withJSONObject: parameters, options: [])

let request = NSMutableURLRequest(url: NSURL(string: "https://api.meshapi.ai/v1/chat/completions")! as URL,
                                        cachePolicy: .useProtocolCachePolicy,
                                    timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData as Data

let session = URLSession.shared
let dataTask = session.dataTask(with: request as URLRequest, completionHandler: { (data, response, error) -> Void in
  if (error != nil) {
    print(error as Any)
  } else {
    let httpResponse = response as? HTTPURLResponse
    print(httpResponse)
  }
})

dataTask.resume()
```

```javascript JavaScript
const response = await fetch("https://api.meshapi.ai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  },
  body: JSON.stringify({
    model: "openai/gpt-4o-mini",
    messages: [
      { role: "system", content: "You are a concise assistant." },
      { role: "user", content: "Summarize why async IO matters." }
    ],
    temperature: 0.2
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);

```

```python Python
import httpx

response = httpx.post(
  "https://api.meshapi.ai/v1/chat/completions",
  headers={"Authorization": "Bearer rsk_01JXXXXXXXXXXXXXXXXXXXXXXX"},
  json={
    "model": "openai/gpt-4o-mini",
    "messages": [
      {"role": "system", "content": "You are a concise assistant."},
      {"role": "user", "content": "Summarize why async IO matters."},
    ],
    "temperature": 0.2,
  },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])

```

```python Python (SDK)
from meshapi import MeshAPI, ChatCompletionParams, ChatMessage

client = MeshAPI(
  base_url="https://api.meshapi.ai",
  token="rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
)

completion = client.chat.completions.create(
  ChatCompletionParams(
    model="openai/gpt-4o-mini",
    messages=[
      ChatMessage(role="system", content="You are a concise assistant."),
      ChatMessage(role="user", content="Summarize why async IO matters."),
    ],
    temperature=0.2,
  )
)

print(completion.choices[0].message.content)

```

```typescript TypeScript (SDK)
import { MeshAPI } from "meshapi-node-sdk";

const client = new MeshAPI({
  baseUrl: "https://api.meshapi.ai",
  token: "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
});

const completion = await client.chat.completions.create({
  model: "openai/gpt-4o-mini",
  messages: [
    { role: "system", content: "You are a concise assistant." },
    { role: "user", content: "Summarize why async IO matters." },
  ],
  temperature: 0.2,
});

console.log(completion.choices[0]?.message.content);

```

```go Go (SDK)
package main

import (
  "context"
  "fmt"
  "log"

  meshapi "meshapi-go-sdk"
)

func main() {
  client := meshapi.New(meshapi.Config{
    BaseURL: "https://api.meshapi.ai",
    Token:   "rsk_01JXXXXXXXXXXXXXXXXXXXXXXX",
  })

  model := "openai/gpt-4o-mini"
  completion, err := client.Chat.Completions.Create(context.Background(), meshapi.ChatCompletionParams{
    Model: &model,
    Messages: []meshapi.ChatMessage{
      {Role: "system", Content: "You are a concise assistant."},
      {Role: "user", Content: "Summarize why async IO matters."},
    },
    Temperature: meshapi.Float64(0.2),
  })
  if err != nil {
    log.Fatal(err)
  }

  fmt.Println(completion.Choices[0].Message.Content)
}

```

```java Java (SDK)
import com.meshapi.sdk.MeshAPI;
import com.meshapi.sdk.types.chat.ChatCompletionRequest;
import com.meshapi.sdk.types.chat.ChatCompletionResponse;
import com.meshapi.sdk.types.chat.ChatMessage;

MeshAPI client = MeshAPI.builder()
    .baseUrl("https://api.meshapi.ai")
    .token("rsk_01JXXXXXXXXXXXXXXXXXXXXXXX")
    .build();

ChatCompletionResponse completion = client.chat().completions().create(
    ChatCompletionRequest.builder()
        .model("openai/gpt-4o-mini")
        .addMessage(ChatMessage.system("You are a concise assistant."))
        .addMessage(ChatMessage.user("Summarize why async IO matters."))
        .temperature(0.2)
        .build()
);

System.out.println(completion.choices.get(0).message.content);

```

```ruby Tool calling
require 'uri'
require 'net/http'

url = URI("https://api.meshapi.ai/v1/chat/completions")

http = Net::HTTP.new(url.host, url.port)
http.use_ssl = true

request = Net::HTTP::Post.new(url)
request["Authorization"] = 'Bearer <token>'
request["Content-Type"] = 'application/json'
request.body = "{\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": \"What's the weather in Paris right now?\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o\",\n  \"tools\": [\n    {\n      \"type\": \"function\",\n      \"function\": {\n        \"name\": \"get_weather\",\n        \"description\": \"Get the current weather for a city.\",\n        \"parameters\": {\n          \"type\": \"object\",\n          \"additionalProperties\": false,\n          \"properties\": {\n            \"city\": {\n              \"type\": \"string\"\n            }\n          },\n          \"required\": [\n            \"city\"\n          ]\n        }\n      }\n    }\n  ],\n  \"tool_choice\": \"auto\"\n}"

response = http.request(request)
puts response.read_body
```

```php Tool calling
<?php
require_once('vendor/autoload.php');

$client = new \GuzzleHttp\Client();

$response = $client->request('POST', 'https://api.meshapi.ai/v1/chat/completions', [
  'body' => '{
  "messages": [
    {
      "role": "user",
      "content": "What\'s the weather in Paris right now?"
    }
  ],
  "model": "openai/gpt-4o",
  "tools": [
    {
      "type": "function",
      "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
          "type": "object",
          "additionalProperties": false,
          "properties": {
            "city": {
              "type": "string"
            }
          },
          "required": [
            "city"
          ]
        }
      }
    }
  ],
  "tool_choice": "auto"
}',
  'headers' => [
    'Authorization' => 'Bearer <token>',
    'Content-Type' => 'application/json',
  ],
]);

echo $response->getBody();
```

```csharp Tool calling
using RestSharp;

var client = new RestClient("https://api.meshapi.ai/v1/chat/completions");
var request = new RestRequest(Method.POST);
request.AddHeader("Authorization", "Bearer <token>");
request.AddHeader("Content-Type", "application/json");
request.AddParameter("application/json", "{\n  \"messages\": [\n    {\n      \"role\": \"user\",\n      \"content\": \"What's the weather in Paris right now?\"\n    }\n  ],\n  \"model\": \"openai/gpt-4o\",\n  \"tools\": [\n    {\n      \"type\": \"function\",\n      \"function\": {\n        \"name\": \"get_weather\",\n        \"description\": \"Get the current weather for a city.\",\n        \"parameters\": {\n          \"type\": \"object\",\n          \"additionalProperties\": false,\n          \"properties\": {\n            \"city\": {\n              \"type\": \"string\"\n            }\n          },\n          \"required\": [\n            \"city\"\n          ]\n        }\n      }\n    }\n  ],\n  \"tool_choice\": \"auto\"\n}", ParameterType.RequestBody);
IRestResponse response = client.Execute(request);
```

```swift Tool calling
import Foundation

let headers = [
  "Authorization": "Bearer <token>",
  "Content-Type": "application/json"
]
let parameters = [
  "messages": [
    [
      "role": "user",
      "content": "What's the weather in Paris right now?"
    ]
  ],
  "model": "openai/gpt-4o",
  "tools": [
    [
      "type": "function",
      "function": [
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": [
          "type": "object",
          "additionalProperties": false,
          "properties": ["city": ["type": "string"]],
          "required": ["city"]
        ]
      ]
    ]
  ],
  "tool_choice": "auto"
] as [String : Any]

let postData = try! JSONSerialization.data(withJSONObject: parameters, options: [])

let request = NSMutableURLRequest(url: NSURL(string: "https://api.meshapi.ai/v1/chat/completions")! as URL,
                                        cachePolicy: .useProtocolCachePolicy,
                                    timeoutInterval: 10.0)
request.httpMethod = "POST"
request.allHTTPHeaderFields = headers
request.httpBody = postData as Data

let session = URLSession.shared
let dataTask = session.dataTask(with: request as URLRequest, completionHandler: { (data, response, error) -> Void in
  if (error != nil) {
    print(error as Any)
  } else {
    let httpResponse = response as? HTTPURLResponse
    print(httpResponse)
  }
})

dataTask.resume()
```