# Swift Examples - Perplexity

##### Service setup

### Create a Perplexity service in the AIProxy dashboard

Follow the [integration guide](/docs/integration-guide.html), selecting the Perplexity icon on the 'Create a New Service' form.

### How to create a chat completion with Perplexity

```swift
import AIProxy

/* Uncomment for BYOK (bring-your-own-key) use cases */
// let perplexityService = AIProxy.perplexityDirectService(
//     unprotectedAPIKey: "your-perplexity-key"
// )

/* Uncomment for all other production use cases */
// let perplexityService = AIProxy.perplexityService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

do {
    // Send a single-turn chat completion request to Perplexity.
    let response = try await perplexityService.chatCompletionRequest(body: .init(
        messages: [.user(content: "How many national parks in the US?")],
        model: "llama-3.1-sonar-small-128k-online"
    ))

    // The assistant's reply, the citations backing it, and the token
    // usage are all optional on the response body, so fall back to
    // placeholders when a field is absent.
    print(
        """
        Received from Perplexity:
        \(response.choices.first?.message?.content ?? "no content")

        With citations:
        \(response.citations ?? ["none"])

        Using:
        \(response.usage?.promptTokens ?? 0) prompt tokens
        \(response.usage?.completionTokens ?? 0) completion tokens
        \(response.usage?.totalTokens ?? 0) total tokens
        """
    )
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    // The request reached the server but returned a non-200 status.
    print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
    // Networking or decoding failures land here.
    print("Could not create perplexity chat completion: \(error.localizedDescription)")
}
```

### How to create a streaming chat completion with Perplexity

```swift
import AIProxy

/* Uncomment for BYOK (bring-your-own-key) use cases */
// let perplexityService = AIProxy.perplexityDirectService(
//     unprotectedAPIKey: "your-perplexity-key"
// )

/* Uncomment for all other production use cases */
// let perplexityService = AIProxy.perplexityService(
//     partialKey: "partial-key-from-your-developer-dashboard",
//     serviceURL: "service-url-from-your-developer-dashboard"
// )

do {
    // Open a streaming chat completion; chunks arrive incrementally.
    let stream = try await perplexityService.streamingChatCompletionRequest(body: .init(
        messages: [.user(content: "How many national parks in the US?")],
        model: "llama-3.1-sonar-small-128k-online"
    ))

    // Citations and token usage are populated on the final chunk, so keep
    // a reference to the most recent chunk as the stream is consumed.
    var lastChunk: PerplexityChatCompletionResponseBody?
    for try await chunk in stream {
        // Use an empty terminator so the streamed fragments join into a
        // single message instead of printing each delta on its own line.
        print(chunk.choices.first?.delta?.content ?? "", terminator: "")
        lastChunk = chunk
    }
    print() // Finish the streamed line before printing the summary.

    if let lastChunk = lastChunk {
        print(
            """
            Citations:
            \(lastChunk.citations ?? ["none"])

            Using:
            \(lastChunk.usage?.promptTokens ?? 0) prompt tokens
            \(lastChunk.usage?.completionTokens ?? 0) completion tokens
            \(lastChunk.usage?.totalTokens ?? 0) total tokens
            """
        )
    }
} catch AIProxyError.unsuccessfulRequest(let statusCode, let responseBody) {
    // The request reached the server but returned a non-200 status.
    print("Received non-200 status code: \(statusCode) with response body: \(responseBody)")
} catch {
    // Networking or decoding failures land here.
    print("Could not create perplexity streaming chat completion: \(error.localizedDescription)")
}
```


