Enrich data with LLMs
Introduction
In this recipe, you'll learn how to interact with OpenAI's API to send prompts and receive responses. This can be used to integrate AI-driven features, such as generating text or completing tasks, into your API. In the example below, we'll hard-code a prompt and have OpenAI apply it to existing content in our supergraph.
Before continuing, ensure you have:
- A local Hasura DDN project.
- A lambda connector added to your project.
- An OpenAI API key (You can get one by signing up at OpenAI).
Recipe
Step 1. Write the function
- TypeScript
- Python
- Go
npm install openai
// We can store this in the project's .env file and reference it here
const OPENAI_API_KEY =
"your_openai_api_key";
const client = new OpenAI({
apiKey: OPENAI_API_KEY,
});
/**
 * Rewrites a product description to rank better for SEO using OpenAI.
 *
 * @param input - The original product description to improve.
 * @returns The SEO-optimized description, or null when the model returns no content.
 * @readonly
 */
export async function generateSeoDescription(input: string): Promise<string | null> {
  const response = await client.chat.completions.create({
    messages: [
      {
        role: "system",
        content:
          "You are a senior marketing associate. Take the product description provided and improve upon it to rank well with SEO.",
      },
      { role: "user", content: input },
    ],
    model: "gpt-4o",
  });
  // Guard against an empty `choices` array instead of crashing on [0];
  // `?? null` keeps the declared `string | null` return type honest.
  return response.choices[0]?.message.content ?? null;
}
openai==1.46.1
from hasura_ndc import start
from hasura_ndc.function_connector import FunctionConnector
from openai import OpenAI
# Registers this file's decorated functions with the Hasura DDN lambda connector.
connector = FunctionConnector()

# We can store this in the project's .env file and reference it here
OPENAI_API_KEY = "your_openai_api_key"

# Shared OpenAI client reused by every query invocation.
client = OpenAI(
    api_key=OPENAI_API_KEY,
)
@connector.register_query
def generate_seo_description(input: str) -> str:
    """Rewrite a product description to rank better for SEO.

    Args:
        input: The original product description.

    Returns:
        The SEO-optimized description produced by the model.
    """
    response = client.chat.completions.create(
        # Match the model used by the TypeScript example ("gpt-4o") so all
        # three connector implementations behave consistently.
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are a senior marketing associate. Take the product description provided and improve upon it to rank well with SEO."},
            {"role": "user", "content": input},
        ],
    )
    return response.choices[0].message.content
# Start the connector's server when this module is executed directly.
if __name__ == "__main__":
    start(connector)
package functions
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"hasura-ndc.dev/ndc-go/types"
)
// SEOArguments holds the input payload for FunctionGenerateSeoDescription.
type SEOArguments struct {
	Input string `json:"input"` // product description to optimize
}
// OpenAIRequest represents the request payload for OpenAI's Chat API.
type OpenAIRequest struct {
	Model    string    `json:"model"`    // model identifier, e.g. "gpt-4"
	Messages []Message `json:"messages"` // conversation sent to the model
}
// Message defines the structure for a single chat message sent to OpenAI.
type Message struct {
	Role    string `json:"role"`    // "system" or "user" in this recipe
	Content string `json:"content"` // message text
}
// OpenAIResponse models only the subset of OpenAI's response that this
// recipe reads: the generated message content of each choice.
type OpenAIResponse struct {
	Choices []struct {
		Message struct {
			Content string `json:"content"`
		} `json:"message"`
	} `json:"choices"`
}
// FunctionGenerateSeoDescription sends the product description to OpenAI's
// Chat Completions API and returns an SEO-optimized rewrite.
//
// It returns a non-nil error when the request cannot be built or sent, when
// OpenAI responds with a non-200 status, or when the response is malformed.
func FunctionGenerateSeoDescription(ctx context.Context, state *types.State, arguments *SEOArguments) (string, error) {
	// We can store this in the project's .env file and reference it here.
	// NOTE: never commit a real API key to source control.
	apiKey := "your_openai_api_key"

	// Prepare the request payload.
	reqBody, err := json.Marshal(OpenAIRequest{
		Model: "gpt-4",
		Messages: []Message{
			{Role: "system", Content: "You are a senior marketing associate. Take the product description provided and improve upon it to rank well with SEO."},
			{Role: "user", Content: arguments.Input},
		},
	})
	if err != nil {
		return "", fmt.Errorf("failed to encode request: %v", err)
	}

	// Create a new request to the OpenAI Chat API, bound to ctx so the call
	// is cancelled if the caller's context is cancelled.
	req, err := http.NewRequestWithContext(ctx, "POST", "https://api.openai.com/v1/chat/completions", bytes.NewBuffer(reqBody))
	if err != nil {
		return "", fmt.Errorf("failed to create request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf("failed to send request: %v", err)
	}
	defer resp.Body.Close()

	// Parse the response from OpenAI, surfacing API errors instead of
	// silently indexing into an empty Choices slice (which would panic).
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("OpenAI API returned status %d: %s", resp.StatusCode, body)
	}

	var openAIResp OpenAIResponse
	if err := json.Unmarshal(body, &openAIResp); err != nil {
		return "", fmt.Errorf("failed to parse response: %v", err)
	}
	if len(openAIResp.Choices) == 0 {
		return "", fmt.Errorf("OpenAI API returned no choices")
	}
	return openAIResp.Choices[0].Message.Content, nil
}
Step 2. Track your function
To add your function, generate the related metadata that will link together any functions in your lambda connector's source files and your API:
ddn connector introspect <connector_name>
Then, you can generate an `.hml` file for the function using the following command:
ddn command add <connector_name> "*"
Step 3. Create a relationship (optional)
Assuming the input argument's type matches that of a type belonging to one or more of your models, you can create a relationship to the command. This will enable you to make nested queries that will invoke your custom business logic using the value of the field from the related model!
Create a relationship in the corresponding model's HML file.
---
# Exposes the generateSeoDescription command as a nested field
# (`optimizedDescription`) on the Products model, feeding each product's
# `description` field into the command's `input` argument.
kind: Relationship
version: v1
definition:
  name: optimizedDescription
  sourceType: Products
  target:
    command:
      name: generateSeoDescription
  mapping:
    - source:
        fieldPath:
          - fieldName: description
      target:
        argument:
          argumentName: input
Step 4. Test your function
Create a new build of your supergraph:
ddn supergraph build local
In your project's explorer, you should see the new function exposed as a type and should be able to make a query like this:
If you created a relationship, you can make a query like this, too:
Wrapping up
In this guide, you learned how to send prompts to OpenAI's API and receive responses in your application. By leveraging lambda connectors with relationships, you can easily incorporate AI-driven capabilities into your existing supergraph.
Learn more about lambda connectors
- TypeScript Node.js connector.
- Python connector.
- Go connector.