// An example of Go code for the Trend Vision One AI Guard integration.
// The following shows how to integrate AI Guard with your application.
package main
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"

	openai "github.com/openai/openai-go"
)
// GuardrailsResponse represents the response from the applyGuardrails API.
type GuardrailsResponse struct {
	// ID is the identifier the service assigns to this evaluation.
	ID string `json:"id"`
	// Action is the guardrails verdict; "Block" marks the content unsafe.
	Action string `json:"action"`
	// Reasons lists the reasons behind the verdict (presumably populated
	// when Prefer is "return=representation" — confirm against the API docs).
	Reasons []string `json:"reasons"`
}
// main demonstrates a two-stage AI Guard flow: screen the user prompt,
// call the LLM only when the prompt is allowed, then screen the LLM
// response before showing it to the user.
func main() {
	// Trend Vision One API key, required by the AI Guard endpoint.
	apiKey := os.Getenv("V1_API_KEY")
	if apiKey == "" {
		fmt.Fprintln(os.Stderr, "failed to fetch Trend Vision One API key")
		os.Exit(1)
	}

	// User prompt to screen and, if allowed, forward to the LLM.
	userPrompt := "Explain the concept of machine learning in simple terms."

	// Guardrails request payload: {"prompt": "<user prompt>"}.
	jsonPayload, err := json.Marshal(map[string]string{"prompt": userPrompt})
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to marshal applyGuardrails request payload: %v\n", err)
		os.Exit(1)
	}

	// Replace <region> with your Trend Vision One region (e.g. "us", "eu").
	url := "https://api.<region>.xdr.trendmicro.com/v3.0/aiSecurity/applyGuardrails"

	// One shared client with a timeout so a stalled request cannot hang forever.
	client := &http.Client{Timeout: 30 * time.Second}

	// Stage 1: screen the user prompt.
	result, err := applyGuardrails(client, url, apiKey, jsonPayload, "")
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}

	unsafe := result.Action == "Block"
	fmt.Printf("Unsafe: %v\n", unsafe)
	if unsafe {
		fmt.Println("User prompt is considered unsafe. No response will be generated.")
		return
	}

	// Fail fast if the OpenAI key is missing; openai.NewClient reads it
	// from OPENAI_API_KEY when no explicit option is passed.
	if os.Getenv("OPENAI_API_KEY") == "" {
		fmt.Fprintln(os.Stderr, "failed to fetch OpenAI API key")
		os.Exit(1)
	}

	// Call the LLM with the screened prompt via the official OpenAI Go SDK.
	ctx := context.Background()
	openaiClient := openai.NewClient()
	chatResp, err := openaiClient.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
		Model: openai.ChatModelGPT4,
		Messages: []openai.ChatCompletionMessageParamUnion{
			openai.UserMessage(userPrompt),
		},
		MaxTokens:   openai.Int(150),
		Temperature: openai.Float(0.7),
	})
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to create chat completion: %v\n", err)
		os.Exit(1)
	}

	// Stage 2: screen the raw LLM response before showing it to the user.
	llmPayload, err := json.Marshal(chatResp)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to marshal OpenAI response: %v\n", err)
		os.Exit(1)
	}
	guardResult, err := applyGuardrails(client, url, apiKey, llmPayload, "OpenAIChatCompletionResponseV1")
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	if guardResult.Action == "Block" {
		fmt.Println("LLM response is considered unsafe. No response will be shown.")
		return
	}

	// Print the LLM response text, if any choice was returned.
	if len(chatResp.Choices) > 0 {
		fmt.Println(chatResp.Choices[0].Message.Content)
	}
}

// applyGuardrails POSTs payload to the AI Guard endpoint and decodes the
// verdict. requestType, when non-empty, is sent as the TMV1-Request-Type
// header (e.g. "OpenAIChatCompletionResponseV1" for LLM responses).
func applyGuardrails(client *http.Client, url, apiKey string, payload []byte, requestType string) (GuardrailsResponse, error) {
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
	if err != nil {
		return GuardrailsResponse{}, fmt.Errorf("creating applyGuardrails request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("TMV1-Application-Name", "your-app-name") // Required header
	req.Header.Set("Prefer", "return=minimal")               // Use "return=representation" for detailed responses
	if requestType != "" {
		req.Header.Set("TMV1-Request-Type", requestType)
	}

	resp, err := client.Do(req)
	if err != nil {
		return GuardrailsResponse{}, fmt.Errorf("sending applyGuardrails request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return GuardrailsResponse{}, fmt.Errorf("reading applyGuardrails response: %w", err)
	}
	// Surface non-2xx responses instead of silently unmarshaling an error body.
	if resp.StatusCode != http.StatusOK {
		return GuardrailsResponse{}, fmt.Errorf("applyGuardrails returned status %d: %s", resp.StatusCode, body)
	}

	var result GuardrailsResponse
	if err := json.Unmarshal(body, &result); err != nil {
		return GuardrailsResponse{}, fmt.Errorf("unmarshaling applyGuardrails response: %w", err)
	}
	return result, nil
}
