Overview:

An example of JavaScript code for the Trend Vision One AI Guard integration.

The following example shows how to screen a user prompt with AI Guard before sending it to an LLM, and how to screen the LLM's response with AI Guard before showing it to the user.
// --- AI Guard example configuration: HTTP client, credentials, prompt, headers, endpoint ---
const fetch = require('node-fetch');

// Read the Trend Vision One API key from the environment; fail fast when it is absent.
const { V1_API_KEY: apiKey } = process.env;
if (!apiKey) {
    throw new Error('Missing V1_API_KEY environment variable');
}

// The user prompt that AI Guard will screen.
const userPrompt = 'Explain the concept of machine learning in simple terms.';

// Request body for the applyGuardrails call.
const payload = { prompt: userPrompt };

// Headers shared by every AI Guard request.
const headers = {
    'Authorization': `Bearer ${apiKey}`,
    'Content-Type': 'application/json',
    'TMV1-Application-Name': 'my-ai-application',  // Required header
    'Prefer': 'return=minimal', // Optional header: use 'return=representation' for more detailed responses
    'Accept': 'application/json'
};

// AI Guard endpoint — replace <region> with your Vision One region.
const url = 'https://api.<region>.xdr.trendmicro.com/v3.0/aiSecurity/applyGuardrails';
// Screen the user prompt with AI Guard; only call the LLM when the prompt is
// not blocked, then screen the raw LLM response through AI Guard as well.
fetch(url, {
    method: 'POST',
    headers,
    body: JSON.stringify(payload),
})
    .then(async (response) => {
        // Fail closed: without this check an HTTP error (bad key, wrong region,
        // rate limit) produces a body with no `action` field, the prompt is
        // treated as safe, and the guardrail is silently bypassed.
        if (!response.ok) {
            throw new Error(`AI Guard request failed with HTTP ${response.status}`);
        }
        const responseData = await response.json();  // Parse response body
        const action = responseData.action;  // Verdict from AI Guard
        // Only a string 'block' verdict marks the prompt unsafe; a non-string
        // `action` must not crash on .toLowerCase().
        const unsafe = typeof action === 'string' && action.toLowerCase() === 'block';
        console.log('Unsafe:', unsafe);
        if (unsafe) {
            console.log('User prompt is considered unsafe. No response will be generated.');
            process.exit(0);
        }

        // Prompt passed the guardrail: call the OpenAI API.
        const openaiApiKey = process.env.OPENAI_API_KEY;
        if (!openaiApiKey) {
            throw new Error('Missing OPENAI_API_KEY environment variable');
        }
        const openaiPayload = {
            model: 'gpt-4',
            messages: [
                { role: 'user', content: userPrompt }
            ],
            max_tokens: 150,
            temperature: 0.7
        };
        const openaiResp = await fetch('https://api.openai.com/v1/chat/completions', {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${openaiApiKey}`,
                'Content-Type': 'application/json'
            },
            body: JSON.stringify(openaiPayload)
        });
        // Surface OpenAI HTTP errors instead of forwarding an error body to AI Guard.
        if (!openaiResp.ok) {
            throw new Error(`OpenAI request failed with HTTP ${openaiResp.status}`);
        }
        const openaiResult = await openaiResp.json();

        // Send the raw OpenAI chat-completion response to AI Guard for screening.
        const guardHeaders = {
            ...headers,
            'TMV1-Request-Type': 'OpenAIChatCompletionResponseV1'  // Tells AI Guard the payload shape
        };
        const guardResp = await fetch(url, {
            method: 'POST',
            headers: guardHeaders,
            body: JSON.stringify(openaiResult)
        });
        // Fail closed on the response scan as well.
        if (!guardResp.ok) {
            throw new Error(`AI Guard response scan failed with HTTP ${guardResp.status}`);
        }
        const guardData = await guardResp.json();  // Parse response body
        const guardAction = guardData.action;  // Verdict on the LLM response
        if (typeof guardAction === 'string' && guardAction.toLowerCase() === 'block') {
            console.log('LLM response is considered unsafe. No response will be shown.');
            process.exit(0);
        }

        // Print the LLM response text (guard against an empty choices array).
        if (openaiResult.choices && openaiResult.choices.length > 0) {
            console.log(openaiResult.choices[0].message.content);
        }
    })
    .catch(error => {
        console.error('Error:', error);
    });