
Example Python code for Trend Vision One AI Guard integration.

The following is an example of how to integrate AI Guard into your application.
import os
import sys

import requests
from openai import OpenAI


# Get your Trend Vision One API key from environment variable
api_key = os.environ.get("V1_API_KEY")
if not api_key:
    raise ValueError("Missing V1_API_KEY environment variable")

# Region subdomain of your Trend Vision One console (for example "eu", "sg", "au")
region = "eu"

# Create the OpenAI client with your model API key
client = OpenAI(api_key="your-model-api-key")

# User prompt stored in a variable
user_prompt = "Explain the concept of machine learning in simple terms."

# Use the requests library to make the direct call to the /applyGuardrails endpoint
headers = {
    "Authorization": f"Bearer {api_key}",
    "Content-Type": "application/json",
    "TMV1-Application-Name": "my-ai-application",  # REQUIRED: Your application name
    "TMV1-Request-Type": "SimpleRequestGuardrails",  # Optional: Defaults to SimpleRequestGuardrails
    "Prefer": "return=minimal"  # Optional: "return=minimal" (default) or "return=representation" for detailed response
    "Accept": "application/json"
}

payload = {
    "prompt": user_prompt
}

response = requests.post(
    f"https://api.{region}.xdr.trendmicro.com/v3.0/aiSecurity/applyGuardrails",
    headers=headers,
    json=payload
)

# Check the response body for the 'action' field 
unsafe = False
if response.status_code == 200:
    response_data = response.json()
    if response_data.get("action") == "Block":
        unsafe = True
        print(f"User prompt blocked. Reasons: {response_data.get('reasons', [])}")
else:
    print(f"Error calling guardrails API: {response.status_code}")
    sys.exit(1)


if not unsafe:
    # Send the prompt to the OpenAI LLM
    openai_response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "user", "content": user_prompt}
        ],
        max_tokens=150,
        temperature=0.7
    )

    # The LLM output can also be screened: the payload is the OpenAI response object itself
    response_headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
        "TMV1-Application-Name": "my-ai-application",
        "TMV1-Request-Type": "OpenAIChatCompletionResponseV1",  # Specify OpenAI response format
        "Prefer": "return=minimal",
        "Accept": "application/json"
    }

    # The payload is the complete OpenAI chat completion response, serialized to a dict
    guard_response = requests.post(
        f"https://api.{region}.xdr.trendmicro.com/v3.0/aiSecurity/applyGuardrails",
        headers=response_headers,
        json=openai_response.model_dump()
    )

    # Check the response body for the 'action' field
    if guard_response.status_code == 200:
        guard_data = guard_response.json()
        if guard_data.get("action") == "Block":
            print(f"LLM response blocked. Reasons: {guard_data.get('reasons', [])}")
            sys.exit(0)
    else:
        print(f"Error evaluating LLM response: {guard_response.status_code}")
        sys.exit(1)

    # Print the response
    print(openai_response.choices[0].message.content.strip())
else:
    print("User prompt is considered unsafe. No response will be generated.")
    sys.exit(0)
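
Because the /applyGuardrails endpoint is called the same way for both the user prompt and the model response, the two calls can be factored into a small helper. The sketch below is illustrative rather than official: the check_guardrails function and its defaults are assumptions, and it reuses the api_key and region variables from the example above.

def check_guardrails(payload, request_type="SimpleRequestGuardrails"):
    """Return (blocked, reasons) for a payload evaluated by AI Guard.

    Illustrative helper, not part of an official SDK; reuses the api_key
    and region variables defined in the example above.
    """
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
        "TMV1-Application-Name": "my-ai-application",
        "TMV1-Request-Type": request_type,
        "Prefer": "return=minimal",
        "Accept": "application/json"
    }
    response = requests.post(
        f"https://api.{region}.xdr.trendmicro.com/v3.0/aiSecurity/applyGuardrails",
        headers=headers,
        json=payload
    )
    response.raise_for_status()
    data = response.json()
    return data.get("action") == "Block", data.get("reasons", [])

# Example usage: screen the user prompt before calling the LLM
blocked, reasons = check_guardrails({"prompt": user_prompt})
if blocked:
    print(f"User prompt blocked. Reasons: {reasons}")

For the second call, pass the serialized chat completion (for example openai_response.model_dump()) as the payload with request_type="OpenAIChatCompletionResponseV1", mirroring the two-step flow shown above.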