diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9985a6f..6498416 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,13 +29,27 @@ jobs: id: prompt uses: ./ with: - messages: '[{"role": "user", "content": "What is the capital of France?"}]' - model: openai/o4-mini - org: ${{ github.repository_owner}} - max-tokens: 100 + payload: | + model: openai/gpt-4.1-mini + messages: + - role: system + content: You are a helpful assistant + - role: user + content: What is the capital of France + max_tokens: 100 + temperature: 0.9 + top_p: 0.9 - name: Echo outputs - continue-on-error: true run: | - echo "response: ${{ steps.prompt.outputs.response }}" - echo "response-raw: ${{ steps.prompt.outputs.response-raw }}" + echo "response:" + echo "${{ steps.prompt.outputs.response }}" + + echo "response-file:" + echo "${{ steps.prompt.outputs.response-file }}" + + echo "response-file contents:" + cat "${{ steps.prompt.outputs.response-file }}" | jq + + echo "payload:" + echo "${{ steps.prompt.outputs.payload }}" diff --git a/README.md b/README.md index 8e57d58..7600852 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,8 @@ ## Usage Examples +[Compare available AI models](https://docs.github.com/en/copilot/using-github-copilot/ai-models/choosing-the-right-ai-model-for-your-task "Comparison of AI models for GitHub.") to choose the best one for your use-case. + ```yml on: issues: @@ -33,15 +35,18 @@ jobs: payload: | model: openai/gpt-4.1-mini messages: + - role: system + content: You are a helpful assistant running within GitHub CI. 
- role: user content: Concisely summarize this GitHub issue titled ${{ github.event.issue.title }}: ${{ github.event.issue.body }} + max_tokens: 100 temperature: 0.9 top_p: 0.9 - name: Comment summary run: gh issue comment $NUMBER --body "$SUMMARY" env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_TOKEN: ${{ github.token }} NUMBER: ${{ github.event.issue.number }} SUMMARY: ${{ steps.prompt.outputs.response }} ``` @@ -50,23 +55,29 @@ jobs: ## Inputs -Only `messages` and `model` are required inputs. [Compare available AI models](https://docs.github.com/en/copilot/using-github-copilot/ai-models/choosing-the-right-ai-model-for-your-task "Comparison of AI models for GitHub.") to choose the best one for your use-case. +Either `payload` or `payload-file` is required. + +| Type | Name | Description | +| ------ | -------------------- | ----------------------------------------------------------------------------------------------------------- | +| Data | `payload` | Body parameters of the inference request in YAML format.
Example: `model…` | +| Data | `payload-file` | Path to a file containing the body parameters of the inference request.
Example: `./payload.{json,yml}` | +| Config | `show-payload` | Whether to show the payload in the logs.
Default: `true` | +| Config | `show-response` | Whether to show the response content in the logs.
Default: `true` | +| Admin | `github-api-version` | GitHub API version.
Default: `2022-11-28` | +| Admin | `github-token` | GitHub token.
Default: `github.token` | +| Admin | `org` | Organization for request attribution.
Example: `github.repository_owner` | -| Name | Description | -| -------------------- | ---------------------------------------------------------------------------------------------------- | -| `github-api-version` | GitHub API version.
Default: `2022-11-28` | -| `github-token` | GitHub token.
Default: `github.token` | -| `max-tokens` | Maximum number of tokens to generate in the completion.
Example: `1000` | -| `messages` | Messages to send to the model in JSON format.
Example: `[{"role": "user", "content": "Hello!"}]` | -| `model` | Model to use for inference.
Example: `openai/o4-mini` | -| `org` | Organization to which the request should be attributed.
Example: `github.repository_owner` | +
## Outputs -| Name | Description | -| -------------- | -------------------------------------------- | -| `response` | Response content from the inference request. | -| `response-raw` | Raw, complete response in JSON format. | +| Name | Description | +| -------------- | -------------------------------------------------------- | +| `response` | Response content from the inference request. | +| `response-file` | File path containing the complete, raw response. | +| `payload` | Body parameters of the inference request in JSON format. | + +
## Security diff --git a/SECURITY.md b/SECURITY.md index 754f224..35aa759 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -17,4 +17,4 @@ Integrating security in your CI/CD pipeline is critical to practicing DevSecOps. ## Reporting a Vulnerability -You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be sent by email to or reported via [Security Advisory](https://github.com/op5dev/ai-inference-request/security/advisories/new "Create a new security advisory."). +You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be sent by email to or reported via [Security Advisory](https://github.com/op5dev/ai-inference-request/security/advisories/new "Create a new security advisory."). diff --git a/action.yml b/action.yml index ec5bf41..5fdc321 100644 --- a/action.yml +++ b/action.yml @@ -1,32 +1,36 @@ --- name: AI Inference Request via GitHub Action author: Rishav Dhar (https://rdhar.dev) -description: AI inference request GitHub Models with this GitHub Action. +description: AI inference request GitHub Models via this GitHub Action. inputs: github-api-version: default: "2022-11-28" - description: "GitHub API version (e.g., `2022-11-28`)." + description: GitHub API version (e.g., `2022-11-28`) required: false github-token: - default: ${{ github.token }} - description: "GitHub token (e.g., `github.token`)." + default: "${{ github.token }}" + description: GitHub token (e.g., `github.token`) required: false - max-tokens: + org: default: "" - description: "Maximum number of tokens to generate in the completion (e.g., `1000`)." 
+ description: Organization for request attribution (e.g., `github.repository_owner`) required: false - messages: - default: "" - description: 'Messages to send to the model in JSON format (e.g., `[{"role": "user", "content": "Hello!"}]`).' - required: true - model: + payload: default: "" - description: "Model to use for inference (e.g., `openai/o4-mini`)." - required: true - org: + description: Body parameters of the inference request in YAML format (e.g., `model…`) + required: false + payload-file: default: "" - description: "Organization to which the request should be attributed (e.g., `github.repository_owner`)." + description: Path to a file containing the body parameters of the inference request (e.g., `./payload.{json,yml}`) + required: false + show-payload: + default: "true" + description: Whether to show the payload in the logs (e.g., `true`) + required: false + show-response: + default: "true" + description: Whether to show the response content in the logs (e.g., `true`) required: false runs: @@ -38,31 +42,62 @@ runs: API_VERSION: ${{ inputs.github-api-version }} GH_TOKEN: ${{ inputs.github-token }} ORG: ${{ inputs.org != '' && format('orgs/{0}/', inputs.org) || '' }} + PAYLOAD: ${{ inputs.payload }} + PAYLOAD_FILE: ${{ inputs.payload-file }} + SHOW_PAYLOAD: ${{ inputs.show-payload }} + SHOW_RESPONSE: ${{ inputs.show-response }} run: | - GH_HOST=$(echo $GITHUB_SERVER_URL | sed 's/.*:\/\///') + # AI inference request + if [[ -n "$PAYLOAD_FILE" ]]; then + # Check if the file exists + if [[ ! -f "$PAYLOAD_FILE" ]]; then + echo "Error: Payload file '$PAYLOAD_FILE' does not exist." 
>&2 + exit 1 + fi + # Determine whether the format is JSON (starts with '{') or YAML (default) + first_char=$(sed -n 's/^[[:space:]]*\(.\).*/\1/p; q' "$PAYLOAD_FILE") + if [[ "$first_char" == '{' ]]; then + body=$(cat "$PAYLOAD_FILE") + else + body=$(yq --output-format json "$PAYLOAD_FILE") + fi + else + body=$(echo "$PAYLOAD" | yq --output-format json) + fi + echo "payload_json=$(echo $body)" >> $GITHUB_OUTPUT + if [[ "${SHOW_PAYLOAD,,}" == "true" ]]; then echo "$body"; fi + + # Create a temporary file to store the response + temp_file=$(mktemp) - response_raw=$(curl --request POST --location https://models.github.ai/${ORG}inference/chat/completions \ + # Send the AI inference request via GitHub API + curl \ + --request POST \ + --no-progress-meter \ + --location "https://models.github.ai/${ORG}inference/chat/completions" \ --header "Accept: application/vnd.github+json" \ --header "Authorization: Bearer $GH_TOKEN" \ --header "Content-Type: application/json" \ --header "X-GitHub-Api-Version: $API_VERSION" \ - --data '{ - "messages": ${{ inputs.messages }}, - "model": "${{ inputs.model }}" - }' - ) + --data "$(echo $body | jq --compact-output --exit-status)" \ + &> "$temp_file" - echo $response_raw - echo "response_raw=$response_raw" >> $GITHUB_OUTPUT - echo "response=$response_raw | jq --raw-output '.choices[0].message.content'" >> $GITHUB_OUTPUT + # In addition to the temporary file containing the full response, + # return the first 2**18 bytes of the response content (GitHub's limit) + echo "response_file=$temp_file" >> $GITHUB_OUTPUT + echo "response=$(cat $temp_file | jq --raw-output '.choices[0].message.content' | head --bytes 262144 --silent)" >> $GITHUB_OUTPUT + if [[ "${SHOW_RESPONSE,,}" == "true" ]]; then cat "$temp_file" | jq --raw-output '.choices[0].message.content' || true; fi outputs: + payload: + description: Body parameters of the inference request in JSON format. 
+ value: ${{ steps.request.outputs.payload_json }} response: - description: "Response content from the inference request." + description: Response content from the inference request. value: ${{ steps.request.outputs.response }} - response-raw: - description: "Raw, complete response in JSON format." - value: ${{ steps.request.outputs.response_raw }} + response-file: + description: File path containing the complete, raw response in JSON format. + value: ${{ steps.request.outputs.response_file }} branding: color: white