# GitHub Actions workflow — AWS Backup Feature Discovery (#18)
---
name: AWS Backup Feature Discovery

on:
  schedule:
    # Run weekly on Sundays at 00:00 UTC
    - cron: '0 0 * * 0'
  workflow_dispatch:
    inputs:
      provider_version:
        description: 'AWS Provider version to check (default: latest)'
        required: false
        default: 'latest'
        type: string
        # NOTE: workflow_dispatch inputs do not support a `pattern` key
        # (valid keys are description/required/default/type/options), so the
        # previous `pattern:` entry was ignored. The format — "latest" or a
        # semantic version — is enforced by the "Validate inputs" step at runtime.
      dry_run:
        description: 'Run analysis without creating issues'
        required: false
        default: false
        type: boolean
      force_scan:
        description: 'Force full scan even if no changes detected'
        required: false
        default: false
        type: boolean
jobs:
  discover-backup-features:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    permissions:
      contents: write  # needed to push feature-tracker commits
      issues: write    # needed for `gh issue create`
      actions: read
      id-token: write
    steps:
      # Shallow clone — history is not needed; only current module files are scanned.
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 1
      # Seed the tracker database on first run so later jq reads never fail.
      - name: Create feature tracker directory
        run: |
          mkdir -p .github/feature-tracker
          # Create initial tracker file if it doesn't exist
          if [ ! -f .github/feature-tracker/backup-features.json ]; then
            cat > .github/feature-tracker/backup-features.json << 'EOF'
          {
            "last_scan": "1970-01-01T00:00:00Z",
            "provider_version": "0.0.0",
            "scan_history": [],
            "features": {},
            "issues_created": []
          }
          EOF
            echo "Created initial feature tracker file"
          fi
- name: Validate inputs
run: |
# Validate provider_version format
PROVIDER_VERSION="${{ inputs.provider_version || 'latest' }}"
if [[ ! "$PROVIDER_VERSION" =~ ^(latest|[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9\-]+)?(\+[a-zA-Z0-9\-]+)?)$ ]]; then
echo "::error::Invalid provider_version format: $PROVIDER_VERSION"
echo "Must be 'latest' or semantic version (e.g., '5.82.0')"
exit 1
fi
echo "Provider version validation passed: $PROVIDER_VERSION"
      # Best-effort diagnostics: every command is allowed to fail (|| echo)
      # so a transient registry/network issue does not fail the whole run.
      - name: Environment Diagnostics & Cache Clearing
        run: |
          echo "=== Environment Snapshot ==="
          echo "Node.js: $(node --version)"
          echo "NPM: $(npm --version)"
          echo "NPX: $(npx --version)"
          echo "Runner: $(uname -a)"
          echo "Disk space: $(df -h /)"
          echo "Memory: $(free -h)"
          echo "Network test: $(curl -s -o /dev/null -w '%{http_code}' https://registry.npmjs.org/)"
          echo "=== Cache Clearing ==="
          echo "Clearing NPM caches..."
          npm cache clean --force || echo "npm cache clean failed"
          rm -rf ~/.npm/_cacache ~/.npm/_logs || echo "npm cache dir cleanup failed"
          echo "Clearing NPX cache..."
          rm -rf ~/.npm/_npx || echo "npx cache cleanup failed"
          echo "=== Testing MCP Package Access ==="
          echo "Testing Terraform MCP server package access..."
          npm view @modelcontextprotocol/server-terraform version || echo "Package lookup failed"
          echo "Testing Context7 MCP server package access..."
          npm view @upstash/context7-mcp version || echo "Package lookup failed"
          echo "=== Pre-installation Test ==="
          echo "Testing NPX with verbose output..."
          npx --version
          echo "Environment diagnostics complete!"
- name: Run Claude Code Feature Discovery
id: claude-discovery
uses: anthropics/claude-code-action@beta
env:
CLAUDE_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
GITHUB_TOKEN: ${{ secrets.CLAUDE_ISSUE_TOKEN }}
with:
claude_code_oauth_token: ${{ env.CLAUDE_TOKEN }}
mode: agent
github_token: ${{ secrets.CLAUDE_ISSUE_TOKEN }}
# MCP Configuration for Terraform and Context7 documentation access
mcp_config: |
{
"mcpServers": {
"terraform": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"hashicorp/terraform-mcp-server"
]
},
"context7": {
"command": "npx",
"args": [
"-y",
"@upstash/context7-mcp@latest"
]
}
}
}
# Allow necessary tools for feature discovery
allowed_tools: |
mcp__terraform__search_providers
mcp__terraform__get_provider_details
mcp__terraform__get_latest_provider_version
mcp__terraform__search_modules
mcp__terraform__get_module_details
mcp__terraform__get_latest_module_version
mcp__terraform__search_policies
mcp__terraform__get_policy_details
mcp__context7__resolve-library-id
mcp__context7__get-library-docs
Bash(git diff)
Bash(git status)
Bash(gh issue create)
Bash(gh issue list)
Bash(jq)
Bash(cat)
Bash(echo)
# Direct prompt for Claude Code to perform feature discovery
direct_prompt: |
# AWS Backup Feature Discovery Analysis
You are performing automated feature discovery for the terraform-aws-backup module.
## Objective
Analyze the latest AWS provider Backup resources and compare them with the current module implementation to identify:
1. **New Features**: AWS Backup resources/arguments not yet implemented
2. **Deprecations**: Features marked as deprecated in the provider
3. **Bug Fixes**: Important fixes mentioned in provider changelogs
## Configuration
- Provider Version: ${{ inputs.provider_version || 'latest' }}
- Dry Run Mode: ${{ inputs.dry_run }}
- Force Scan: ${{ inputs.force_scan }}
## Process
### Step 1: Load Current State
Read the feature tracking database:
```bash
cat .github/feature-tracker/backup-features.json
```
### Step 2: Fetch AWS Provider Backup Documentation
Use the Terraform MCP server to get the latest Backup documentation:
1. Use `mcp__terraform-server__resolveProviderDocID` with:
- providerName: "aws"
- providerNamespace: "hashicorp"
- serviceSlug: "backup"
- providerVersion: "${{ inputs.provider_version || 'latest' }}"
- providerDataType: "resources"
2. Get documentation for all Backup resources (aws_backup_*)
3. Also check data sources with providerDataType: "data-sources"
### Step 3: Analyze Current Module Implementation
Examine these files to understand current implementation:
- `main.tf` - Primary backup resources and locals
- `iam.tf` - IAM roles and policies
- `notifications.tf` - SNS and notification configurations
- `organizations.tf` - AWS Organizations backup policies
- `selection.tf` - Resource selection logic
- `reports.tf` - Backup reporting configurations
- `audit_manager.tf` - Audit framework configurations
- `variables.tf` - Input variables
- `outputs.tf` - Module outputs
Create an inventory of:
- Implemented resources (aws_backup_vault, aws_backup_plan, etc.)
- Implemented arguments/attributes on each resource
- Configuration patterns used in examples (16 examples available)
### Step 4: Comparison and Analysis
Compare provider documentation with module implementation:
**New Features to Look For:**
- New `aws_backup_*` resources not in the module
- New arguments on existing resources (vault, plan, selection, etc.)
- New data sources (`data.aws_backup_*`)
- New backup lifecycle and retention features
- New compliance and audit capabilities
- New cross-region and cross-account features
- New reporting and monitoring capabilities
- New organization-level backup policies
- New VSS (Volume Shadow Copy Service) features
**Deprecations to Check:**
- Arguments marked as deprecated
- Resources marked for removal
- Backup patterns no longer recommended
- Configuration approaches that are outdated
**Bug Fixes:**
- Check Context7 for AWS provider changelogs
- Look for Backup-related fixes that might affect the module
### Step 5: Issue Creation
**CRITICAL: You MUST create actual GitHub issues when dry_run is false.**
**Current Configuration: dry_run = ${{ inputs.dry_run || 'false' }}**
**If NOT in dry run mode (${{ inputs.dry_run }} == 'false' or inputs.dry_run is empty):**
For EACH new feature discovered, you MUST execute these commands:
**For new AWS Backup resources (aws_backup_*):**
```bash
gh issue create --title "feat: Add support for [EXACT_RESOURCE_NAME]" \
--body "## Feature Request
### AWS Resource
**Resource:** \`[EXACT_RESOURCE_NAME]\`
**Provider Version:** ${{ inputs.provider_version || 'latest' }}
**Discovery Date:** $(date -u '+%Y-%m-%d')
### Description
[RESOURCE_DESCRIPTION_FROM_DOCS]
### Arguments/Attributes
[LIST_OF_ARGUMENTS]
### Implementation Priority
[HIGH/MEDIUM/LOW based on security_impact]
### Security Impact
[SECURITY_IMPLICATIONS]
### References
- AWS Provider Documentation: [DOC_LINK]
- Terraform Registry: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/[resource_name]
### Auto-Discovery Metadata
- Discovered by: AWS Backup Feature Discovery Bot
- Scan ID: $(date +%s)
- Provider Doc ID: [PROVIDER_DOC_ID]" \
--label "enhancement,aws-provider-update,auto-discovered,needs-implementation" \
--assignee "lgallard"
```
**For new arguments on existing resources:**
```bash
gh issue create --title "feat: Add [ARGUMENT_NAME] support to [RESOURCE_NAME]" \
--body "## Enhancement Request
### Resource Enhancement
**Resource:** \`[RESOURCE_NAME]\`
**New Argument:** \`[ARGUMENT_NAME]\`
**Provider Version:** ${{ inputs.provider_version || 'latest' }}
### Argument Details
[ARGUMENT_DESCRIPTION]
### Implementation Impact
[IMPACT_ON_EXISTING_MODULE]
### Auto-Discovery Metadata
- Discovered by: AWS Backup Feature Discovery Bot
- Priority: [PRIORITY_LEVEL]" \
--label "enhancement,aws-provider-update,auto-discovered,argument-addition" \
--assignee "lgallard"
```
**VERIFICATION STEP: After creating each issue, capture the issue number and update the tracker:**
```bash
# Get the issue number from the previous command
ISSUE_NUMBER=$(gh issue list --limit 1 --json number --jq '.[0].number')
echo "Created issue #$ISSUE_NUMBER for [FEATURE_NAME]"
# You MUST update the feature tracker to mark the issue as created (not pending_creation)
# This will be handled in the tracker update step
```
### Step 6: Update Feature Tracker
Update `.github/feature-tracker/backup-features.json` with:
- Current scan timestamp
- Provider version analyzed
- New findings
- Issues created
- Scan summary
### Step 7: Generate Summary Report
Create a comprehensive summary including:
- Features discovered: count and details
- Deprecations found: count and impact
- Issues created: numbers and links
- Recommendations for next steps
## Important Notes
- Skip creating issues for features already tracked as "implemented"
- Check existing GitHub issues to avoid duplicates
- Prioritize compliance, security, and data protection changes
- Focus on Backup-specific features (ignore general AWS provider changes)
- Pay special attention to vault, plan, selection, and audit configurations
- Consider cross-region backup and disaster recovery improvements
- Look for cost optimization and lifecycle management enhancements
## Expected Output
Provide a detailed report of your analysis and actions taken.
- name: Post-process pending issue creation
id: create-pending-issues
if: steps.claude-discovery.conclusion == 'success' && inputs.dry_run != true
run: |
set -euo pipefail
echo "🔍 Checking for features with pending_creation status..."
TRACKER_FILE=".github/feature-tracker/backup-features.json"
ISSUES_CREATED=0
if [ ! -f "$TRACKER_FILE" ]; then
echo "Feature tracker file not found, skipping post-processing"
exit 0
fi
# Extract pending creation features using jq
PENDING_FEATURES=$(jq -r '.issues_created[] | select(.status == "pending_creation") | @base64' "$TRACKER_FILE" 2>/dev/null || echo "")
if [ -z "$PENDING_FEATURES" ]; then
echo "✅ No features with pending_creation status found"
exit 0
fi
echo "📝 Found features requiring issue creation:"
# Process each pending feature
while IFS= read -r feature_data; do
if [ -n "$feature_data" ]; then
# Decode base64 and extract fields
FEATURE_JSON=$(echo "$feature_data" | base64 --decode)
RESOURCE=$(echo "$FEATURE_JSON" | jq -r '.resource')
TITLE=$(echo "$FEATURE_JSON" | jq -r '.title')
ISSUE_TYPE=$(echo "$FEATURE_JSON" | jq -r '.issue_type')
echo "Creating issue for: $RESOURCE"
# Create the issue based on type
if [ "$ISSUE_TYPE" = "new-feature" ]; then
ISSUE_URL=$(gh issue create \
--title "$TITLE" \
--body "## AWS Backup Feature Request
### Resource
**AWS Resource:** \`$RESOURCE\`
**Provider Version:** ${{ inputs.provider_version || 'latest' }}
**Discovery Date:** $(date -u '+%Y-%m-%d')
### Auto-Discovery Details
This feature was automatically discovered by the AWS Backup Feature Discovery workflow.
**Discovery Metadata:**
- Scan Date: $(date -u '+%Y-%m-%d %H:%M:%S UTC')
- Workflow Run: ${{ github.run_id }}
- Repository: ${{ github.repository }}
### Next Steps
1. Review AWS provider documentation for this resource
2. Analyze integration requirements with existing module
3. Design implementation approach
4. Add comprehensive tests
5. Update documentation and examples
### Implementation Priority
This feature requires evaluation for:
- Security and compliance impact
- Backward compatibility
- Module architecture integration
---
*Auto-generated by AWS Backup Feature Discovery Bot*" \
--label "enhancement,aws-provider-update,auto-discovered,needs-triage" \
--assignee "lgallard")
# Extract issue number from URL
ISSUE_NUMBER=$(echo "$ISSUE_URL" | grep -o '[0-9]*$')
echo "✅ Created issue #$ISSUE_NUMBER for $RESOURCE"
ISSUES_CREATED=$((ISSUES_CREATED + 1))
# Update the tracker file to mark as created (using temporary approach)
jq --arg resource "$RESOURCE" --arg issue_num "$ISSUE_NUMBER" --arg issue_url "$ISSUE_URL" '
(.issues_created[] | select(.resource == $resource)) |= (
.status = "created" |
.issue_number = ($issue_num | tonumber) |
.issue_url = $issue_url |
.actual_creation_date = now | strftime("%Y-%m-%dT%H:%M:%SZ")
)' "$TRACKER_FILE" > "${TRACKER_FILE}.tmp" && mv "${TRACKER_FILE}.tmp" "$TRACKER_FILE"
fi
fi
done <<< "$PENDING_FEATURES"
echo "🎯 Post-processing complete: Created $ISSUES_CREATED issues"
echo "issues_created=$ISSUES_CREATED" >> $GITHUB_OUTPUT
- name: Commit feature tracker updates
if: steps.claude-discovery.conclusion == 'success'
run: |
set -euo pipefail
# Atomic file operations with locking
LOCKFILE="/tmp/feature-tracker.lock"
TRACKER_FILE=".github/feature-tracker/backup-features.json"
TEMP_FILE="${TRACKER_FILE}.tmp"
# Acquire lock with timeout
exec 200>"$LOCKFILE"
if ! flock -w 30 200; then
echo "::error::Failed to acquire lock for feature tracker update"
exit 1
fi
# Check if there are changes to commit
if git diff --quiet .github/feature-tracker/; then
echo "No changes to feature tracker detected"
flock -u 200
exit 0
fi
# Validate JSON before committing
if [ -f "$TRACKER_FILE" ]; then
if ! python3 -m json.tool "$TRACKER_FILE" > /dev/null; then
echo "::error::Invalid JSON in feature tracker file"
flock -u 200
exit 1
fi
fi
# Configure git
git config --global user.name "AWS Backup Feature Discovery Bot"
git config --global user.email "actions@github.com"
# Atomic commit with validation
git add .github/feature-tracker/
git commit -m "chore: update AWS Backup feature discovery tracker
- Updated feature tracking database
- Scan completed: $(date -u '+%Y-%m-%d %H:%M:%S UTC')
- Provider version: ${{ inputs.provider_version || 'latest' }}
[skip ci]"
# Push changes
git push origin HEAD
# Release lock
flock -u 200
echo "Feature tracker updated successfully with atomic operations"
- name: Workflow Summary
if: always()
run: |
echo "## 🔍 AWS Backup Feature Discovery Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Configuration
echo "### ⚙️ Configuration" >> $GITHUB_STEP_SUMMARY
echo "- **Provider Version**: \`${{ inputs.provider_version || 'latest' }}\`" >> $GITHUB_STEP_SUMMARY
echo "- **Dry Run Mode**: \`${{ inputs.dry_run }}\`" >> $GITHUB_STEP_SUMMARY
echo "- **Force Scan**: \`${{ inputs.force_scan }}\`" >> $GITHUB_STEP_SUMMARY
echo "- **Triggered**: \`${{ github.event_name }}\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# MCP Configuration
echo "### 🔗 MCP Servers" >> $GITHUB_STEP_SUMMARY
echo "- **Terraform MCP**: \`@modelcontextprotocol/server-terraform@latest\`" >> $GITHUB_STEP_SUMMARY
echo "- **Context7 MCP**: \`@upstash/context7-mcp@latest\`" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Status
echo "### 📊 Execution Status" >> $GITHUB_STEP_SUMMARY
if [ "${{ steps.claude-discovery.conclusion }}" = "success" ]; then
echo "- ✅ **Feature Discovery**: Completed successfully" >> $GITHUB_STEP_SUMMARY
else
echo "- ❌ **Feature Discovery**: Failed" >> $GITHUB_STEP_SUMMARY
fi
# Issue Creation Status
if [ "${{ inputs.dry_run }}" = "true" ]; then
echo "- 🧪 **Issue Creation**: Skipped (dry run mode)" >> $GITHUB_STEP_SUMMARY
elif [ "${{ steps.create-pending-issues.conclusion }}" = "success" ]; then
ISSUES_COUNT="${{ steps.create-pending-issues.outputs.issues_created || '0' }}"
if [ "$ISSUES_COUNT" -gt 0 ]; then
echo "- ✅ **Issue Creation**: Created $ISSUES_COUNT new issues" >> $GITHUB_STEP_SUMMARY
else
echo "- ✅ **Issue Creation**: No new issues needed" >> $GITHUB_STEP_SUMMARY
fi
elif [ "${{ steps.create-pending-issues.conclusion }}" = "skipped" ]; then
echo "- ⏭️ **Issue Creation**: Skipped (no feature discovery or dry run)" >> $GITHUB_STEP_SUMMARY
else
echo "- ❌ **Issue Creation**: Failed or incomplete" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
# Available Commands
echo "### 🚀 Manual Execution" >> $GITHUB_STEP_SUMMARY
echo "Run feature discovery manually:" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
echo "# Standard discovery" >> $GITHUB_STEP_SUMMARY
echo "gh workflow run feature-discovery.yml" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "# Dry run mode" >> $GITHUB_STEP_SUMMARY
echo "gh workflow run feature-discovery.yml -f dry_run=true" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "# Specific provider version" >> $GITHUB_STEP_SUMMARY
echo "gh workflow run feature-discovery.yml -f provider_version=5.82.0" >> $GITHUB_STEP_SUMMARY
echo "\`\`\`" >> $GITHUB_STEP_SUMMARY