Compare commits
234 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| f695b97682 | |||
| 4c8c163558 | |||
| 22c79cbabc | |||
| 2f2abb5c81 | |||
| 8047d98736 | |||
| 0e2bed2b70 | |||
| 8531c006d4 | |||
| 14dcb74e89 | |||
| 1700227483 | |||
| b335f98330 | |||
| 2e8343526e | |||
| 0f26f25cb9 | |||
| 21a35a8b48 | |||
| 0065db672c | |||
| ac8a77e539 | |||
| 8956d5bc53 | |||
| 40f41c3028 | |||
| 5bb789333d | |||
| 9457911e4a | |||
| aa8c6fbd90 | |||
| c37d584aa2 | |||
| fd0d948c16 | |||
| 7bd7460b5b | |||
| 69f4fb418a | |||
| 18ffb7af61 | |||
| c11e3217ea | |||
| d16443109a | |||
| f93f9c9780 | |||
| 55d11e2887 | |||
| f192139cc3 | |||
| af41fc6cb8 | |||
| 777bd2a488 | |||
| 99c2e55ec4 | |||
| bf720fcea2 | |||
| e81eeb0c4d | |||
| f586940b6c | |||
| bcb8933261 | |||
| 73542f9efe | |||
| a3695e3e6a | |||
| ba9da112db | |||
| 1b91e5441f | |||
| d73878a1e1 | |||
| 1cc3eaa0fa | |||
| bf65c3cf45 | |||
| 456a1508c0 | |||
| 3991f1625f | |||
| 4c1493506d | |||
| 80e75061da | |||
| 5b48737cc7 | |||
| da2dac70ac | |||
| 8c8ea1209b | |||
| df3718a06c | |||
| 6486d04e61 | |||
| 96928ac43c | |||
| 8e7de3f59e | |||
| fbc0b7a66d | |||
| 78eda6672e | |||
| 9f05634531 | |||
| defc308a9d | |||
| 9a6c030a74 | |||
| afe2caddd6 | |||
| 8349bc7c3a | |||
| 04ab1cfc8d | |||
| d233fd850e | |||
| 948aca316b | |||
| 3f05737bf2 | |||
| 4aabf47a5d | |||
| cb47aef7e4 | |||
| bb05fcff6f | |||
| 8634ba84ca | |||
| 3bd6b9171e | |||
| 18a933ba45 | |||
| 0187d9a553 | |||
| 7672da5b6d | |||
| 966266f742 | |||
| 0738d08966 | |||
| 4603096b93 | |||
| a5d3455333 | |||
| c83f961ffc | |||
| a6ed312eaf | |||
| 35b0cf26d9 | |||
| 0f011a1797 | |||
| 7719110f1e | |||
| 212ac2f0d5 | |||
| 2e49587cc2 | |||
| 47285675b9 | |||
| 2678df2e4c | |||
| 862e9f3de9 | |||
| 260a9645be | |||
| 1d5d606085 | |||
| c249be1cc0 | |||
| 1ec3a5fe44 | |||
| 3b082ea736 | |||
| b6ebd468c6 | |||
| ceb210f281 | |||
| fcb627dccd | |||
| 098460702b | |||
| 5849898283 | |||
| 878ca9f032 | |||
| 24f86b74f5 | |||
| a8c1c8c63b | |||
| 1ff17c429b | |||
| 145724bc95 | |||
| 5aefc9831d | |||
| cf3ca440b9 | |||
| 4109a4405d | |||
| 055dc69158 | |||
| 1ea9850842 | |||
| 6e92eecdf6 | |||
| 844f1e31af | |||
| fd83be9d95 | |||
| 456f7fffc5 | |||
| 541d64168d | |||
| 26c4c80fd2 | |||
| bcc8099f91 | |||
| 6392dcd9a0 | |||
| 916e1713d8 | |||
| 08fcfcdac8 | |||
| 20daf73371 | |||
| b909643c21 | |||
| 5a0ee4aaa7 | |||
| 8289f1edda | |||
| 74672f6625 | |||
| c1ea6b167c | |||
| 71679691c4 | |||
| cbf6f647aa | |||
| fb9c5afc5f | |||
| 5d2626b639 | |||
| 2399df693d | |||
| df44c7d6f1 | |||
| 6d296b84d5 | |||
| 1dadb93452 | |||
| 6ea4f6e123 | |||
| 5a8417cf18 | |||
| 4e922a2a48 | |||
| 3ef48a97cf | |||
| 35d629d8ce | |||
| dba1391a08 | |||
| 961d34d05a | |||
| 5d0c21c5d9 | |||
| 71046216ce | |||
| 89647ae457 | |||
| 216d30fd45 | |||
| 82e36e1fd3 | |||
| 34994e615a | |||
| 6daceaeb2b | |||
| 50da5ea82d | |||
| da7fb6cd49 | |||
| cfa90b36d8 | |||
| 72fb21b132 | |||
| 948bfce5a0 | |||
| 049a43ace2 | |||
| 1c845d2b3c | |||
| cb74761675 | |||
| be63c6a231 | |||
| eab18925c7 | |||
| 4c9d23acb1 | |||
| ecdac00145 | |||
| 2555a86f5f | |||
| ebce965730 | |||
| a2af1329d7 | |||
| bf3a967fad | |||
| 3dcc80a0e8 | |||
| 624ff8bc1b | |||
| 59f51f6149 | |||
| b4090b8301 | |||
| 02b58ad1a9 | |||
| 81b7e893dc | |||
| 8c6c94cc34 | |||
| fac60679bc | |||
| 55122d523c | |||
| 5825f14ef1 | |||
| 60c2f1f18c | |||
| 44e3911e01 | |||
| 28f642d39f | |||
| 4dfe99064b | |||
| 097cbe3df3 | |||
| 3123e34076 | |||
| c334ea9f91 | |||
| bfecea00ad | |||
| 1c7c462771 | |||
| 308ba24e90 | |||
| 4a8f6550af | |||
| c56cce6319 | |||
| 832e4f9fa2 | |||
| 63008d19af | |||
| fbd78301a0 | |||
| 2961d68200 | |||
| f86683d9ca | |||
| 58bcfb9ee5 | |||
| 5be5124e70 | |||
| b8291710d9 | |||
| b51a8da125 | |||
| 2a0882869b | |||
| c3d690371b | |||
| 30c099f872 | |||
| c660c1a676 | |||
| c44a88b57a | |||
| 509017f8df | |||
| 7243059cdb | |||
| cd3cfc3837 | |||
| 509d68f023 | |||
| de757e848d | |||
| 1fb462bab4 | |||
| 4c4312f9b5 | |||
| ff684d7544 | |||
| b69b4dc98c | |||
| 34351ae14e | |||
| d9945f5e26 | |||
| 6247656a63 | |||
| c46217918f | |||
| 330e4a1e85 | |||
| bded05688e | |||
| 13e1836604 | |||
| d611d00b55 | |||
| e4a2e06def | |||
| df3c21701d | |||
| 85c816c8cb | |||
| 4633d3f520 | |||
| 60288c09e4 | |||
| 49bc3820aa | |||
| eed8aa0e9d | |||
| e13879ab77 | |||
| 8a8e4faa42 | |||
| faa2570883 | |||
| 5c0ee0c5a0 | |||
| 780a32d706 | |||
| 29f05e0b70 | |||
| 0144b569ad | |||
| dd2747d12a | |||
| 5ef63cd7e6 | |||
| ac6002028d | |||
| 5f80f1b76a | |||
| 95276760be |
@@ -0,0 +1,2 @@
|
||||
*.go @Ullaakut @whiteboxsolutions @nblair2
|
||||
*.md @Ullaakut @whiteboxsolutions @nblair2
|
||||
@@ -0,0 +1,103 @@
|
||||
name: Bug report
|
||||
description: Create a report to help Cameradar improve
|
||||
labels:
|
||||
- needs-triage
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please make sure your problem is not already addressed in another issue.
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Description
|
||||
description: Please give a clear and concise description of the bug.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: version
|
||||
attributes:
|
||||
label: Cameradar version
|
||||
description: Output of `cameradar version`
|
||||
render: bash
|
||||
placeholder: |
|
||||
Version: v6.0.2-SNAPSHOT-c11e321
|
||||
Commit: c11e3217ea0b1ea9e45d0da4c072e07775bde68c
|
||||
Build date: 2026-02-03T10:02:30Z
|
||||
Nmap: 7.94SVN
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: env
|
||||
attributes:
|
||||
label: Environment
|
||||
description: How do you run cameradar?
|
||||
options:
|
||||
- "`ullaakut/cameradar` docker image"
|
||||
- Precompiled binary from GitHub releases
|
||||
- Custom docker image
|
||||
- Custom binary build
|
||||
default: 0
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: os
|
||||
attributes:
|
||||
label: Operating system
|
||||
description: Operating system where you run cameradar.
|
||||
render: bash
|
||||
placeholder: |
|
||||
- OS: <Windows | macOS | Linux | Other>
|
||||
- OS version: <version>
|
||||
- Architecture: <arch>
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: cmd
|
||||
attributes:
|
||||
label: Command
|
||||
description: The command that you ran and all of its arguments. Make sure to redact any sensitive information. Make sure to run your command in debug mode.
|
||||
placeholder: |
|
||||
E.g. `docker run --net=host -it ullaakut/cameradar -t localhost --debug`
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: output
|
||||
attributes:
|
||||
label: Output logs
|
||||
description: Output of the command you ran, including any error messages. Make sure to redact any sensitive information.
|
||||
placeholder: |
|
||||
2026-02-03T09:33:24Z [INFO] Startup: Running cameradar version 6.0.2-SNAPSHOT-75bf524, commit 75bf524
|
||||
2026-02-03T09:33:24Z [INFO] Startup: targets: localhost
|
||||
2026-02-03T09:33:24Z [INFO] Startup: ports: 554, 5554, 8554, http
|
||||
...
|
||||
Accessible streams: 1
|
||||
• 127.0.0.1:8554 (GStreamer rtspd)
|
||||
Authentication: digest
|
||||
Routes: live.sdp
|
||||
Credentials: admin:12345
|
||||
Availability: yes
|
||||
RTSP URL: rtsp://admin:12345@127.0.0.1:8554/live.sdp
|
||||
Admin panel: http://127.0.0.1/
|
||||
- type: textarea
|
||||
id: expected
|
||||
attributes:
|
||||
label: Expected behavior
|
||||
description: What is the expected behavior?
|
||||
placeholder: |
|
||||
E.g. "Cameradar should have been able to find the camera's RTSP stream using the provided credentials."
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: Additional Info
|
||||
description: Additional info you want to provide such as system info, target info, network conditions etc.
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
attributes:
|
||||
label: Code of Conduct
|
||||
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/Ullaakut/cameradar/blob/master/CODE_OF_CONDUCT.md).
|
||||
options:
|
||||
- label: I agree to follow this project's Code of Conduct
|
||||
required: true
|
||||
@@ -0,0 +1,5 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Cameradar Community discussion board
|
||||
url: https://github.com/Ullaakut/cameradar/discussions
|
||||
about: Please ask and answer questions here.
|
||||
@@ -0,0 +1,31 @@
|
||||
name: Feature request
|
||||
description: Propose a feature or enhancement to help Cameradar improve
|
||||
labels:
|
||||
- needs-triage
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please make sure your request is not already proposed in another issue.
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Description
|
||||
description: Please give a clear and concise description of the feature request.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: Additional Info
|
||||
description: Additional info you want to provide.
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
attributes:
|
||||
label: Code of Conduct
|
||||
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/Ullaakut/cameradar/blob/master/CODE_OF_CONDUCT.md).
|
||||
options:
|
||||
- label: I agree to follow this project's Code of Conduct
|
||||
required: true
|
||||
@@ -0,0 +1,9 @@
|
||||
## Goal of this PR
|
||||
|
||||
<!-- A brief description of the change being made with this pull request. -->
|
||||
|
||||
Fixes #
|
||||
|
||||
## How did I test it?
|
||||
|
||||
<!-- A brief description of the steps taken to test this pull request. -->
|
||||
@@ -0,0 +1,20 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
groups:
|
||||
all:
|
||||
patterns:
|
||||
- "*"
|
||||
open-pull-requests-limit: 10
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
groups:
|
||||
all:
|
||||
patterns:
|
||||
- "*"
|
||||
open-pull-requests-limit: 10
|
||||
@@ -0,0 +1,637 @@
|
||||
---
|
||||
applyTo: '.github/workflows/*.yml'
|
||||
description: 'Comprehensive guide for building robust, secure, and efficient CI/CD pipelines using GitHub Actions. Covers workflow structure, jobs, steps, environment variables, secret management, caching, matrix strategies, testing, and deployment strategies.'
|
||||
---
|
||||
|
||||
# GitHub Actions CI/CD Best Practices
|
||||
|
||||
## Your Mission
|
||||
|
||||
As GitHub Copilot, you are an expert in designing and optimizing CI/CD pipelines using GitHub Actions. Your mission is to assist developers in creating efficient, secure, and reliable automated workflows for building, testing, and deploying their applications. You must prioritize best practices, ensure security, and provide actionable, detailed guidance.
|
||||
|
||||
## Core Concepts and Structure
|
||||
|
||||
### **1. Workflow Structure (`.github/workflows/*.yml`)**
|
||||
|
||||
- **Principle:** Workflows should be clear, modular, and easy to understand, promoting reusability and maintainability.
|
||||
- **Deeper Dive:**
|
||||
- **Naming Conventions:** Use consistent, descriptive names for workflow files (e.g., `build-and-test.yml`, `deploy-prod.yml`).
|
||||
- **Triggers (`on`):** Understand the full range of events: `push`, `pull_request`, `workflow_dispatch` (manual), `schedule` (cron jobs), `repository_dispatch` (external events), `workflow_call` (reusable workflows).
|
||||
- **Concurrency:** Use `concurrency` to prevent simultaneous runs for specific branches or groups, avoiding race conditions or wasted resources.
|
||||
- **Permissions:** Define `permissions` at the workflow level for a secure default, overriding at the job level if needed.
|
||||
- **Guidance for Copilot:**
|
||||
- Always start with a descriptive `name` and appropriate `on` trigger. Suggest granular triggers for specific use cases (e.g., `on: push: branches: [main]` vs. `on: pull_request`).
|
||||
- Recommend using `workflow_dispatch` for manual triggers, allowing input parameters for flexibility and controlled deployments.
|
||||
- Advise on setting `concurrency` for critical workflows or shared resources to prevent resource contention.
|
||||
- Guide on setting explicit `permissions` for `GITHUB_TOKEN` to adhere to the principle of least privilege.
|
||||
- **Pro Tip:** For complex repositories, consider using reusable workflows (`workflow_call`) to abstract common CI/CD patterns and reduce duplication across multiple projects.
|
||||
|
||||
### **2. Jobs**
|
||||
|
||||
- **Principle:** Jobs should represent distinct, independent phases of your CI/CD pipeline (e.g., build, test, deploy, lint, security scan).
|
||||
- **Deeper Dive:**
|
||||
- **`runs-on`:** Choose appropriate runners. `ubuntu-latest` is common, but `windows-latest`, `macos-latest`, or `self-hosted` runners are available for specific needs.
|
||||
- **`needs`:** Clearly define dependencies. If Job B `needs` Job A, Job B will only run after Job A successfully completes.
|
||||
- **`outputs`:** Pass data between jobs using `outputs`. This is crucial for separating concerns (e.g., build job outputs artifact path, deploy job consumes it).
|
||||
- **`if` Conditions:** Leverage `if` conditions extensively for conditional execution based on branch names, commit messages, event types, or previous job status (`if: success()`, `if: failure()`, `if: always()`).
|
||||
- **Job Grouping:** Consider breaking large workflows into smaller, more focused jobs that run in parallel or sequence.
|
||||
- **Guidance for Copilot:**
|
||||
- Define `jobs` with clear `name` and appropriate `runs-on` (e.g., `ubuntu-latest`, `windows-latest`, `self-hosted`).
|
||||
- Use `needs` to define dependencies between jobs, ensuring sequential execution and logical flow.
|
||||
- Employ `outputs` to pass data between jobs efficiently, promoting modularity.
|
||||
- Utilize `if` conditions for conditional job execution (e.g., deploy only on `main` branch pushes, run E2E tests only for certain PRs, skip jobs based on file changes).
|
||||
- **Example (Conditional Deployment and Output Passing):**
|
||||
```yaml
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
artifact_path: ${{ steps.package_app.outputs.path }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 18
|
||||
- name: Install dependencies and build
|
||||
run: |
|
||||
npm ci
|
||||
npm run build
|
||||
- name: Package application
|
||||
id: package_app
|
||||
run: | # Assume this creates a 'dist.zip' file
|
||||
zip -r dist.zip dist
|
||||
echo "path=dist.zip" >> "$GITHUB_OUTPUT"
|
||||
- name: Upload build artifact
|
||||
        uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: my-app-build
|
||||
path: dist.zip
|
||||
|
||||
deploy-staging:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: github.ref == 'refs/heads/develop' || github.ref == 'refs/heads/main'
|
||||
environment: staging
|
||||
steps:
|
||||
- name: Download build artifact
|
||||
        uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: my-app-build
|
||||
- name: Deploy to Staging
|
||||
run: |
|
||||
unzip dist.zip
|
||||
echo "Deploying ${{ needs.build.outputs.artifact_path }} to staging..."
|
||||
# Add actual deployment commands here
|
||||
```
|
||||
|
||||
### **3. Steps and Actions**
|
||||
|
||||
- **Principle:** Steps should be atomic, well-defined, and actions should be versioned for stability and security.
|
||||
- **Deeper Dive:**
|
||||
- **`uses`:** Referencing marketplace actions (e.g., `actions/checkout@v4`, `actions/setup-node@v3`) or custom actions. Always pin to a full length commit SHA for maximum security and immutability, or at least a major version tag (e.g., `@v4`). Avoid pinning to `main` or `latest`.
|
||||
- **`name`:** Essential for clear logging and debugging. Make step names descriptive.
|
||||
- **`run`:** For executing shell commands. Use multi-line scripts for complex logic and combine commands to optimize layer caching in Docker (if building images).
|
||||
- **`env`:** Define environment variables at the step or job level. Do not hardcode sensitive data here.
|
||||
- **`with`:** Provide inputs to actions. Ensure all required inputs are present.
|
||||
- **Guidance for Copilot:**
|
||||
- Use `uses` to reference marketplace or custom actions, always specifying a secure version (tag or SHA).
|
||||
- Use `name` for each step for readability in logs and easier debugging.
|
||||
- Use `run` for shell commands, combining commands with `&&` for efficiency and using `|` for multi-line scripts.
|
||||
- Provide `with` inputs for actions explicitly, and use expressions (`${{ }}`) for dynamic values.
|
||||
- **Security Note:** Audit marketplace actions before use. Prefer actions from trusted sources (e.g., `actions/` organization) and review their source code if possible. Use `dependabot` for action version updates.
|
||||
|
||||
## Security Best Practices in GitHub Actions
|
||||
|
||||
### **1. Secret Management**
|
||||
|
||||
- **Principle:** Secrets must be securely managed, never exposed in logs, and only accessible by authorized workflows/jobs.
|
||||
- **Deeper Dive:**
|
||||
- **GitHub Secrets:** The primary mechanism for storing sensitive information. Encrypted at rest and only decrypted when passed to a runner.
|
||||
- **Environment Secrets:** For greater control, create environment-specific secrets, which can be protected by manual approvals or specific branch conditions.
|
||||
- **Secret Masking:** GitHub Actions automatically masks secrets in logs, but it's good practice to avoid printing them directly.
|
||||
- **Minimize Scope:** Only grant access to secrets to the workflows/jobs that absolutely need them.
|
||||
- **Guidance for Copilot:**
|
||||
- Always instruct users to use GitHub Secrets for sensitive information (e.g., API keys, passwords, cloud credentials, tokens).
|
||||
- Access secrets via `secrets.<SECRET_NAME>` in workflows.
|
||||
- Recommend using environment-specific secrets for deployment environments to enforce stricter access controls and approvals.
|
||||
- Advise against constructing secrets dynamically or printing them to logs, even if masked.
|
||||
- **Example (Environment Secrets with Approval):**
|
||||
```yaml
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: production
|
||||
url: https://prod.example.com
|
||||
steps:
|
||||
- name: Deploy to production
|
||||
env:
|
||||
PROD_API_KEY: ${{ secrets.PROD_API_KEY }}
|
||||
run: ./deploy-script.sh
|
||||
```
|
||||
|
||||
### **2. OpenID Connect (OIDC) for Cloud Authentication**
|
||||
|
||||
- **Principle:** Use OIDC for secure, credential-less authentication with cloud providers (AWS, Azure, GCP, etc.), eliminating the need for long-lived static credentials.
|
||||
- **Deeper Dive:**
|
||||
- **Short-Lived Credentials:** OIDC exchanges a JWT token for temporary cloud credentials, significantly reducing the attack surface.
|
||||
- **Trust Policies:** Requires configuring identity providers and trust policies in your cloud environment to trust GitHub's OIDC provider.
|
||||
- **Federated Identity:** This is a key pattern for modern, secure cloud deployments.
|
||||
- **Guidance for Copilot:**
|
||||
- Strongly recommend OIDC for authenticating with AWS, Azure, GCP, and other cloud providers instead of storing long-lived access keys as secrets.
|
||||
- Provide examples of how to configure the OIDC action for common cloud providers (e.g., `aws-actions/configure-aws-credentials@v4`).
|
||||
- Explain the concept of trust policies and how they relate to OIDC setup.
|
||||
- **Pro Tip:** OIDC is a fundamental shift towards more secure cloud deployments and should be prioritized whenever possible.
|
||||
|
||||
### **3. Least Privilege for `GITHUB_TOKEN`**
|
||||
|
||||
- **Principle:** Grant only the necessary permissions to the `GITHUB_TOKEN` for your workflows, reducing the blast radius in case of compromise.
|
||||
- **Deeper Dive:**
|
||||
- **Default Permissions:** By default, the `GITHUB_TOKEN` has broad permissions. This should be explicitly restricted.
|
||||
- **Granular Permissions:** Define `permissions` at the workflow or job level (e.g., `contents: read`, `pull-requests: write`, `issues: read`).
|
||||
- **Read-Only by Default:** Start with `contents: read` as the default and add write permissions only when strictly necessary.
|
||||
- **Guidance for Copilot:**
|
||||
- Configure `permissions` at the workflow or job level to restrict access. Always prefer `contents: read` as the default.
|
||||
- Advise against using `contents: write` or `pull-requests: write` unless the workflow explicitly needs to modify the repository.
|
||||
- Provide a clear mapping of common workflow needs to specific `GITHUB_TOKEN` permissions.
|
||||
- **Example (Least Privilege):**
|
||||
```yaml
|
||||
permissions:
|
||||
  contents: read # Explicitly restrict to read-only (least privilege); the default for GITHUB_TOKEN depends on repository/organization settings
|
||||
pull-requests: write # Only if workflow needs to update PRs
|
||||
checks: write # For updating checks
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
permissions:
|
||||
contents: read # This job only needs to read code, override workflow default
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- run: npm run lint
|
||||
```
|
||||
|
||||
### **4. Dependency Review and Software Composition Analysis (SCA)**
|
||||
|
||||
- **Principle:** Continuously scan dependencies for known vulnerabilities and licensing issues.
|
||||
- **Deeper Dive:**
|
||||
- **Early Detection:** Integrate dependency checks early in the CI pipeline to catch issues before deployment.
|
||||
- **Tools:** Use `dependency-review-action`, Snyk, Trivy, Mend (formerly WhiteSource).
|
||||
- **Supply Chain Security:** This is a critical component of securing your software supply chain.
|
||||
- **Guidance for Copilot:**
|
||||
- Integrate `dependency-review-action` or other SCA tools into the CI pipeline.
|
||||
- Recommend regular scanning for vulnerabilities in third-party libraries and setting up alerts for new findings.
|
||||
- Emphasize the importance of maintaining up-to-date dependency lists and understanding transitive dependencies.
|
||||
|
||||
### **5. Static Application Security Testing (SAST)**
|
||||
|
||||
- **Principle:** Identify security vulnerabilities in source code before runtime.
|
||||
- **Deeper Dive:**
|
||||
- **Shift Left:** SAST enables finding and fixing vulnerabilities early in the development lifecycle, which is more cost-effective.
|
||||
- **Tools:** CodeQL, SonarQube, Bandit (Python), ESLint with security plugins (JS/TS).
|
||||
- **Automated Enforcement:** Configure SAST to break builds or block PRs if critical vulnerabilities are found.
|
||||
- **Guidance for Copilot:**
|
||||
- Integrate SAST tools (e.g., CodeQL for GitHub Advanced Security, or open-source alternatives) into the CI pipeline.
|
||||
- Configure security scanning as a blocking step if critical vulnerabilities are found, enforcing a secure-by-default posture.
|
||||
- Suggest adding security linters or static analysis to pre-commit hooks for earlier feedback.
|
||||
|
||||
### **6. Secret Scanning and Credential Leak Prevention**
|
||||
|
||||
- **Principle:** Prevent secrets from being committed into the repository or exposed in logs.
|
||||
- **Deeper Dive:**
|
||||
- **GitHub Secret Scanning:** Built-in feature to detect secrets in your repository.
|
||||
- **Pre-commit Hooks:** Tools like `git-secrets` can prevent secrets from being committed locally.
|
||||
- **Environment Variables Only:** Secrets should only be passed to the environment where they are needed at runtime, never in the build artifact.
|
||||
- **Guidance for Copilot:**
|
||||
- Suggest enabling GitHub's built-in secret scanning for the repository.
|
||||
- Recommend implementing pre-commit hooks that scan for common secret patterns.
|
||||
- Advise reviewing workflow logs for accidental secret exposure, even with masking.
|
||||
|
||||
### **7. Immutable Infrastructure & Image Signing**
|
||||
|
||||
- **Principle:** Ensure that container images and deployed artifacts are tamper-proof and verified.
|
||||
- **Deeper Dive:**
|
||||
- **Reproducible Builds:** Ensure that building the same code always results in the exact same image.
|
||||
- **Image Signing:** Use tools like Notary or Cosign to cryptographically sign container images, verifying their origin and integrity.
|
||||
- **Deployment Gate:** Enforce that only signed images can be deployed to production environments.
|
||||
- **Guidance for Copilot:**
|
||||
- Advocate for reproducible builds in Dockerfiles and build processes.
|
||||
- Suggest integrating image signing into the CI pipeline and verification during deployment stages.
|
||||
|
||||
## Optimization and Performance
|
||||
|
||||
### **1. Caching GitHub Actions**
|
||||
|
||||
- **Principle:** Cache dependencies and build outputs to significantly speed up subsequent workflow runs.
|
||||
- **Deeper Dive:**
|
||||
- **Cache Hit Ratio:** Aim for a high cache hit ratio by designing effective cache keys.
|
||||
- **Cache Keys:** Use a unique key based on file hashes (e.g., `hashFiles('**/package-lock.json')`, `hashFiles('**/requirements.txt')`) to invalidate the cache only when dependencies change.
|
||||
- **Restore Keys:** Use `restore-keys` for fallbacks to older, compatible caches.
|
||||
- **Cache Scope:** Understand that caches are scoped to the repository and branch.
|
||||
- **Guidance for Copilot:**
|
||||
- Use `actions/cache@v4` for caching common package manager dependencies (Node.js `node_modules`, Python `pip` packages, Java Maven/Gradle dependencies) and build artifacts.
|
||||
- Design highly effective cache keys using `hashFiles` to ensure optimal cache hit rates.
|
||||
- Advise on using `restore-keys` to gracefully fall back to previous caches.
|
||||
- **Example (Advanced Caching for Monorepo):**
|
||||
```yaml
|
||||
- name: Cache Node.js modules
|
||||
  uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.npm
|
||||
./node_modules # For monorepos, cache specific project node_modules
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}-${{ github.run_id }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}-
|
||||
${{ runner.os }}-node-
|
||||
```
|
||||
|
||||
### **2. Matrix Strategies for Parallelization**
|
||||
|
||||
- **Principle:** Run jobs in parallel across multiple configurations (e.g., different Node.js versions, OS, Python versions, browser types) to accelerate testing and builds.
|
||||
- **Deeper Dive:**
|
||||
- **`strategy.matrix`:** Define a matrix of variables.
|
||||
- **`include`/`exclude`:** Fine-tune combinations.
|
||||
- **`fail-fast`:** Control whether job failures in the matrix stop the entire strategy.
|
||||
- **Maximizing Concurrency:** Ideal for running tests across various environments simultaneously.
|
||||
- **Guidance for Copilot:**
|
||||
- Utilize `strategy.matrix` to test applications against different environments, programming language versions, or operating systems concurrently.
|
||||
- Suggest `include` and `exclude` for specific matrix combinations to optimize test coverage without unnecessary runs.
|
||||
- Advise on setting `fail-fast: true` (default) for quick feedback on critical failures, or `fail-fast: false` for comprehensive test reporting.
|
||||
- **Example (Multi-version, Multi-OS Test Matrix):**
|
||||
```yaml
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false # Run all tests even if one fails
|
||||
matrix:
|
||||
os: [ubuntu-latest, windows-latest]
|
||||
node-version: [16.x, 18.x, 20.x]
|
||||
browser: [chromium, firefox]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
- name: Install Playwright browsers
|
||||
run: npx playwright install ${{ matrix.browser }}
|
||||
- name: Run tests
|
||||
run: npm test
|
||||
```
|
||||
|
||||
### **3. Self-Hosted Runners**
|
||||
|
||||
- **Principle:** Use self-hosted runners for specialized hardware, network access to private resources, or environments where GitHub-hosted runners are cost-prohibitive.
|
||||
- **Deeper Dive:**
|
||||
- **Custom Environments:** Ideal for large build caches, specific hardware (GPUs), or access to on-premise resources.
|
||||
- **Cost Optimization:** Can be more cost-effective for very high usage.
|
||||
- **Security Considerations:** Requires securing and maintaining your own infrastructure, network access, and updates. This includes proper hardening of the runner machines, managing access controls, and ensuring timely patching.
|
||||
- **Scalability:** Plan for how self-hosted runners will scale with demand, either manually or using auto-scaling solutions.
|
||||
- **Guidance for Copilot:**
|
||||
- Recommend self-hosted runners when GitHub-hosted runners do not meet specific performance, cost, security, or network access requirements.
|
||||
- Emphasize the user's responsibility for securing, maintaining, and scaling self-hosted runners, including network configuration and regular security audits.
|
||||
- Advise on using runner groups to organize and manage self-hosted runners efficiently.
|
||||
|
||||
### **4. Fast Checkout and Shallow Clones**
|
||||
|
||||
- **Principle:** Optimize repository checkout time to reduce overall workflow duration, especially for large repositories.
|
||||
- **Deeper Dive:**
|
||||
- **`fetch-depth`:** Controls how much of the Git history is fetched. `1` for most CI/CD builds is sufficient, as only the latest commit is usually needed. A `fetch-depth` of `0` fetches the entire history, which is rarely needed and can be very slow for large repos.
|
||||
- **`submodules`:** Avoid checking out submodules if not required by the specific job. Fetching submodules adds significant overhead.
|
||||
- **`lfs`:** Manage Git LFS (Large File Storage) files efficiently. If not needed, set `lfs: false`.
|
||||
- **Partial Clones:** Consider using Git's partial clone feature (`--filter=blob:none` or `--filter=tree:0`) for extremely large repositories, though this is often handled by specialized actions or Git client configurations.
|
||||
- **Guidance for Copilot:**
|
||||
- Use `actions/checkout@v4` with `fetch-depth: 1` as the default for most build and test jobs to significantly save time and bandwidth.
|
||||
- Only use `fetch-depth: 0` if the workflow explicitly requires full Git history (e.g., for release tagging, deep commit analysis, or `git blame` operations).
|
||||
- Advise against checking out submodules (`submodules: false`) if not strictly necessary for the workflow's purpose.
|
||||
- Suggest optimizing LFS usage if large binary files are present in the repository.
|
||||
|
||||
### **5. Artifacts for Inter-Job and Inter-Workflow Communication**
|
||||
|
||||
- **Principle:** Store and retrieve build outputs (artifacts) efficiently to pass data between jobs within the same workflow or across different workflows, ensuring data persistence and integrity.
|
||||
- **Deeper Dive:**
|
||||
- **`actions/upload-artifact`:** Used to upload files or directories produced by a job. Artifacts are automatically compressed and can be downloaded later.
|
||||
- **`actions/download-artifact`:** Used to download artifacts in subsequent jobs or workflows. You can download all artifacts or specific ones by name.
|
||||
- **`retention-days`:** Crucial for managing storage costs and compliance. Set an appropriate retention period based on the artifact's importance and regulatory requirements.
|
||||
- **Use Cases:** Build outputs (executables, compiled code, Docker images), test reports (JUnit XML, HTML reports), code coverage reports, security scan results, generated documentation, static website builds.
|
||||
- **Limitations:** Artifacts are immutable once uploaded. Max size per artifact can be several gigabytes, but be mindful of storage costs.
|
||||
- **Guidance for Copilot:**
|
||||
    - Use `actions/upload-artifact@v4` and `actions/download-artifact@v4` to reliably pass large files between jobs within the same workflow or across different workflows, promoting modularity and efficiency. (v3 of these actions is deprecated; use the current major version.)
|
||||
- Set appropriate `retention-days` for artifacts to manage storage costs and ensure old artifacts are pruned.
|
||||
- Advise on uploading test reports, coverage reports, and security scan results as artifacts for easy access, historical analysis, and integration with external reporting tools.
|
||||
- Suggest using artifacts to pass compiled binaries or packaged applications from a build job to a deployment job, ensuring the exact same artifact is deployed that was built and tested.
|
||||
|
||||
## Comprehensive Testing in CI/CD (Expanded)
|
||||
|
||||
### **1. Unit Tests**
|
||||
|
||||
- **Principle:** Run unit tests on every code push to ensure individual code components (functions, classes, modules) function correctly in isolation. They are the fastest and most numerous tests.
|
||||
- **Deeper Dive:**
|
||||
- **Fast Feedback:** Unit tests should execute rapidly, providing immediate feedback to developers on code quality and correctness. Parallelization of unit tests is highly recommended.
|
||||
- **Code Coverage:** Integrate code coverage tools (e.g., Istanbul for JS, Coverage.py for Python, JaCoCo for Java) and enforce minimum coverage thresholds. Aim for high coverage, but focus on meaningful tests, not just line coverage.
|
||||
- **Test Reporting:** Publish test results using `actions/upload-artifact` (e.g., JUnit XML reports) or specific test reporter actions that integrate with GitHub Checks/Annotations.
|
||||
- **Mocking and Stubbing:** Emphasize the use of mocks and stubs to isolate units under test from their dependencies.
|
||||
- **Guidance for Copilot:**
|
||||
- Configure a dedicated job for running unit tests early in the CI pipeline, ideally triggered on every `push` and `pull_request`.
|
||||
- Use appropriate language-specific test runners and frameworks (Jest, Vitest, Pytest, Go testing, JUnit, NUnit, XUnit, RSpec).
|
||||
- Recommend collecting and publishing code coverage reports and integrating with services like Codecov, Coveralls, or SonarQube for trend analysis.
|
||||
- Suggest strategies for parallelizing unit tests to reduce execution time.
|
||||
|
||||
### **2. Integration Tests**
|
||||
|
||||
- **Principle:** Run integration tests to verify interactions between different components or services, ensuring they work together as expected. These tests typically involve real dependencies (e.g., databases, APIs).
|
||||
- **Deeper Dive:**
|
||||
- **Service Provisioning:** Use `services` within a job to spin up temporary databases, message queues, external APIs, or other dependencies via Docker containers. This provides a consistent and isolated testing environment.
|
||||
- **Test Doubles vs. Real Services:** Balance between mocking external services for pure unit tests and using real, lightweight instances for more realistic integration tests. Prioritize real instances when testing actual integration points.
|
||||
- **Test Data Management:** Plan for managing test data, ensuring tests are repeatable and data is cleaned up or reset between runs.
|
||||
- **Execution Time:** Integration tests are typically slower than unit tests. Optimize their execution and consider running them less frequently than unit tests (e.g., on PR merge instead of every push).
|
||||
- **Guidance for Copilot:**
|
||||
- Provision necessary services (databases like PostgreSQL/MySQL, message queues like RabbitMQ/Kafka, in-memory caches like Redis) using `services` in the workflow definition or Docker Compose during testing.
|
||||
- Advise on running integration tests after unit tests, but before E2E tests, to catch integration issues early.
|
||||
- Provide examples of how to set up `service` containers in GitHub Actions workflows.
|
||||
- Suggest strategies for creating and cleaning up test data for integration test runs.
|
||||
|
||||
### **3. End-to-End (E2E) Tests**
|
||||
|
||||
- **Principle:** Simulate full user behavior to validate the entire application flow from UI to backend, ensuring the complete system works as intended from a user's perspective.
|
||||
- **Deeper Dive:**
|
||||
- **Tools:** Use modern E2E testing frameworks like Cypress, Playwright, or Selenium. These provide browser automation capabilities.
|
||||
- **Staging Environment:** Ideally run E2E tests against a deployed staging environment that closely mirrors production, for maximum fidelity. Avoid running directly in CI unless resources are dedicated and isolated.
|
||||
- **Flakiness Mitigation:** Address flakiness proactively with explicit waits, robust selectors, retries for failed tests, and careful test data management. Flaky tests erode trust in the pipeline.
|
||||
- **Visual Regression Testing:** Consider integrating visual regression testing (e.g., Applitools, Percy) to catch UI discrepancies.
|
||||
- **Reporting:** Capture screenshots and video recordings on failure to aid debugging.
|
||||
- **Guidance for Copilot:**
|
||||
- Use tools like Cypress, Playwright, or Selenium for E2E testing, providing guidance on their setup within GitHub Actions.
|
||||
- Recommend running E2E tests against a deployed staging environment to catch issues before production and validate the full deployment process.
|
||||
- Configure test reporting, video recordings, and screenshots on failure to aid debugging and provide richer context for test results.
|
||||
- Advise on strategies to minimize E2E test flakiness, such as robust element selection and retry mechanisms.
|
||||
|
||||
### **4. Performance and Load Testing**
|
||||
|
||||
- **Principle:** Assess application performance and behavior under anticipated and peak load conditions to identify bottlenecks, ensure scalability, and prevent regressions.
|
||||
- **Deeper Dive:**
|
||||
- **Tools:** JMeter, k6, Locust, Gatling, Artillery. Choose based on language, complexity, and specific needs.
|
||||
- **Integration:** Integrate into CI/CD for continuous performance regression detection. Run these tests less frequently than unit/integration tests (e.g., nightly, weekly, or on significant feature merges).
|
||||
- **Thresholds:** Define clear performance thresholds (e.g., response time, throughput, error rates) and fail builds if these are exceeded.
|
||||
- **Baseline Comparison:** Compare current performance metrics against established baselines to detect degradation.
|
||||
- **Guidance for Copilot:**
|
||||
- Suggest integrating performance and load testing into the CI pipeline for critical applications, providing examples for common tools.
|
||||
- Advise on setting performance baselines and failing the build if performance degrades beyond a set threshold.
|
||||
- Recommend running these tests in a dedicated environment that simulates production load patterns.
|
||||
- Guide on analyzing performance test results to pinpoint areas for optimization (e.g., database queries, API endpoints).
|
||||
|
||||
### **5. Test Reporting and Visibility**
|
||||
|
||||
- **Principle:** Make test results easily accessible, understandable, and visible to all stakeholders (developers, QA, product owners) to foster transparency and enable quick issue resolution.
|
||||
- **Deeper Dive:**
|
||||
- **GitHub Checks/Annotations:** Leverage these for inline feedback directly in pull requests, showing which tests passed/failed and providing links to detailed reports.
|
||||
- **Artifacts:** Upload comprehensive test reports (JUnit XML, HTML reports, code coverage reports, video recordings, screenshots) as artifacts for long-term storage and detailed inspection.
|
||||
- **Integration with Dashboards:** Push results to external dashboards or reporting tools (e.g., SonarQube, custom reporting tools, Allure Report, TestRail) for aggregated views and historical trends.
|
||||
- **Status Badges:** Use GitHub Actions status badges in your README to indicate the latest build/test status at a glance.
|
||||
- **Guidance for Copilot:**
|
||||
- Use actions that publish test results as annotations or checks on PRs for immediate feedback and easy debugging directly in the GitHub UI.
|
||||
- Upload detailed test reports (e.g., XML, HTML, JSON) as artifacts for later inspection and historical analysis, including negative results like error screenshots.
|
||||
- Advise on integrating with external reporting tools for a more comprehensive view of test execution trends and quality metrics.
|
||||
- Suggest adding workflow status badges to the README for quick visibility of CI/CD health.
|
||||
|
||||
## Advanced Deployment Strategies (Expanded)
|
||||
|
||||
### **1. Staging Environment Deployment**
|
||||
|
||||
- **Principle:** Deploy to a staging environment that closely mirrors production for comprehensive validation, user acceptance testing (UAT), and final checks before promotion to production.
|
||||
- **Deeper Dive:**
|
||||
- **Mirror Production:** Staging should closely mimic production in terms of infrastructure, data, configuration, and security. Any significant discrepancies can lead to issues in production.
|
||||
- **Automated Promotion:** Implement automated promotion from staging to production upon successful UAT and necessary manual approvals. This reduces human error and speeds up releases.
|
||||
- **Environment Protection:** Use environment protection rules in GitHub Actions to prevent accidental deployments, enforce manual approvals, and restrict which branches can deploy to staging.
|
||||
- **Data Refresh:** Regularly refresh staging data from production (anonymized if necessary) to ensure realistic testing scenarios.
|
||||
- **Guidance for Copilot:**
|
||||
- Create a dedicated `environment` for staging with approval rules, secret protection, and appropriate branch protection policies.
|
||||
- Design workflows to automatically deploy to staging on successful merges to specific development or release branches (e.g., `develop`, `release/*`).
|
||||
- Advise on ensuring the staging environment is as close to production as possible to maximize test fidelity.
|
||||
- Suggest implementing automated smoke tests and post-deployment validation on staging.
|
||||
|
||||
### **2. Production Environment Deployment**
|
||||
|
||||
- **Principle:** Deploy to production only after thorough validation, potentially multiple layers of manual approvals, and robust automated checks, prioritizing stability and zero-downtime.
|
||||
- **Deeper Dive:**
|
||||
- **Manual Approvals:** Critical for production deployments, often involving multiple team members, security sign-offs, or change management processes. GitHub Environments support this natively.
|
||||
- **Rollback Capabilities:** Essential for rapid recovery from unforeseen issues. Ensure a quick and reliable way to revert to the previous stable state.
|
||||
- **Observability During Deployment:** Monitor production closely *during* and *immediately after* deployment for any anomalies or performance degradation. Use dashboards, alerts, and tracing.
|
||||
- **Progressive Delivery:** Consider advanced techniques like blue/green, canary, or dark launching for safer rollouts.
|
||||
- **Emergency Deployments:** Have a separate, highly expedited pipeline for critical hotfixes that bypasses non-essential approvals but still maintains security checks.
|
||||
- **Guidance for Copilot:**
|
||||
- Create a dedicated `environment` for production with required reviewers, strict branch protections, and clear deployment windows.
|
||||
- Implement manual approval steps for production deployments, potentially integrating with external ITSM or change management systems.
|
||||
- Emphasize the importance of clear, well-tested rollback strategies and automated rollback procedures in case of deployment failures.
|
||||
- Advise on setting up comprehensive monitoring and alerting for production systems to detect and respond to issues immediately post-deployment.
|
||||
|
||||
### **3. Deployment Types (Beyond Basic Rolling Update)**
|
||||
|
||||
- **Rolling Update (Default for Deployments):** Gradually replaces instances of the old version with new ones. Good for most cases, especially stateless applications.
|
||||
- **Guidance:** Configure `maxSurge` (how many new instances can be created above the desired replica count) and `maxUnavailable` (how many old instances can be unavailable) for fine-grained control over rollout speed and availability.
|
||||
- **Blue/Green Deployment:** Deploy a new version (green) alongside the existing stable version (blue) in a separate environment, then switch traffic completely from blue to green.
|
||||
- **Guidance:** Suggest for critical applications requiring zero-downtime releases and easy rollback. Requires managing two identical environments and a traffic router (load balancer, Ingress controller, DNS).
|
||||
- **Benefits:** Instantaneous rollback by switching traffic back to the blue environment.
|
||||
- **Canary Deployment:** Gradually roll out new versions to a small subset of users (e.g., 5-10%) before a full rollout. Monitor performance and error rates for the canary group.
|
||||
- **Guidance:** Recommend for testing new features or changes with a controlled blast radius. Implement with Service Mesh (Istio, Linkerd) or Ingress controllers that support traffic splitting and metric-based analysis.
|
||||
- **Benefits:** Early detection of issues with minimal user impact.
|
||||
- **Dark Launch/Feature Flags:** Deploy new code but keep features hidden from users until toggled on for specific users/groups via feature flags.
|
||||
- **Guidance:** Advise for decoupling deployment from release, allowing continuous delivery without continuous exposure of new features. Use feature flag management systems (LaunchDarkly, Split.io, Unleash).
|
||||
- **Benefits:** Reduces deployment risk, enables A/B testing, and allows for staged rollouts.
|
||||
- **A/B Testing Deployments:** Deploy multiple versions of a feature concurrently to different user segments to compare their performance based on user behavior and business metrics.
|
||||
- **Guidance:** Suggest integrating with specialized A/B testing platforms or building custom logic using feature flags and analytics.
|
||||
|
||||
### **4. Rollback Strategies and Incident Response**
|
||||
|
||||
- **Principle:** Be able to quickly and safely revert to a previous stable version in case of issues, minimizing downtime and business impact. This requires proactive planning.
|
||||
- **Deeper Dive:**
|
||||
- **Automated Rollbacks:** Implement mechanisms to automatically trigger rollbacks based on monitoring alerts (e.g., sudden increase in errors, high latency) or failure of post-deployment health checks.
|
||||
- **Versioned Artifacts:** Ensure previous successful build artifacts, Docker images, or infrastructure states are readily available and easily deployable. This is crucial for fast recovery.
|
||||
- **Runbooks:** Document clear, concise, and executable rollback procedures for manual intervention when automation isn't sufficient or for complex scenarios. These should be regularly reviewed and tested.
|
||||
- **Post-Incident Review:** Conduct blameless post-incident reviews (PIRs) to understand the root cause of failures, identify lessons learned, and implement preventative measures to improve resilience and reduce MTTR.
|
||||
- **Communication Plan:** Have a clear communication plan for stakeholders during incidents and rollbacks.
|
||||
- **Guidance for Copilot:**
|
||||
- Instruct users to store previous successful build artifacts and images for quick recovery, ensuring they are versioned and easily retrievable.
|
||||
- Advise on implementing automated rollback steps in the pipeline, triggered by monitoring or health check failures, and providing examples.
|
||||
- Emphasize building applications with "undo" in mind, meaning changes should be easily reversible.
|
||||
- Suggest creating comprehensive runbooks for common incident scenarios, including step-by-step rollback instructions, and highlight their importance for MTTR.
|
||||
- Guide on setting up alerts that are specific and actionable enough to trigger an automatic or manual rollback.
|
||||
|
||||
## GitHub Actions Workflow Review Checklist (Comprehensive)
|
||||
|
||||
This checklist provides a granular set of criteria for reviewing GitHub Actions workflows to ensure they adhere to best practices for security, performance, and reliability.
|
||||
|
||||
- [ ] **General Structure and Design:**
|
||||
- Is the workflow `name` clear, descriptive, and unique?
|
||||
- Are `on` triggers appropriate for the workflow's purpose (e.g., `push`, `pull_request`, `workflow_dispatch`, `schedule`)? Are path/branch filters used effectively?
|
||||
- Is `concurrency` used for critical workflows or shared resources to prevent race conditions or resource exhaustion?
|
||||
- Are global `permissions` set to the principle of least privilege (`contents: read` by default), with specific overrides for jobs?
|
||||
- Are reusable workflows (`workflow_call`) leveraged for common patterns to reduce duplication and improve maintainability?
|
||||
- Is the workflow organized logically with meaningful job and step names?
|
||||
|
||||
- [ ] **Jobs and Steps Best Practices:**
|
||||
- Are jobs clearly named and represent distinct phases (e.g., `build`, `lint`, `test`, `deploy`)?
|
||||
- Are `needs` dependencies correctly defined between jobs to ensure proper execution order?
|
||||
- Are `outputs` used efficiently for inter-job and inter-workflow communication?
|
||||
- Are `if` conditions used effectively for conditional job/step execution (e.g., environment-specific deployments, branch-specific actions)?
|
||||
- Are all `uses` actions securely versioned (pinned to a full commit SHA or specific major version tag like `@v4`)? Avoid `main` or `latest` tags.
|
||||
- Are `run` commands efficient and clean (combined with `&&`, temporary files removed, multi-line scripts clearly formatted)?
|
||||
- Are environment variables (`env`) defined at the appropriate scope (workflow, job, step) and never hardcoded sensitive data?
|
||||
- Is `timeout-minutes` set for long-running jobs to prevent hung workflows?
|
||||
|
||||
- [ ] **Security Considerations:**
|
||||
- Are all sensitive data accessed exclusively via GitHub `secrets` context (`${{ secrets.MY_SECRET }}`)? Never hardcoded, never exposed in logs (even if masked).
|
||||
- Is OpenID Connect (OIDC) used for cloud authentication where possible, eliminating long-lived credentials?
|
||||
- Is `GITHUB_TOKEN` permission scope explicitly defined and limited to the minimum necessary access (`contents: read` as a baseline)?
|
||||
- Are Software Composition Analysis (SCA) tools (e.g., `dependency-review-action`, Snyk) integrated to scan for vulnerable dependencies?
|
||||
- Are Static Application Security Testing (SAST) tools (e.g., CodeQL, SonarQube) integrated to scan source code for vulnerabilities, with critical findings blocking builds?
|
||||
- Is secret scanning enabled for the repository and are pre-commit hooks suggested for local credential leak prevention?
|
||||
- Is there a strategy for container image signing (e.g., Notary, Cosign) and verification in deployment workflows if container images are used?
|
||||
- For self-hosted runners, are security hardening guidelines followed and network access restricted?
|
||||
|
||||
- [ ] **Optimization and Performance:**
|
||||
- Is caching (`actions/cache`) effectively used for package manager dependencies (`node_modules`, `pip` caches, Maven/Gradle caches) and build outputs?
|
||||
- Are cache `key` and `restore-keys` designed for optimal cache hit rates (e.g., using `hashFiles`)?
|
||||
- Is `strategy.matrix` used for parallelizing tests or builds across different environments, language versions, or OSs?
|
||||
- Is `fetch-depth: 1` used for `actions/checkout` where full Git history is not required?
|
||||
- Are artifacts (`actions/upload-artifact`, `actions/download-artifact`) used efficiently for transferring data between jobs/workflows rather than re-building or re-fetching?
|
||||
- Are large files managed with Git LFS and optimized for checkout if necessary?
|
||||
|
||||
- [ ] **Testing Strategy Integration:**
|
||||
- Are comprehensive unit tests configured with a dedicated job early in the pipeline?
|
||||
- Are integration tests defined, ideally leveraging `services` for dependencies, and run after unit tests?
|
||||
- Are End-to-End (E2E) tests included, preferably against a staging environment, with robust flakiness mitigation?
|
||||
- Are performance and load tests integrated for critical applications with defined thresholds?
|
||||
- Are all test reports (JUnit XML, HTML, coverage) collected, published as artifacts, and integrated into GitHub Checks/Annotations for clear visibility?
|
||||
- Is code coverage tracked and enforced with a minimum threshold?
|
||||
|
||||
- [ ] **Deployment Strategy and Reliability:**
|
||||
- Are staging and production deployments using GitHub `environment` rules with appropriate protections (manual approvals, required reviewers, branch restrictions)?
|
||||
- Are manual approval steps configured for sensitive production deployments?
|
||||
- Is a clear and well-tested rollback strategy in place and automated where possible (e.g., `kubectl rollout undo`, reverting to previous stable image)?
|
||||
- Are chosen deployment types (e.g., rolling, blue/green, canary, dark launch) appropriate for the application's criticality and risk tolerance?
|
||||
- Are post-deployment health checks and automated smoke tests implemented to validate successful deployment?
|
||||
- Is the workflow resilient to temporary failures (e.g., retries for flaky network operations)?
|
||||
|
||||
- [ ] **Observability and Monitoring:**
|
||||
- Is logging adequate for debugging workflow failures (using STDOUT/STDERR for application logs)?
|
||||
- Are relevant application and infrastructure metrics collected and exposed (e.g., Prometheus metrics)?
|
||||
- Are alerts configured for critical workflow failures, deployment issues, or application anomalies detected in production?
|
||||
- Is distributed tracing (e.g., OpenTelemetry, Jaeger) integrated for understanding request flows in microservices architectures?
|
||||
- Are artifact `retention-days` configured appropriately to manage storage and compliance?
|
||||
|
||||
## Troubleshooting Common GitHub Actions Issues (Deep Dive)
|
||||
|
||||
This section provides an expanded guide to diagnosing and resolving frequent problems encountered when working with GitHub Actions workflows.
|
||||
|
||||
### **1. Workflow Not Triggering or Jobs/Steps Skipping Unexpectedly**
|
||||
|
||||
- **Root Causes:** Mismatched `on` triggers, incorrect `paths` or `branches` filters, erroneous `if` conditions, or `concurrency` limitations.
|
||||
- **Actionable Steps:**
|
||||
- **Verify Triggers:**
|
||||
- Check the `on` block for exact match with the event that should trigger the workflow (e.g., `push`, `pull_request`, `workflow_dispatch`, `schedule`).
|
||||
- Ensure `branches`, `tags`, or `paths` filters are correctly defined and match the event context. Remember that `paths-ignore` and `branches-ignore` take precedence.
|
||||
- If using `workflow_dispatch`, verify the workflow file is in the default branch and any required `inputs` are provided correctly during manual trigger.
|
||||
- **Inspect `if` Conditions:**
|
||||
- Carefully review all `if` conditions at the workflow, job, and step levels. A single false condition can prevent execution.
|
||||
- Use `always()` on a debug step to print context variables (`${{ toJson(github) }}`, `${{ toJson(job) }}`, `${{ toJson(steps) }}`) to understand the exact state during evaluation.
|
||||
- Test complex `if` conditions in a simplified workflow.
|
||||
- **Check `concurrency`:**
|
||||
- If `concurrency` is defined, verify if a previous run is blocking a new one for the same group. Check the "Concurrency" tab in the workflow run.
|
||||
- **Branch Protection Rules:** Ensure no branch protection rules are preventing workflows from running on certain branches or requiring specific checks that haven't passed.
|
||||
|
||||
### **2. Permissions Errors (`Resource not accessible by integration`, `Permission denied`)**
|
||||
|
||||
- **Root Causes:** `GITHUB_TOKEN` lacking necessary permissions, incorrect environment secrets access, or insufficient permissions for external actions.
|
||||
- **Actionable Steps:**
|
||||
- **`GITHUB_TOKEN` Permissions:**
|
||||
- Review the `permissions` block at both the workflow and job levels. Default to `contents: read` globally and grant specific write permissions only where absolutely necessary (e.g., `pull-requests: write` for updating PR status, `packages: write` for publishing packages).
|
||||
        - Understand the default permissions of `GITHUB_TOKEN`, which are often broader than necessary.
|
||||
- **Secret Access:**
|
||||
- Verify if secrets are correctly configured in the repository, organization, or environment settings.
|
||||
- Ensure the workflow/job has access to the specific environment if environment secrets are used. Check if any manual approvals are pending for the environment.
|
||||
- Confirm the secret name matches exactly (`secrets.MY_API_KEY`).
|
||||
- **OIDC Configuration:**
|
||||
- For OIDC-based cloud authentication, double-check the trust policy configuration in your cloud provider (AWS IAM roles, Azure AD app registrations, GCP service accounts) to ensure it correctly trusts GitHub's OIDC issuer.
|
||||
- Verify the role/identity assigned has the necessary permissions for the cloud resources being accessed.
|
||||
|
||||
### **3. Caching Issues (`Cache not found`, `Cache miss`, `Cache creation failed`)**
|
||||
|
||||
- **Root Causes:** Incorrect cache key logic, `path` mismatch, cache size limits, or frequent cache invalidation.
|
||||
- **Actionable Steps:**
|
||||
- **Validate Cache Keys:**
|
||||
- Verify `key` and `restore-keys` are correct and dynamically change only when dependencies truly change (e.g., `key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}`). A cache key that is too dynamic will always result in a miss.
|
||||
- Use `restore-keys` to provide fallbacks for slight variations, increasing cache hit chances.
|
||||
- **Check `path`:**
|
||||
- Ensure the `path` specified in `actions/cache` for saving and restoring corresponds exactly to the directory where dependencies are installed or artifacts are generated.
|
||||
- Verify the existence of the `path` before caching.
|
||||
- **Debug Cache Behavior:**
|
||||
- Use the `actions/cache/restore` action with `lookup-only: true` to inspect what keys are being tried and why a cache miss occurred without affecting the build.
|
||||
- Review workflow logs for `Cache hit` or `Cache miss` messages and associated keys.
|
||||
- **Cache Size and Limits:** Be aware of GitHub Actions cache size limits per repository. If caches are very large, they might be evicted frequently.
|
||||
|
||||
### **4. Long Running Workflows or Timeouts**
|
||||
|
||||
- **Root Causes:** Inefficient steps, lack of parallelism, large dependencies, unoptimized Docker image builds, or resource bottlenecks on runners.
|
||||
- **Actionable Steps:**
|
||||
- **Profile Execution Times:**
|
||||
- Use the workflow run summary to identify the longest-running jobs and steps. This is your primary tool for optimization.
|
||||
- **Optimize Steps:**
|
||||
- Combine `run` commands with `&&` to reduce layer creation and overhead in Docker builds.
|
||||
- Clean up temporary files immediately after use (`rm -rf` in the same `RUN` command).
|
||||
- Install only necessary dependencies.
|
||||
- **Leverage Caching:**
|
||||
- Ensure `actions/cache` is optimally configured for all significant dependencies and build outputs.
|
||||
- **Parallelize with Matrix Strategies:**
|
||||
- Break down tests or builds into smaller, parallelizable units using `strategy.matrix` to run them concurrently.
|
||||
- **Choose Appropriate Runners:**
|
||||
- Review `runs-on`. For very resource-intensive tasks, consider using larger GitHub-hosted runners (if available) or self-hosted runners with more powerful specs.
|
||||
- **Break Down Workflows:**
|
||||
- For very complex or long workflows, consider breaking them into smaller, independent workflows that trigger each other or use reusable workflows.
|
||||
|
||||
### **5. Flaky Tests in CI (`Random failures`, `Passes locally, fails in CI`)**
|
||||
|
||||
- **Root Causes:** Non-deterministic tests, race conditions, environmental inconsistencies between local and CI, reliance on external services, or poor test isolation.
|
||||
- **Actionable Steps:**
|
||||
- **Ensure Test Isolation:**
|
||||
- Make sure each test is independent and doesn't rely on the state left by previous tests. Clean up resources (e.g., database entries) after each test or test suite.
|
||||
- **Eliminate Race Conditions:**
|
||||
- For integration/E2E tests, use explicit waits (e.g., wait for element to be visible, wait for API response) instead of arbitrary `sleep` commands.
|
||||
- Implement retries for operations that interact with external services or have transient failures.
|
||||
- **Standardize Environments:**
|
||||
- Ensure the CI environment (Node.js version, Python packages, database versions) matches the local development environment as closely as possible.
|
||||
- Use Docker `services` for consistent test dependencies.
|
||||
- **Robust Selectors (E2E):**
|
||||
- Use stable, unique selectors in E2E tests (e.g., `data-testid` attributes) instead of brittle CSS classes or XPath.
|
||||
- **Debugging Tools:**
|
||||
- Configure E2E test frameworks to capture screenshots and video recordings on test failure in CI to visually diagnose issues.
|
||||
- **Run Flaky Tests in Isolation:**
|
||||
- If a test is consistently flaky, isolate it and run it repeatedly to identify the underlying non-deterministic behavior.
|
||||
|
||||
### **6. Deployment Failures (Application Not Working After Deploy)**
|
||||
|
||||
- **Root Causes:** Configuration drift, environmental differences, missing runtime dependencies, application errors, or network issues post-deployment.
|
||||
- **Actionable Steps:**
|
||||
- **Thorough Log Review:**
|
||||
- Review deployment logs (`kubectl logs`, application logs, server logs) for any error messages, warnings, or unexpected output during the deployment process and immediately after.
|
||||
- **Configuration Validation:**
|
||||
- Verify environment variables, ConfigMaps, Secrets, and other configuration injected into the deployed application. Ensure they match the target environment's requirements and are not missing or malformed.
|
||||
- Use pre-deployment checks to validate configuration.
|
||||
- **Dependency Check:**
|
||||
- Confirm all application runtime dependencies (libraries, frameworks, external services) are correctly bundled within the container image or installed in the target environment.
|
||||
- **Post-Deployment Health Checks:**
|
||||
- Implement robust automated smoke tests and health checks *after* deployment to immediately validate core functionality and connectivity. Trigger rollbacks if these fail.
|
||||
- **Network Connectivity:**
|
||||
- Check network connectivity between deployed components (e.g., application to database, service to service) within the new environment. Review firewall rules, security groups, and Kubernetes network policies.
|
||||
- **Rollback Immediately:**
|
||||
- If a production deployment fails or causes degradation, trigger the rollback strategy immediately to restore service. Diagnose the issue in a non-production environment.
|
||||
|
||||
## Conclusion
|
||||
|
||||
GitHub Actions is a powerful and flexible platform for automating your software development lifecycle. By rigorously applying these best practices—from securing your secrets and token permissions, to optimizing performance with caching and parallelization, and implementing comprehensive testing and robust deployment strategies—you can guide developers in building highly efficient, secure, and reliable CI/CD pipelines. Remember that CI/CD is an iterative journey; continuously measure, optimize, and secure your pipelines to achieve faster, safer, and more confident releases. Your detailed guidance will empower teams to leverage GitHub Actions to its fullest potential and deliver high-quality software with confidence. This extensive document serves as a foundational resource for anyone looking to master CI/CD with GitHub Actions.
|
||||
|
||||
---
|
||||
|
||||
<!-- End of GitHub Actions CI/CD Best Practices Instructions -->
|
||||
@@ -0,0 +1,350 @@
|
||||
---
|
||||
description: 'Instructions for writing Go code following idiomatic Go practices and community standards'
|
||||
applyTo: '**/*.go,**/go.mod,**/go.sum'
|
||||
---
|
||||
|
||||
# Go Development Instructions
|
||||
|
||||
Follow idiomatic Go practices and community standards when writing Go code.
|
||||
These instructions are based on:
|
||||
|
||||
- [Effective Go](https://go.dev/doc/effective_go)
|
||||
- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
|
||||
- [Uber's Go Style Guide](https://github.com/uber-go/guide)
|
||||
- [Google's Go Style Guide](https://google.github.io/styleguide/go/)
|
||||
|
||||
## General Instructions
|
||||
|
||||
- Write simple, clear, and idiomatic Go code
|
||||
- Favor clarity and simplicity over cleverness
|
||||
- Follow the principle of least surprise
|
||||
- Keep the happy path left-aligned (minimize indentation)
|
||||
- Return early to reduce nesting
|
||||
- Prefer early return over if-else chains; use `if condition { return }` pattern to avoid else blocks
|
||||
- Make the zero value useful
|
||||
- Write self-documenting code with clear, descriptive names
|
||||
- Document exported types, functions, methods, and packages
|
||||
- Use Go modules for dependency management
|
||||
- Leverage the Go standard library instead of reinventing the wheel (e.g., use `strings.Builder` for string concatenation, `filepath.Join` for path construction)
|
||||
- Prefer standard library solutions over custom implementations when functionality exists
|
||||
- Write comments in English by default; translate only upon user request
|
||||
- Avoid using emoji in code and comments
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
### Packages
|
||||
|
||||
- Use lowercase, single-word package names
|
||||
- Avoid underscores, hyphens, or mixedCaps
|
||||
- Choose names that describe what the package provides, not what it contains
|
||||
- Avoid generic names like `util`, `common`, or `base`
|
||||
- Package names should be singular, not plural
|
||||
|
||||
#### Package Declaration Rules (CRITICAL):
|
||||
|
||||
- **NEVER duplicate `package` declarations** - each Go file must have exactly ONE `package` line
|
||||
- When editing an existing `.go` file:
|
||||
- **PRESERVE** the existing `package` declaration - do not add another one
|
||||
- If you need to replace the entire file content, start with the existing package name
|
||||
- When creating a new `.go` file:
|
||||
- **BEFORE writing any code**, check what package name other `.go` files in the same directory use
|
||||
- Use the SAME package name as existing files in that directory
|
||||
- If it's a new directory, use the directory name as the package name
|
||||
- Write **exactly one** `package <name>` line at the very top of the file
|
||||
- When using file creation or replacement tools:
|
||||
- **ALWAYS verify** the target file doesn't already have a `package` declaration before adding one
|
||||
- If replacing file content, include only ONE `package` declaration in the new content
|
||||
- **NEVER** create files with multiple `package` lines or duplicate declarations
|
||||
|
||||
### Variables and Functions
|
||||
|
||||
- Use mixedCaps or MixedCaps (camelCase) rather than underscores
|
||||
- Keep names short but descriptive
|
||||
- Use single-letter variables only for very short scopes (like loop indices)
|
||||
- Exported names start with a capital letter
|
||||
- Unexported names start with a lowercase letter
|
||||
- Avoid stuttering (e.g., avoid `http.HTTPServer`, prefer `http.Server`)
|
||||
|
||||
### Interfaces
|
||||
|
||||
- Name interfaces with -er suffix when possible (e.g., `Reader`, `Writer`, `Formatter`)
|
||||
- Single-method interfaces should be named after the method (e.g., `Read` → `Reader`)
|
||||
- Keep interfaces small and focused
|
||||
|
||||
### Constants
|
||||
|
||||
- Use MixedCaps for exported constants
|
||||
- Use mixedCaps for unexported constants
|
||||
- Group related constants using `const` blocks
|
||||
- Consider using typed constants for better type safety
|
||||
|
||||
## Code Style and Formatting
|
||||
|
||||
### Formatting
|
||||
|
||||
- Always use `gofmt` to format code
|
||||
- Use `goimports` to manage imports automatically
|
||||
- Keep line length reasonable (no hard limit, but consider readability)
|
||||
- Add blank lines to separate logical groups of code
|
||||
|
||||
### Comments
|
||||
|
||||
- Strive for self-documenting code; prefer clear variable names, function names, and code structure over comments
|
||||
- Write comments only when necessary to explain complex logic, business rules, or non-obvious behavior
|
||||
- Write comments in complete sentences in English by default
|
||||
- Translate comments to other languages only upon specific user request
|
||||
- Start sentences with the name of the thing being described
|
||||
- Package comments should start with "Package [name]"
|
||||
- Use line comments (`//`) for most comments
|
||||
- Use block comments (`/* */`) sparingly, mainly for package documentation
|
||||
- Document why, not what, unless the what is complex
|
||||
- Avoid emoji in comments and code
|
||||
|
||||
### Error Handling
|
||||
|
||||
- Check errors immediately after the function call
|
||||
- Don't ignore errors using `_` unless you have a good reason (document why)
|
||||
- Wrap errors with context using `fmt.Errorf` with `%w` verb
|
||||
- Create custom error types when you need to check for specific errors
|
||||
- Place error returns as the last return value
|
||||
- Name error variables `err`
|
||||
- Keep error messages lowercase and don't end with punctuation
|
||||
|
||||
## Architecture and Project Structure
|
||||
|
||||
### Package Organization
|
||||
|
||||
- Follow standard Go project layout conventions
|
||||
- Keep `main` packages in `cmd/` directory
|
||||
- Put reusable packages in `pkg/` or `internal/`
|
||||
- Use `internal/` for packages that shouldn't be imported by external projects
|
||||
- Group related functionality into packages
|
||||
- Avoid circular dependencies
|
||||
|
||||
### Dependency Management
|
||||
|
||||
- Use Go modules (`go.mod` and `go.sum`)
|
||||
- Keep dependencies minimal
|
||||
- Regularly update dependencies for security patches
|
||||
- Use `go mod tidy` to clean up unused dependencies
|
||||
- Vendor dependencies only when necessary
|
||||
|
||||
## Type Safety and Language Features
|
||||
|
||||
### Type Definitions
|
||||
|
||||
- Define types to add meaning and type safety
|
||||
- Use struct tags for JSON, XML, database mappings
|
||||
- Prefer explicit type conversions
|
||||
- Use type assertions carefully and check the second return value
|
||||
- Prefer generics over unconstrained types; when an unconstrained type is truly needed, use the predeclared alias `any` instead of `interface{}`
|
||||
|
||||
### Pointers vs Values
|
||||
|
||||
- Use pointer receivers for large structs or when you need to modify the receiver
|
||||
- Use value receivers for small structs and when immutability is desired
|
||||
- Use pointer parameters when you need to modify the argument or for large structs
|
||||
- Use value parameters for small structs and when you want to prevent modification
|
||||
- Be consistent within a type's method set
|
||||
- Consider the zero value when choosing pointer vs value receivers
|
||||
|
||||
### Interfaces and Composition
|
||||
|
||||
- Accept interfaces, return concrete types
|
||||
- Keep interfaces small (1-3 methods is ideal)
|
||||
- Use embedding for composition
|
||||
- Define interfaces close to where they're used, not where they're implemented
|
||||
- Don't export interfaces unless necessary
|
||||
|
||||
## Concurrency
|
||||
|
||||
### Goroutines
|
||||
|
||||
- Be cautious about creating goroutines in libraries; prefer letting the caller control concurrency
|
||||
- If you must create goroutines in libraries, provide clear documentation and cleanup mechanisms
|
||||
- Always know how a goroutine will exit
|
||||
- Use `sync.WaitGroup` or channels to wait for goroutines
|
||||
- Avoid goroutine leaks by ensuring cleanup
|
||||
|
||||
### Channels
|
||||
|
||||
- Use channels to communicate between goroutines
|
||||
- Don't communicate by sharing memory; share memory by communicating
|
||||
- Close channels from the sender side, not the receiver
|
||||
- Use buffered channels when you know the capacity
|
||||
- Use `select` for non-blocking operations
|
||||
|
||||
### Synchronization
|
||||
|
||||
- Use `sync.Mutex` for protecting shared state
|
||||
- Keep critical sections small
|
||||
- Use `sync.RWMutex` when you have many readers
|
||||
- Choose between channels and mutexes based on the use case: use channels for communication, mutexes for protecting state
|
||||
- Use `sync.Once` for one-time initialization
|
||||
- WaitGroup usage by Go version:
|
||||
- If `go >= 1.25` in `go.mod`, use the new `WaitGroup.Go` method ([documentation](https://pkg.go.dev/sync#WaitGroup)):
|
||||
```go
|
||||
var wg sync.WaitGroup
|
||||
wg.Go(task1)
|
||||
wg.Go(task2)
|
||||
wg.Wait()
|
||||
```
|
||||
- If `go < 1.25`, use the classic `Add`/`Done` pattern
|
||||
|
||||
## Error Handling Patterns
|
||||
|
||||
### Creating Errors
|
||||
|
||||
- Use `errors.New` for simple static errors
|
||||
- Use `fmt.Errorf` for dynamic errors
|
||||
- Create custom error types for domain-specific errors
|
||||
- Export error variables for sentinel errors
|
||||
- Use `errors.Is` and `errors.As` for error checking
|
||||
|
||||
### Error Propagation
|
||||
|
||||
- Add context when propagating errors up the stack
|
||||
- Don't log and return errors (choose one)
|
||||
- Handle errors at the appropriate level
|
||||
- Consider using structured errors for better debugging
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Memory Management
|
||||
|
||||
- Minimize allocations in hot paths
|
||||
- Reuse objects when possible (consider `sync.Pool`)
|
||||
- Use value receivers for small structs
|
||||
- Preallocate slices when size is known
|
||||
- Avoid unnecessary string conversions
|
||||
|
||||
### I/O: Readers and Buffers
|
||||
|
||||
- Most `io.Reader` streams are consumable once; reading advances state. Do not assume a reader can be re-read without special handling
|
||||
- If you must read data multiple times, buffer it once and recreate readers on demand:
|
||||
- Use `io.ReadAll` (or a limited read) to obtain `[]byte`, then create fresh readers via `bytes.NewReader(buf)` or `bytes.NewBuffer(buf)` for each reuse
|
||||
- For strings, use `strings.NewReader(s)`; you can `Seek(0, io.SeekStart)` on `*bytes.Reader` to rewind
|
||||
- For HTTP requests, do not reuse a consumed `req.Body`. Instead:
|
||||
- Keep the original payload as `[]byte` and set `req.Body = io.NopCloser(bytes.NewReader(buf))` before each send
|
||||
- Prefer configuring `req.GetBody` so the transport can recreate the body for redirects/retries: `req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(buf)), nil }`
|
||||
- To duplicate a stream while reading, use `io.TeeReader` (copy to a buffer while passing through) or write to multiple sinks with `io.MultiWriter`
|
||||
- Reusing buffered readers: call `(*bufio.Reader).Reset(r)` to attach to a new underlying reader; do not expect it to “rewind” unless the source supports seeking
|
||||
- For large payloads, avoid unbounded buffering; consider streaming, `io.LimitReader`, or on-disk temporary storage to control memory
|
||||
|
||||
- Use `io.Pipe` to stream without buffering the whole payload:
|
||||
- Write to `*io.PipeWriter` in a separate goroutine while the reader consumes
|
||||
- Always close the writer; use `CloseWithError(err)` on failures
|
||||
- `io.Pipe` is for streaming, not rewinding or making readers reusable
|
||||
|
||||
- **Warning:** When using `io.Pipe` (especially with multipart writers), all writes must be performed in strict, sequential order. Do not write concurrently or out of order—multipart boundaries and chunk order must be preserved. Out-of-order or parallel writes can corrupt the stream and result in errors.
|
||||
|
||||
- Streaming multipart/form-data with `io.Pipe`:
|
||||
- `pr, pw := io.Pipe()`; `mw := multipart.NewWriter(pw)`; use `pr` as the HTTP request body
|
||||
- Set `Content-Type` to `mw.FormDataContentType()`
|
||||
- In a goroutine: write all parts to `mw` in the correct order; on error `pw.CloseWithError(err)`; on success `mw.Close()` then `pw.Close()`
|
||||
- Do not store request/in-flight form state on a long-lived client; build per call
|
||||
- Streamed bodies are not rewindable; for retries/redirects, buffer small payloads or provide `GetBody`
|
||||
|
||||
### Profiling
|
||||
|
||||
- Use built-in profiling tools (`pprof`)
|
||||
- Benchmark critical code paths
|
||||
- Profile before optimizing
|
||||
- Focus on algorithmic improvements first
|
||||
- Consider using `testing.B` for benchmarks
|
||||
|
||||
## Testing
|
||||
|
||||
### Test Organization
|
||||
|
||||
- Keep tests in the same package (white-box testing) when testing internals
|
||||
- Use a test package (in the same directory) when testing the public API of the package
|
||||
- Use `_test` package suffix for black-box testing
|
||||
- Name test files with `_test.go` suffix
|
||||
- Place test files next to the code they test
|
||||
|
||||
### Writing Tests
|
||||
|
||||
- Use table-driven tests for multiple test cases
|
||||
- Name tests descriptively using `TestType_MethodName_scenario`
|
||||
- Use subtests with `t.Run` for better organization
|
||||
- Test both success and error cases
|
||||
- Use `testify` or similar libraries when they add value, but don't over-complicate simple tests
|
||||
- Use `testify/mock` for mocking dependencies when necessary
|
||||
|
||||
### Test Helpers
|
||||
|
||||
- Mark helper functions with `t.Helper()`
|
||||
- Create test fixtures for complex setup
|
||||
- Use `testing.TB` interface for functions used in tests and benchmarks
|
||||
- Clean up resources using `t.Cleanup()`
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
### Input Validation
|
||||
|
||||
- Validate all external input
|
||||
- Use strong typing to prevent invalid states
|
||||
- Sanitize data before using in SQL queries
|
||||
- Be careful with file paths from user input
|
||||
- Validate and escape data for different contexts (HTML, SQL, shell)
|
||||
|
||||
### Cryptography
|
||||
|
||||
- Use standard library crypto packages
|
||||
- Don't implement your own cryptography
|
||||
- Use crypto/rand for random number generation
|
||||
- Store passwords using bcrypt, scrypt, or argon2 (consider golang.org/x/crypto for additional options)
|
||||
- Use TLS for network communication
|
||||
|
||||
## Documentation
|
||||
|
||||
### Code Documentation
|
||||
|
||||
- Prioritize self-documenting code through clear naming and structure
|
||||
- Document all exported symbols with clear, concise explanations
|
||||
- Start documentation with the symbol name
|
||||
- Write documentation in English by default
|
||||
- Use examples in documentation when helpful
|
||||
- Keep documentation close to code
|
||||
- Update documentation when code changes
|
||||
- Do not use emoji in documentation and comments
|
||||
|
||||
### README and Documentation Files
|
||||
|
||||
- Include clear setup instructions
|
||||
- Document dependencies and requirements
|
||||
- Provide usage examples
|
||||
- Document configuration options
|
||||
- Include troubleshooting section
|
||||
|
||||
## Tools and Development Workflow
|
||||
|
||||
### Essential Tools
|
||||
|
||||
- `make fmt`: Format code
|
||||
- `make lint`: Additional linting
|
||||
- `make test`: Run tests
|
||||
- `go mod`: Manage dependencies
|
||||
|
||||
### Development Practices
|
||||
|
||||
- Run tests before committing (`make test`)
|
||||
- Run linter before committing (`make lint`)
|
||||
- Keep commits focused and atomic
|
||||
- Write meaningful commit messages
|
||||
- Review diffs before committing
|
||||
|
||||
## Common Pitfalls to Avoid
|
||||
|
||||
- Not checking errors
|
||||
- Ignoring race conditions
|
||||
- Creating goroutine leaks
|
||||
- Not using defer for cleanup
|
||||
- Modifying maps concurrently
|
||||
- Not understanding nil interfaces vs nil pointers
|
||||
- Forgetting to close resources (files, connections)
|
||||
- Using global variables unnecessarily
|
||||
- Over-using unconstrained types (e.g., `any`); prefer specific types or generic type parameters with constraints. If an unconstrained type is required, use `any` rather than `interface{}`
|
||||
- Not considering the zero value of types
|
||||
- **Creating duplicate `package` declarations** - this is a compile error; always check existing files before adding package declarations
|
||||
@@ -0,0 +1,534 @@
|
||||
---
|
||||
description: 'Documentation and content creation standards'
|
||||
applyTo: '**/*.md'
|
||||
---
|
||||
|
||||
## Markdown Content Rules
|
||||
|
||||
The following markdown content rules are enforced in the validators:
|
||||
|
||||
1. **Headings**: Use appropriate heading levels (H2, H3, etc.) to structure your content. Do not use an H1 heading, as this will be generated based on the title.
|
||||
2. **Lists**: Use bullet points or numbered lists for lists. Ensure proper indentation and spacing.
|
||||
3. **Code Blocks**: Use fenced code blocks for code snippets. Specify the language for syntax highlighting.
|
||||
4. **Links**: Use proper markdown syntax for links. Ensure that links are valid and accessible.
|
||||
5. **Images**: Use proper markdown syntax for images. Include alt text for accessibility.
|
||||
6. **Tables**: Use markdown tables for tabular data. Ensure proper formatting and alignment.
|
||||
7. **Line Length**: Limit line length to 400 characters for readability.
|
||||
8. **Whitespace**: Use appropriate whitespace to separate sections and improve readability.
|
||||
9. **Front Matter**: Include YAML front matter at the beginning of the file with required metadata fields.
|
||||
|
||||
## Formatting and Structure
|
||||
|
||||
Follow these guidelines for formatting and structuring your markdown content:
|
||||
|
||||
- **Headings**: Use `##` for H2 and `###` for H3. Ensure that headings are used in a hierarchical manner. Recommend restructuring if content includes H4, and more strongly recommend for H5.
|
||||
- **Lists**: Use `-` for bullet points and `1.` for numbered lists. Indent nested lists with two spaces.
|
||||
- **Code Blocks**: Use triple backticks to create fenced code blocks. Specify the language after the opening backticks for syntax highlighting (e.g., `csharp`).
|
||||
- **Links**: Use `[link text](URL)` for links. Ensure that the link text is descriptive and the URL is valid.
|
||||
- **Images**: Use `![alt text](URL)` for images. Include a brief description of the image in the alt text.
|
||||
- **Tables**: Use `|` to create tables. Ensure that columns are properly aligned and headers are included.
|
||||
- **Line Length**: Break lines at 80 characters to improve readability. Use soft line breaks for long paragraphs.
|
||||
- **Whitespace**: Use blank lines to separate sections and improve readability. Avoid excessive whitespace.
|
||||
|
||||
## Follow our Guidelines
|
||||
|
||||
### Spelling
|
||||
|
||||
In cases where American spelling differs from Commonwealth/"British" spelling, use the American spelling.
|
||||
|
||||
Although non-American readers tend to be tolerant of reading American spelling in technical documentation,
|
||||
they may find it more difficult when they have to type American spellings themselves.
|
||||
For example, if your documentation tells a reader who's used to the spelling colour to type color,
|
||||
they may mistype it. So when you use filenames, URLs, and data parameters in examples,
|
||||
try to avoid words that are spelled differently by different groups of English speakers.
|
||||
|
||||
### Write accessibly
|
||||
|
||||
#### Ease of reading
|
||||
|
||||
* Do not force line breaks (hard returns) within sentences and paragraphs.
|
||||
Line breaks might not work well in resized windows or with enlarged text.
|
||||
* Break up walls of text to aid in scannability.
|
||||
For example, separate paragraphs, create headings, and use lists.
|
||||
* Prefer short sentences.
|
||||
* Define acronyms and abbreviations on first usage and if they are used infrequently.
|
||||
* Place distinguishing and important information of a paragraph in the first sentence to aid in scannability.
|
||||
* Use clear and direct language. Avoid the use of double negatives and exceptions in exceptions.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
A missing path will not prevent you from continuing.
|
||||
```
|
||||
|
||||
<ul>
|
||||
<li>Double negation (missing, not)</li>
|
||||
<li>Use of future tense (will)</li>
|
||||
</ul>
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
You can continue without a path.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Headings and titles
|
||||
|
||||
Use descriptive headings and titles because they help a reader navigate their browser and the page.
|
||||
It's easier to jump between pages and sections of a page if the headings and titles are unique.
|
||||
|
||||
* Use a heading hierarchy.
|
||||
* Do not skip levels of hierarchy (`h3` can only exist under `h2`)
|
||||
* Do not use empty headings
|
||||
* Use a level-1 heading for the page title.
|
||||
* Use sentence casing for titles and headings.
|
||||
|
||||
#### Links
|
||||
|
||||
* Use meaningful link text. Links should make sense when read out of context.
|
||||
* Do not force links to open in a new tab or window, let the reader decide how to open links.
|
||||
* When possible, avoid adjacent links. Instead, put at least one character in between to separate them.
|
||||
* If a link downloads a file, indicate this action and the file type in the link text.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
Use meaningful link text like described [here](https://developers.google.com/style/link-text).
|
||||
Use meaningful link text. [See document.](https://developers.google.com/style/link-text)
|
||||
Use meaningful link text. https://developers.google.com/style/link-text
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Use [meaningful link text](https://developers.google.com/style/link-text).
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Images
|
||||
|
||||
* When possible, use SVG images over any other format, since they are significantly lighter while preserving full visual fidelity at any scale.
|
||||
* For every image, provide alt text that adequately summarizes the intent of each image.
|
||||
* Most of the time, do not present new information in images; always provide an equivalent text explanation with the image. There are of course exceptions for that, such as architecture diagrams, sequence diagrams etc.
|
||||
* Do not repeat images.
|
||||
* Avoid images of text, use text instead.
|
||||
|
||||
#### Tables
|
||||
|
||||
* Introduce tables in the text preceding the table.
|
||||
* Avoid using tables to lay out pages.
|
||||
* If the table contains only a single column, use a list instead.
|
||||
* Do not put tables in the middle of lists or sentences.
|
||||
* Sort rows in a logical order, or alphabetically if there is no logical order.
|
||||
|
||||
### Use the active voice
|
||||
|
||||
In general, use the active voice instead of the passive voice. Make it clear who is performing the action.
|
||||
When using passive voice, it is easy to neglect to indicate who or what is performing the described action.
|
||||
In this kind of construction, it is often hard for readers to figure out who is supposed to do something.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
The service is queried, and an acknowledgment is sent.
|
||||
The service is queried by you, and an acknowledgment is sent by the server.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Send a query to the service. The server sends an acknowledgment.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Exceptions
|
||||
|
||||
In certain cases, it makes more sense to use the passive voice.
|
||||
|
||||
* To emphasize an object over an action.
|
||||
* To de-emphasize a subject or actor.
|
||||
* If your readers do not need to know who is responsible for the action.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
You created over 50 conflicts in the file.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Over 50 conflicts were found in the file.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
The system saved your file.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
The file is saved.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
A system administrator purged the database in January.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
The database was purged in January.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
### Write for a global audience
|
||||
|
||||
* Provide context. Do not assume that the reader already knows what you're talking about.
|
||||
* Avoid negative constructions when possible. Consider whether it's necessary to tell the reader what they can't do instead of what they can.
|
||||
* Avoid directional language (for example, above or below) in procedural documentation.
|
||||
This increases maintenance costs and could lead to future modifications breaking the documentation.
|
||||
|
||||
Here are some examples.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
This document makes use of the following terms:
|
||||
```
|
||||
|
||||
Can be replaced with a simpler verb.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
This document uses the following terms:
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
A hybrid cloud-native DevSecOps pipeline
|
||||
```
|
||||
|
||||
Too many nouns as modifiers of another noun. Can be broken into two parts.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
A cloud-native DevSecOps pipeline in a hybrid environment
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
Only request one token.
|
||||
```
|
||||
|
||||
Misplaced modifier, makes the sentence less clear and more ambiguous.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Request only one token.
|
||||
Request no more than one token.
|
||||
Request a single token.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
If you use the term green beer in an ad, then make sure that it is targeted.
|
||||
```
|
||||
|
||||
Here, "it is" becomes ambiguous. It could describe the green beer or the ad.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
If you use the term green beer in an ad, then make sure that the ad is targeted.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Use present tense
|
||||
|
||||
In general, use present tense rather than future tense; in particular, try to avoid using _will_ where possible.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
Send a query to the service. The server will send an acknowledgment.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Send a query to the service. The server sends an acknowledgment.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
Sometimes, of course, future tense is unavoidable because you're actually talking about the future
|
||||
(for example, _This document will be outdated once PR #12345 gets merged._).
|
||||
Attempting to predict the future in a document is usually a bad idea, but sometimes it's necessary.
|
||||
|
||||
However, the fact that the reader will be writing and running code in the future isn't a good reason to use future tense.
|
||||
|
||||
Also avoid the hypothetical future _would_—for example:
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
You can send an unsubscribe message. The server would then remove you from the mailing list.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
If you send an unsubscribe message, the server removes you from the mailing list.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Use clear, precise, unambiguous language
|
||||
|
||||
* Use simple words. For example, do not use words like _commence_ when you mean _start_ or _begin_.
|
||||
* Define abbreviations. Abbreviations can be confusing out of context, and they don't translate well.
|
||||
Spell things out whenever possible, at least the first time that you use a given term.
|
||||
|
||||
#### Be consistent
|
||||
|
||||
If you use a particular term for a particular concept in one place, then use that exact same term elsewhere, including the same capitalization.
|
||||
|
||||
* Use standard English word order. Sentences follow the subject + verb + object order.
|
||||
* Try to keep the main subject and verb as close to the beginning of the sentence as possible.
|
||||
* Use the conditional clause first. If you want to tell the audience to do something in a particular circumstance, mention the circumstance before you provide the instruction.
|
||||
* Make list items consistent. Make list items parallel in structure. Be consistent in your capitalization and punctuation.
|
||||
* Use consistent typographic formats. Use bold and italics consistently. Don't switch from using italics for emphasis to underlining.
|
||||
* Avoid colloquialisms, idioms, or slang. Phrases like ballpark figure, back burner, or hang in there can be confusing to non-native readers.
|
||||
|
||||
### Describe conditions before instructions
|
||||
|
||||
If you want to tell the reader to do something, try to mention the circumstance, conditions, or goal before you provide the instruction.
|
||||
Mentioning the circumstance first lets the reader skip the instruction if it doesn't apply.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
See [link to other document] for more information.
|
||||
Click Delete if you want to delete the entire document.
|
||||
Using custom domains might add noticeable latency to responses if your app is located in one of the following regions:
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
For more information, see [link to other document].
|
||||
To delete the entire document, click Delete.
|
||||
If your app is located in one of the following regions, using custom domains might add noticeable latency to responses:
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
### Use lists
|
||||
|
||||
Introduce a list with the appropriate context. In most cases, precede a list with an introductory sentence.
|
||||
|
||||
* Use simple numbered lists for steps to be performed in order.
|
||||
* Nested sequential lists can detail sub-steps as well.
|
||||
* Use bulleted lists when there are no sequences or options.
|
||||
|
||||
### Use code blocks
|
||||
|
||||
In most cases, precede a code sample with an introductory sentence.
|
||||
|
||||
* Do not use tabs to indent code; use spaces only.
|
||||
* Wrap lines at 80 characters if you need to, but try to use shorter lines in code blocks.
|
||||
* Specify the code block language, for syntax highlighting.
|
||||
* If the code block is meant to show a command being run, prefer showing the expected output if applicable.
|
||||
|
||||
### Markdown guidelines
|
||||
|
||||
#### Add spacing to headings
|
||||
|
||||
Prefer spacing after `#` and newlines before and after.
|
||||
|
||||
```markdown
|
||||
...text before.
|
||||
|
||||
# Heading 1
|
||||
|
||||
Text after...
|
||||
```
|
||||
|
||||
#### Use lazy numbering for long lists
|
||||
|
||||
Markdown is smart enough to render lazily numbered lists as correctly numbered lists in the resulting HTML.
|
||||
For longer lists that may change, especially long nested lists, use _lazy_ numbering.
|
||||
|
||||
```markdown
|
||||
1. Foo.
|
||||
1. Bar.
|
||||
1. Barbaz.
|
||||
1. Barbar.
|
||||
1. Baz.
|
||||
```
|
||||
|
||||
However, if the list is small, and you don’t anticipate changing it, prefer fully numbered lists,
|
||||
because it is nicer to read in source.
|
||||
|
||||
#### Long links
|
||||
|
||||
Long links make source Markdown difficult to read and break the 80 character wrapping. Wherever possible, **shorten your links**.
|
||||
If it is not possible, feel free to reference links at the bottom of the paragraph instead:
|
||||
|
||||
```markdown
|
||||
This paragraph's lines would get very long and difficult to wrap if the [full link] is included inline.
|
||||
|
||||
[full link]:https://www.reallylong.link/rll/BFob89Cv/Owa_TbBBi3Bn9/n5cahxQtC4TOH/afoPnUDyyOS/_8Ilq4zSBjqmo8w/j6UN1uviS9zky
|
||||
```
|
||||
|
||||
#### Prefer lists to tables
|
||||
|
||||
Any tables in your Markdown should be small.
|
||||
Complex, large tables are difficult to read in source and most importantly, a pain to modify later.
|
||||
|
||||
Lists and subheadings usually suffice to present the same information in a slightly less compact,
|
||||
though much more edit-friendly way.
|
||||
|
||||
Here is a bad example:
|
||||
|
||||
```markdown
|
||||
Fruit | Attribute | Notes
|
||||
--- | --- | ---
|
||||
Apple | [Juicy](https://example.com/SomeReallyReallyReallyReallyReallyReallyReallyReallyLongQuery), Firm, Sweet | Apples keep doctors away.
|
||||
Banana | [Convenient](https://example.com/SomeDifferentReallyReallyReallyReallyReallyReallyReallyReallyLongQuery), Soft, Sweet | Contrary to popular belief, most apes prefer mangoes.
|
||||
```
|
||||
|
||||
And here is a better alternative:
|
||||
|
||||
```markdown
|
||||
## Fruits
|
||||
|
||||
### Apple
|
||||
|
||||
* [Juicy](https://SomeReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongURL)
|
||||
* Firm
|
||||
* Sweet
|
||||
|
||||
Apples keep doctors away.
|
||||
|
||||
### Banana
|
||||
|
||||
* [Convenient](https://example.com/SomeDifferentReallyReallyReallyReallyReallyReallyReallyReallyLongQuery)
|
||||
* Soft
|
||||
* Sweet
|
||||
|
||||
Contrary to popular belief, most apes prefer mangoes.
|
||||
```
|
||||
|
||||
#### Strongly prefer Markdown to HTML
|
||||
|
||||
Please prefer standard Markdown syntax wherever possible and avoid HTML hacks.
|
||||
If you cannot accomplish what you want with plain Markdown, reconsider whether you really need it.
|
||||
Except for big tables, Markdown meets almost all needs already.
|
||||
|
||||
Every bit of HTML or JavaScript hacking reduces readability and portability.
|
||||
This in turn limits the usefulness of integrations with other tools, which may either present the source as plain text or render it.
|
||||
|
||||
#### Spacing
|
||||
|
||||
* Remove all trailing whitespace at the end of lines.
|
||||
* Remove instances of multiple consecutive blank lines.
|
||||
* Files should end with a single newline character.
|
||||
|
||||
|
||||
## Validation Requirements
|
||||
|
||||
Ensure compliance with the following validation requirements:
|
||||
|
||||
- **Front Matter**: Include the following fields in the YAML front matter:
|
||||
|
||||
- `post_title`: The title of the post.
|
||||
- `author1`: The primary author of the post.
|
||||
- `post_slug`: The URL slug for the post.
|
||||
- `microsoft_alias`: The Microsoft alias of the author.
|
||||
- `featured_image`: The URL of the featured image.
|
||||
- `categories`: The categories for the post. These categories must be from the list in /categories.txt.
|
||||
- `tags`: The tags for the post.
|
||||
- `ai_note`: Indicate if AI was used in the creation of the post.
|
||||
- `summary`: A brief summary of the post. Recommend a summary based on the content when possible.
|
||||
- `post_date`: The publication date of the post.
|
||||
|
||||
- **Content Rules**: Ensure that the content follows the markdown content rules specified above.
|
||||
- **Formatting**: Ensure that the content is properly formatted and structured according to the guidelines.
|
||||
- **Validation**: Run the validation tools to check for compliance with the rules and guidelines.
|
||||
|
||||
## Admonitions
|
||||
|
||||
Use GitHub-flavored markdown for admonitions: NOTE, WARNING, TIP, IMPORTANT, CAUTION.
|
||||
|
||||
Examples:
|
||||
|
||||
```markdown
|
||||
> [!NOTE]
|
||||
> Highlights information that users should take into account, even when skimming.
|
||||
|
||||
> [!TIP]
|
||||
> Optional information to help a user be more successful.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Crucial information necessary for users to succeed.
|
||||
|
||||
> [!WARNING]
|
||||
> Critical content demanding immediate user attention due to potential risks.
|
||||
|
||||
> [!CAUTION]
|
||||
> Negative potential consequences of an action.
|
||||
```
|
||||
@@ -0,0 +1,56 @@
|
||||
name: Go Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Go
|
||||
id: install-go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
|
||||
- name: Cache Go mod
|
||||
id: gomod
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-mod-
|
||||
|
||||
- name: Cache Go build
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-${{ github.ref_name }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-build-
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
if: steps.gomod.outputs.cache-hit != 'true'
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v7
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: ${{ inputs.GORELEASER_VERSION }}
|
||||
args: release --clean --snapshot --skip=docker
|
||||
@@ -0,0 +1,47 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Go
|
||||
id: install-go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
if: steps.install-go.outputs.cache-hit != 'true'
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v4
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v7
|
||||
env:
|
||||
GORELEASER_CURRENT_TAG: ${{ github.ref_name }}
|
||||
DOCKER_REPOSITORY: ullaakut/cameradar
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: ${{ inputs.GORELEASER_VERSION }}
|
||||
args: release --clean
|
||||
@@ -0,0 +1,70 @@
|
||||
name: Test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
# Go Test looks at `mtime` for caching. `git clone` messes with this. Set it consistently to last commit time.
|
||||
- name: Restore file modification time
|
||||
run: git ls-files -z | while read -d '' path; do touch -d "$(git log -1 --format="@%ct" "$path")" "$path"; done
|
||||
|
||||
# We need to set a cache marker to ensure that the cache is individual for each job.
|
||||
- name: Add Cache Marker
|
||||
run: echo "go-test" > env.txt
|
||||
|
||||
- name: Install Go
|
||||
id: install-go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
env.txt
|
||||
|
||||
# We trigger mod download separately as otherwise it will count towards
|
||||
# the 1 minute default timeout of golangci-lint. Only needed if there is no cache.
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
if: steps.install-go.outputs.cache-hit != 'true'
|
||||
|
||||
- name: Run Linter
|
||||
uses: golangci/golangci-lint-action@v9
|
||||
with:
|
||||
version: v2.7.2
|
||||
|
||||
- name: Setup gotestsum
|
||||
uses: gertd/action-gotestsum@v3.0.0
|
||||
with:
|
||||
gotestsum_version: v1.13.0
|
||||
|
||||
- name: Download nmap
|
||||
run: sudo apt-get install -y nmap
|
||||
|
||||
- name: Run Tests
|
||||
env:
|
||||
TEST_DIR: ${{ inputs.TEST_DIR }}
|
||||
run: |
|
||||
GOTESTSUM_FLAGS="--junitfile tests.xml --format pkgname -- -cover -race"
|
||||
if [ -z "$TEST_DIR" ]; then
|
||||
gotestsum $GOTESTSUM_FLAGS ./...
|
||||
else
|
||||
gotestsum $GOTESTSUM_FLAGS ./$TEST_DIR/...
|
||||
fi
|
||||
|
||||
- name: Test Summary
|
||||
uses: test-summary/action@v2
|
||||
with:
|
||||
paths: "tests.xml"
|
||||
if: always()
|
||||
+5
-27
@@ -1,28 +1,6 @@
|
||||
# Compiled Object files
|
||||
*.slo
|
||||
*.lo
|
||||
*.o
|
||||
*.obj
|
||||
# IDE config
|
||||
.idea/
|
||||
.vscode/
|
||||
|
||||
# Precompiled Headers
|
||||
*.gch
|
||||
*.pch
|
||||
|
||||
# Compiled Dynamic libraries
|
||||
*.so
|
||||
*.dylib
|
||||
*.dll
|
||||
|
||||
# Fortran module files
|
||||
*.mod
|
||||
|
||||
# Compiled Static libraries
|
||||
*.lai
|
||||
*.la
|
||||
*.a
|
||||
*.lib
|
||||
|
||||
# Executables
|
||||
*.exe
|
||||
*.out
|
||||
*.app
|
||||
# Builds
|
||||
dist/
|
||||
@@ -0,0 +1,72 @@
|
||||
version: "2"
|
||||
run:
|
||||
tests: false
|
||||
linters:
|
||||
default: all
|
||||
disable:
|
||||
- depguard
|
||||
- dupl
|
||||
- err113
|
||||
- exhaustive
|
||||
- exhaustruct
|
||||
- forcetypeassert
|
||||
- funcorder
|
||||
- funlen
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gocyclo
|
||||
- godox
|
||||
- gomoddirectives
|
||||
- inamedparam
|
||||
- ireturn
|
||||
- mnd
|
||||
- nilnil
|
||||
- nlreturn
|
||||
- nonamedreturns
|
||||
- tagliatelle
|
||||
- varnamelen
|
||||
- wrapcheck
|
||||
- wsl
|
||||
- wsl_v5
|
||||
settings:
|
||||
cyclop:
|
||||
max-complexity: 15
|
||||
gosec:
|
||||
excludes:
|
||||
- G101
|
||||
- G304
|
||||
- G402
|
||||
lll:
|
||||
line-length: 160
|
||||
tagliatelle:
|
||||
case:
|
||||
rules:
|
||||
json: pascal
|
||||
use-field-name: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
rules:
|
||||
- path: (.+)\.go$
|
||||
text: 'string `none` has (.+) occurrences, make it a constant'
|
||||
- path: (.+)\.go$
|
||||
text: 'ST1000: at least one file in a package should have a package comment'
|
||||
- path: (.+)\.go$
|
||||
text: 'package-comments: should have a package comment'
|
||||
- path: (.+)\.go$
|
||||
text: 'Error return value of `.+\.Close` is not checked'
|
||||
- linters:
|
||||
- cyclop
|
||||
path: (.+)_test\.go
|
||||
paths: []
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- goimports
|
||||
settings:
|
||||
gofumpt:
|
||||
extra-rules: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths: []
|
||||
+100
@@ -0,0 +1,100 @@
|
||||
version: 2
|
||||
project_name: cameradar
|
||||
dist: dist/cameradar
|
||||
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
before:
|
||||
hooks:
|
||||
- go mod download
|
||||
|
||||
builds:
|
||||
- binary: cameradar
|
||||
main: ./cmd/cameradar
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- windows
|
||||
- darwin
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- 386
|
||||
- arm
|
||||
- arm64
|
||||
goarm:
|
||||
- 6
|
||||
- 7
|
||||
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: 386
|
||||
|
||||
changelog:
|
||||
disable: true
|
||||
|
||||
checksum:
|
||||
name_template: "{{ .ProjectName }}_checksums.txt"
|
||||
|
||||
archives:
|
||||
- name_template: "{{ .Binary }}_{{ .Os }}_{{ .Arch }}{{ if .Arm}}v{{ .Arm }}{{ end }}"
|
||||
formats:
|
||||
- tar.gz
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
|
||||
dockers:
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-amd64"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-amd64"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-386"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-386"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: 386
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv6"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv6"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: arm
|
||||
goarm: 6
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv7"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv7"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: arm
|
||||
goarm: 7
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-arm64"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-arm64"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: arm64
|
||||
|
||||
docker_manifests:
|
||||
- name_template: "ullaakut/{{ .ProjectName }}:v{{ .Version }}"
|
||||
image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-amd64"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-386"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv6"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv7"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-arm64"
|
||||
- name_template: "ullaakut/{{ .ProjectName }}:latest"
|
||||
image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:latest-amd64"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-386"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv6"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv7"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-arm64"
|
||||
@@ -0,0 +1,128 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
`contact+cameradar@glaulabs.com`.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
||||
+113
@@ -0,0 +1,113 @@
|
||||
## Contributing
|
||||
|
||||
Thanks for helping improve Cameradar.
|
||||
Please keep changes focused and aligned with the project goals.
|
||||
|
||||
## Development setup
|
||||
|
||||
- Go 1.25 or later
|
||||
- Docker (optional, for container testing)
|
||||
|
||||
Clone the repo and install dependencies using Go modules.
|
||||
|
||||
```bash
|
||||
go mod download
|
||||
```
|
||||
|
||||
### Test against fake targets
|
||||
|
||||
Use the following options when you want reproducible local testing.
|
||||
|
||||
#### Testing discovery behavior
|
||||
|
||||
Use `scanme.nmap.org` to validate discovery-related behavior.
|
||||
|
||||
- `scanme.nmap.org` does not expose RTSP or RTSPS ports.
|
||||
- Target its open ports (for example `22`, `80`, `9929`, `31337`) to test discovery flow, reporting, and scan handling.
|
||||
|
||||
Example command:
|
||||
|
||||
```bash
|
||||
cameradar -t scanme.nmap.org -p 22
|
||||
```
|
||||
|
||||
#### Testing RTSP and attack behavior
|
||||
|
||||
Use [RTSPAllTheThings](https://github.com/Ullaakut/RTSPAllTheThings) to test RTSP-specific logic and camera attack flows.
|
||||
|
||||
- It supports both basic and digest authentication.
|
||||
- It behaves like a standards-compliant RTSP camera.
|
||||
|
||||
> [!CAUTION]
|
||||
> It is no longer maintained and has limited camera emulation coverage.
|
||||
|
||||
Example command:
|
||||
|
||||
```bash
|
||||
docker run --net=host -p 8554:8554 -e RTSP_USERNAME=admin -e RTSP_PASSWORD=12345 -e RTSP_PORT=8554 -e RTSP_AUTHENTICATION_METHOD=digest ullaakut/rtspatt
|
||||
```
|
||||
|
||||
Many real cameras slightly diverge from strict RTSP behavior. For example, some devices allow `DESCRIBE` without authentication, or return `403` and `404` in an order that differs from strict expectations.
|
||||
Unfortunately, RTSPATT cannot reproduce those behaviors.
|
||||
|
||||
#### Prefer real cameras when possible
|
||||
|
||||
The most reliable testing method is running against real cameras and real network conditions.
|
||||
|
||||
> [!CAUTION]
|
||||
> Scan only authorized targets and networks.
|
||||
|
||||
## Run tests
|
||||
|
||||
```bash
|
||||
make test
|
||||
```
|
||||
|
||||
## Formatting and linting
|
||||
|
||||
Keep code idiomatic and consistent with existing style.
|
||||
By default, follow the [Uber Go Style Guide](https://github.com/uber-go/guide) and the guidelines from [Effective Go](https://go.dev/doc/effective_go).
|
||||
|
||||
```bash
|
||||
make fmt
|
||||
```
|
||||
|
||||
### Dependency for linting
|
||||
|
||||
* golangci-lint
|
||||
* see the current version defined in `.github/workflows/test.yaml` under the "Run Linter" step of the `test` job
|
||||
* configured in `.golangci.yml`
|
||||
|
||||
```bash
|
||||
make lint
|
||||
```
|
||||
|
||||
## Commit messages and PR titles
|
||||
|
||||
Use [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) for commit messages and pull request titles.
|
||||
|
||||
- Use the format: `type: subject`
|
||||
- Write the subject in imperative mood: `add`, `update`, `remove`, `fix`, `refactor`
|
||||
- Do not use gerunds in subjects: avoid `adding`, `updating`, `removing`
|
||||
|
||||
Examples:
|
||||
|
||||
- `feat: add RTSP timeout flag`
|
||||
- `fix: remove duplicate progress line`
|
||||
- `docs: update commit message guidelines`
|
||||
|
||||
## Reporting issues
|
||||
|
||||
Use the issue template in [.github/ISSUE_TEMPLATE.md](.github/ISSUE_TEMPLATE.md).
|
||||
Include the version, environment, and repro steps.
|
||||
Only scan authorized targets.
|
||||
|
||||
## Pull requests
|
||||
|
||||
1. Create a feature branch from `main`.
|
||||
2. Keep PRs focused and small.
|
||||
3. Update documentation when behavior changes.
|
||||
4. Add or update tests when possible.
|
||||
5. Ensure `make test` passes.
|
||||
6. Try to bring as much test coverage as possible with your changes.
|
||||
7. Use a Conventional Commit-style PR title with an imperative subject.
|
||||
+17
@@ -0,0 +1,17 @@
|
||||
FROM alpine
|
||||
|
||||
RUN apk --update add --no-cache nmap \
|
||||
nmap-nselibs \
|
||||
nmap-scripts \
|
||||
masscan \
|
||||
libpcap \
|
||||
libpcap-dev
|
||||
|
||||
WORKDIR /app/cameradar
|
||||
|
||||
COPY cameradar /app/cameradar/cameradar
|
||||
|
||||
ENV CAMERADAR_CUSTOM_ROUTES="/app/dictionaries/routes"
|
||||
ENV CAMERADAR_CUSTOM_CREDENTIALS="/app/dictionaries/credentials.json"
|
||||
|
||||
ENTRYPOINT ["/app/cameradar/cameradar"]
|
||||
@@ -1,201 +1,17 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
@@ -0,0 +1,28 @@
|
||||
# set this e.g. via `make build GORELEASER_FLAGS="--skip=docker"` for temporary flags
|
||||
GORELEASER_FLAGS=
|
||||
|
||||
#Format
|
||||
|
||||
fmt:
|
||||
@echo "==> Formatting source"
|
||||
@gofmt -s -w $(shell find . -type f -name '*.go')
|
||||
@echo "==> Done"
|
||||
.PHONY: fmt
|
||||
|
||||
#Test
|
||||
|
||||
test:
|
||||
@go test -cover -race ./...
|
||||
.PHONY: test
|
||||
|
||||
#Lint
|
||||
|
||||
lint:
|
||||
@golangci-lint run --config=.golangci.yml ./...
|
||||
.PHONY: lint
|
||||
|
||||
#Build
|
||||
|
||||
build:
|
||||
@goreleaser release $(GORELEASER_FLAGS) --clean --snapshot
|
||||
.PHONY: build
|
||||
@@ -0,0 +1,328 @@
|
||||
## Cameradar
|
||||
|
||||
<p align="center">
|
||||
<a href="#license">
|
||||
<img src="https://img.shields.io/badge/license-MIT-blue.svg?style=flat" />
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/ullaakut/cameradar/">
|
||||
<img src="https://img.shields.io/docker/pulls/ullaakut/cameradar.svg?style=flat" />
|
||||
</a>
|
||||
<a href="https://github.com/Ullaakut/cameradar/actions">
|
||||
<img src="https://img.shields.io/github/actions/workflow/status/Ullaakut/cameradar/build.yaml" />
|
||||
</a>
|
||||
<a href='https://coveralls.io/github/Ullaakut/cameradar?branch=master'>
|
||||
<img src='https://coveralls.io/repos/github/Ullaakut/cameradar/badge.svg?branch=master' alt='Coverage Status' />
|
||||
</a>
|
||||
<a href="https://goreportcard.com/report/github.com/ullaakut/cameradar">
|
||||
<img src="https://goreportcard.com/badge/github.com/ullaakut/cameradar" />
|
||||
</a>
|
||||
<a href="https://github.com/ullaakut/cameradar/releases/latest">
|
||||
<img src="https://img.shields.io/github/release/Ullaakut/cameradar.svg?style=flat" />
|
||||
</a>
|
||||
<a href="https://pkg.go.dev/github.com/ullaakut/cameradar">
|
||||
<img src="https://godoc.org/github.com/ullaakut/cameradar?status.svg" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
## RTSP stream access tool
|
||||
|
||||
Cameradar scans RTSP endpoints on authorized targets, and uses dictionary attacks to bruteforce their credentials and routes.
|
||||
|
||||
### What Cameradar does
|
||||
|
||||
- Detects open RTSP hosts on accessible targets.
|
||||
- Detects the device model that streams the RTSP feed.
|
||||
- Attempts dictionary-based discovery of stream routes (for example, `/live.sdp`).
|
||||
- Attempts dictionary-based discovery of camera credentials.
|
||||
- Produces a report of findings.
|
||||
|
||||
<p align="center"><img src="images/Cameradar.png" width="250"/></p>
|
||||
|
||||
## Table of contents
|
||||
|
||||
- [Quick start with Docker](#quick-start-with-docker)
|
||||
- [Install the binary](#install-the-binary)
|
||||
- [Install on Android (Termux)](#install-on-android-termux)
|
||||
- [Configuration](#configuration)
|
||||
- [Security and responsible use](#security-and-responsible-use)
|
||||
- [Output](#output)
|
||||
- [Check camera access](#check-camera-access)
|
||||
- [Command-line options and environment variables](#command-line-options-and-environment-variables)
|
||||
- [Input file format](#input-file-format)
|
||||
- [Build and contribute](#build-and-contribute)
|
||||
- [Frequently asked questions](#frequently-asked-questions)
|
||||
- [Examples](#examples)
|
||||
- [License](#license)
|
||||
|
||||
---
|
||||
|
||||
<p align="center"><img src="images/example.gif"/></p>
|
||||
|
||||
## Quick start with Docker
|
||||
|
||||
Install [Docker](https://docs.docker.com/engine/installation/) and run:
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host ullaakut/cameradar --targets <target>
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host ullaakut/cameradar --targets 192.168.100.0/24
|
||||
```
|
||||
|
||||
This scans ports 554, 5554, and 8554 on the target subnet.
|
||||
It attempts to enumerate RTSP streams.
|
||||
For all options, see [Configuration reference](https://github.com/Ullaakut/cameradar/wiki/Configuration-Reference).
|
||||
|
||||
- Targets can be CIDRs, IPs, IP ranges or a hostname.
|
||||
- Subnet: `172.16.100.0/24`
|
||||
- IP: `172.16.100.10`
|
||||
- Host: `localhost`
|
||||
- Range: `172.16.100.10-20`
|
||||
|
||||
- To use custom dictionaries, mount them and pass both flags:
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
-v /path/to/dictionaries:/tmp/dictionaries \
|
||||
ullaakut/cameradar \
|
||||
--custom-routes /tmp/dictionaries/my_routes \
|
||||
--custom-credentials /tmp/dictionaries/my_credentials.json \
|
||||
--targets 192.168.100.0/24
|
||||
```
|
||||
|
||||
## Install the binary
|
||||
|
||||
Use this option if Docker is not available or if you want a local build.
|
||||
|
||||
### Dependencies
|
||||
|
||||
- Go 1.25 or later
|
||||
|
||||
### Steps
|
||||
|
||||
1. `go install github.com/Ullaakut/cameradar/v6/cmd/cameradar@latest`
|
||||
|
||||
The `cameradar` binary is now in your `$GOPATH/bin`.
|
||||
For available flags, see [Configuration reference](https://github.com/Ullaakut/cameradar/wiki/Configuration-Reference).
|
||||
|
||||
## Install on Android (Termux)
|
||||
|
||||
These steps summarize a working Termux setup for Android.
|
||||
Use Termux 117 from F-Droid or the official Termux site, not Google Play.
|
||||
|
||||
### 1) Set up Termux and Alpine
|
||||
|
||||
Install the required packages in Termux:
|
||||
|
||||
```bash
|
||||
pkg update
|
||||
pkg install mc wget git nmap proot-distro
|
||||
```
|
||||
|
||||
Install Alpine and log in:
|
||||
|
||||
```bash
|
||||
proot-distro install alpine
|
||||
proot-distro login alpine
|
||||
```
|
||||
|
||||
### 2) Install build tools in Alpine
|
||||
|
||||
```bash
|
||||
apk add wget git go gcc clang musl-dev make
|
||||
```
|
||||
|
||||
### 3) Build Cameradar
|
||||
|
||||
Create a module path and clone the repo:
|
||||
|
||||
```bash
|
||||
mkdir -p go/pkg/mod/github.com/Ullaakut
|
||||
cd go/pkg/mod/github.com/Ullaakut
|
||||
git clone https://github.com/Ullaakut/cameradar.git
|
||||
cd cameradar/cmd/cameradar
|
||||
go install
|
||||
```
|
||||
|
||||
### 4) Run Cameradar
|
||||
|
||||
Copy dictionaries and run the binary:
|
||||
|
||||
```bash
|
||||
mkdir -p /tmp
|
||||
cp -r ../../dictionaries /tmp/dictionaries
|
||||
/go/bin/cameradar --targets=<target> --custom-credentials=/tmp/dictionaries/credentials.json --custom-routes=/tmp/dictionaries/routes --ui=plain --debug
|
||||
```
|
||||
|
||||
Replace `<target>` with an IP, range, host or subnet you are authorized to test.
|
||||
|
||||
## Configuration
|
||||
|
||||
The default RTSP ports are `554`, `5554`, `8554`.
|
||||
If you do not specify ports, Cameradar uses those.
|
||||
|
||||
Example of scanning custom ports:
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
ullaakut/cameradar \
|
||||
--ports "18554,19000-19010" \
|
||||
--targets localhost
|
||||
```
|
||||
|
||||
You can replace the default dictionaries with your own routes and credentials files.
|
||||
The repository provides baseline dictionaries in the `dictionaries` folder.
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
-v /my/folder/with/dictionaries:/tmp/dictionaries \
|
||||
ullaakut/cameradar \
|
||||
--custom-routes /tmp/dictionaries/my_routes \
|
||||
--custom-credentials /tmp/dictionaries/my_credentials.json \
|
||||
--targets 172.19.124.0/24
|
||||
```
|
||||
|
||||
### Skip discovery with `--skip-scan`
|
||||
|
||||
If you already know the RTSP endpoints, you can skip discovery and treat each
|
||||
target and port as a stream candidate. This mode does not run discovery and can be
|
||||
useful on restricted networks or when you want to attack a known inventory.
|
||||
|
||||
Skipping discovery means:
|
||||
|
||||
- Cameradar does not run discovery and does not detect device models.
|
||||
- Targets resolve to IP addresses. Hostnames resolve via DNS.
|
||||
- CIDR blocks and IPv4 ranges expand to every address in the range.
|
||||
- Large ranges create many targets, so use them carefully.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
ullaakut/cameradar \
|
||||
--skip-scan \
|
||||
--ports "554,8554" \
|
||||
--targets 192.168.1.10
|
||||
```
|
||||
|
||||
In this example, Cameradar attempts dictionary attacks against
|
||||
ports 554 and 8554 of `192.168.1.10`.
|
||||
|
||||
### Choose the discovery scanner with `--scanner`
|
||||
|
||||
Cameradar supports two discovery backends:
|
||||
|
||||
- `nmap` (default)
|
||||
- `masscan`
|
||||
|
||||
Use `nmap` when you want more reliable RTSP discovery: it performs service
|
||||
identification and can better distinguish RTSP from other open ports.
|
||||
|
||||
Use `masscan` when scanning very large networks: it is generally faster and
|
||||
more efficient at scale, but it does not provide service discovery.
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
ullaakut/cameradar \
|
||||
--scanner masscan \
|
||||
--ports "554,8554" \
|
||||
--targets 192.168.1.0/24
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> `--scan-speed` only applies to the `nmap` scanner.
|
||||
|
||||
## Security and responsible use
|
||||
|
||||
Cameradar is a penetration testing tool.
|
||||
Only scan networks and devices you own or have explicit permission to test.
|
||||
Do not use this tool to access unauthorized systems or streams.
|
||||
If you are unsure, stop and get written approval before scanning.
|
||||
|
||||
## Output
|
||||
|
||||
Cameradar presents results in a readable terminal UI.
|
||||
It logs findings to the console.
|
||||
The report includes discovered hosts, identified device models, and valid routes or credentials.
|
||||
If you specify a path for the `--output` flag, Cameradar also writes an M3U playlist with the discovered streams.
|
||||
|
||||
## Check camera access
|
||||
|
||||
Use [VLC Media Player](http://www.videolan.org/vlc/) to connect to a stream:
|
||||
|
||||
`rtsp://username:password@address:port/route`
|
||||
|
||||
## Input file format
|
||||
|
||||
The file can contain IPs, hostnames, IP ranges, and subnets.
|
||||
Separate entries with newlines.
|
||||
Example:
|
||||
|
||||
```text
|
||||
0.0.0.0
|
||||
localhost
|
||||
192.17.0.0/16
|
||||
192.168.1.140-255
|
||||
192.168.2-3.0-255
|
||||
```
|
||||
|
||||
When you use `--skip-scan`, Cameradar expands each entry into explicit IP
|
||||
addresses before building the target list.
|
||||
|
||||
## Command-line options and environment variables
|
||||
|
||||
The complete CLI and environment variable reference is maintained in [Configuration reference](https://github.com/Ullaakut/cameradar/wiki/Configuration-Reference).
|
||||
|
||||
This includes all supported flags, defaults, accepted values, and env var mapping.
|
||||
|
||||
## Build and contribute
|
||||
|
||||
### Docker build
|
||||
|
||||
Run the following command in the repository root:
|
||||
|
||||
`docker build . -t cameradar`
|
||||
|
||||
The resulting image is named `cameradar`.
|
||||
|
||||
### Go build
|
||||
|
||||
1. `go install github.com/Ullaakut/cameradar/v6/cmd/cameradar@latest`
|
||||
|
||||
The `cameradar` binary is now in `$GOPATH/bin/cameradar`.
|
||||
|
||||
## Frequently asked questions
|
||||
|
||||
See [Troubleshooting & FAQ](https://github.com/Ullaakut/cameradar/wiki/Troubleshooting-%26-FAQ)
|
||||
|
||||
## Examples
|
||||
|
||||
> Running cameradar on your own machine to scan for default ports
|
||||
|
||||
`docker run --rm -t --net=host ullaakut/cameradar --targets localhost`
|
||||
|
||||
> Running cameradar with an input file, logs enabled on port 8554
|
||||
|
||||
`docker run --rm -t --net=host -v /tmp:/tmp ullaakut/cameradar --targets /tmp/test.txt --ports 8554`
|
||||
|
||||
> Running cameradar on a subnetwork with custom dictionaries, on ports 554, 5554 and 8554
|
||||
|
||||
`docker run --rm -t --net=host -v /tmp:/tmp ullaakut/cameradar --targets 192.168.0.0/24 --custom-credentials "/tmp/dictionaries/credentials.json" --custom-routes "/tmp/dictionaries/routes" --ports 554,5554,8554`
|
||||
|
||||
> Running cameradar with masscan discovery
|
||||
|
||||
`docker run --rm -t --net=host ullaakut/cameradar --scanner masscan --targets 192.168.0.0/24 --ports 554,8554`
|
||||
|
||||
## License
|
||||
|
||||
Copyright 2026 Ullaakut
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@@ -0,0 +1,78 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Reporter reports progress and results of the application.
|
||||
type Reporter interface {
|
||||
Start(step Step, message string)
|
||||
Done(step Step, message string)
|
||||
Error(step Step, err error)
|
||||
Summary(streams []Stream, err error)
|
||||
}
|
||||
|
||||
// App scans one or more targets and attacks all RTSP streams found to get their credentials.
|
||||
type App struct {
|
||||
streamScanner StreamScanner
|
||||
attacker StreamAttacker
|
||||
reporter Reporter
|
||||
|
||||
targets []string
|
||||
ports []string
|
||||
}
|
||||
|
||||
// StreamScanner discovers RTSP streams for the given inputs.
|
||||
type StreamScanner interface {
|
||||
Scan(ctx context.Context) ([]Stream, error)
|
||||
}
|
||||
|
||||
// StreamAttacker attacks streams to discover routes and credentials.
|
||||
type StreamAttacker interface {
|
||||
Attack(ctx context.Context, streams []Stream) ([]Stream, error)
|
||||
}
|
||||
|
||||
// New creates a new App with explicit dependencies.
|
||||
func New(streamScanner StreamScanner, attacker StreamAttacker, targets, ports []string, reporter Reporter) (*App, error) {
|
||||
if streamScanner == nil {
|
||||
return nil, errors.New("stream scanner is required")
|
||||
}
|
||||
if attacker == nil {
|
||||
return nil, errors.New("stream attacker is required")
|
||||
}
|
||||
|
||||
app := &App{
|
||||
streamScanner: streamScanner,
|
||||
attacker: attacker,
|
||||
targets: targets,
|
||||
ports: ports,
|
||||
reporter: reporter,
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// Run runs the scan and prints the results.
|
||||
func (a *App) Run(ctx context.Context) error {
|
||||
a.reporter.Start(StepScan, "Scanning targets for RTSP streams")
|
||||
streams, err := a.streamScanner.Scan(ctx)
|
||||
if err != nil {
|
||||
wrapped := fmt.Errorf("discovering devices: %w", err)
|
||||
a.reporter.Error(StepScan, wrapped)
|
||||
a.reporter.Summary(streams, wrapped)
|
||||
return wrapped
|
||||
}
|
||||
a.reporter.Done(StepScan, "Scan complete")
|
||||
|
||||
streams, err = a.attacker.Attack(ctx, streams)
|
||||
if err != nil {
|
||||
wrapped := fmt.Errorf("attacking devices: %w", err)
|
||||
a.reporter.Summary(streams, wrapped)
|
||||
return wrapped
|
||||
}
|
||||
|
||||
a.reporter.Summary(streams, nil)
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,230 @@
|
||||
package cameradar_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
scanner cameradar.StreamScanner
|
||||
attacker cameradar.StreamAttacker
|
||||
wantErr require.ErrorAssertionFunc
|
||||
wantMsg string
|
||||
}{
|
||||
{
|
||||
name: "missing scanner",
|
||||
scanner: nil,
|
||||
attacker: &fakeAttacker{},
|
||||
wantErr: require.Error,
|
||||
wantMsg: "stream scanner is required",
|
||||
},
|
||||
{
|
||||
name: "missing attacker",
|
||||
scanner: &fakeScanner{},
|
||||
attacker: nil,
|
||||
wantErr: require.Error,
|
||||
wantMsg: "stream attacker is required",
|
||||
},
|
||||
{
|
||||
name: "valid",
|
||||
scanner: &fakeScanner{},
|
||||
attacker: &fakeAttacker{},
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
app, err := cameradar.New(test.scanner, test.attacker, []string{"target"}, []string{"554"}, &recordingReporter{})
|
||||
test.wantErr(t, err)
|
||||
if test.wantMsg != "" {
|
||||
assert.ErrorContains(t, err, test.wantMsg)
|
||||
}
|
||||
if err == nil {
|
||||
require.NotNil(t, app)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApp_Run(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
streams := []cameradar.Stream{{Port: 554}}
|
||||
attacked := []cameradar.Stream{{Port: 8554}}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
scanner *fakeScanner
|
||||
attacker *fakeAttacker
|
||||
wantErrContains string
|
||||
wantErrorCalls int
|
||||
wantDoneCalls int
|
||||
wantSummaryErr string
|
||||
wantSummary []cameradar.Stream
|
||||
}{
|
||||
{
|
||||
name: "success",
|
||||
scanner: &fakeScanner{
|
||||
streams: streams,
|
||||
},
|
||||
attacker: &fakeAttacker{
|
||||
streams: attacked,
|
||||
},
|
||||
wantDoneCalls: 1,
|
||||
wantSummary: attacked,
|
||||
wantSummaryErr: "",
|
||||
},
|
||||
{
|
||||
name: "scan error",
|
||||
scanner: &fakeScanner{
|
||||
streams: streams,
|
||||
err: errors.New("scan failed"),
|
||||
},
|
||||
attacker: &fakeAttacker{},
|
||||
wantErrContains: "discovering devices",
|
||||
wantErrorCalls: 1,
|
||||
wantSummary: streams,
|
||||
wantSummaryErr: "discovering devices",
|
||||
},
|
||||
{
|
||||
name: "attack error",
|
||||
scanner: &fakeScanner{
|
||||
streams: streams,
|
||||
},
|
||||
attacker: &fakeAttacker{
|
||||
err: errors.New("attack failed"),
|
||||
},
|
||||
wantErrContains: "attacking devices",
|
||||
wantDoneCalls: 1,
|
||||
wantSummary: streams,
|
||||
wantSummaryErr: "attacking devices",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
reporter := &recordingReporter{}
|
||||
scanner := test.scanner
|
||||
attacker := test.attacker
|
||||
|
||||
app, err := cameradar.New(scanner, attacker, []string{"target"}, []string{"554"}, reporter)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = app.Run(ctx)
|
||||
if test.wantErrContains != "" {
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, test.wantErrContains)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, 1, scanner.calls)
|
||||
assert.Same(t, ctx, scanner.gotCtx)
|
||||
|
||||
if test.wantErrContains == "discovering devices" {
|
||||
assert.Equal(t, 0, attacker.calls)
|
||||
} else {
|
||||
assert.Equal(t, 1, attacker.calls)
|
||||
assert.Equal(t, streams, attacker.gotStreams)
|
||||
}
|
||||
|
||||
assert.Equal(t, 1, reporter.startCalls)
|
||||
assert.Equal(t, test.wantDoneCalls, reporter.doneCalls)
|
||||
assert.Equal(t, test.wantErrorCalls, reporter.errorCalls)
|
||||
require.Equal(t, 1, reporter.summaryCalls)
|
||||
assert.Equal(t, test.wantSummary, reporter.summaryStreams)
|
||||
if test.wantSummaryErr == "" {
|
||||
assert.NoError(t, reporter.summaryErr)
|
||||
} else {
|
||||
require.Error(t, reporter.summaryErr)
|
||||
assert.ErrorContains(t, reporter.summaryErr, test.wantSummaryErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// fakeScanner is a test double for the cameradar stream scanner. It
// returns canned results and records how it was invoked.
type fakeScanner struct {
	streams []cameradar.Stream // streams returned by Scan
	err     error              // error returned by Scan

	calls      int             // number of times Scan was called
	gotCtx     context.Context // last context passed to Scan
	gotTargets []string        // NOTE(review): not written by Scan in this chunk — presumably recorded elsewhere; confirm
	gotPorts   []string        // NOTE(review): not written by Scan in this chunk — presumably recorded elsewhere; confirm
}
|
||||
|
||||
// Scan records the call and its context, then returns the configured
// canned streams and error.
func (f *fakeScanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
	f.calls++
	f.gotCtx = ctx
	return f.streams, f.err
}
|
||||
|
||||
// fakeAttacker is a test double for the cameradar attacker. It returns
// canned results and records the streams it was asked to attack.
type fakeAttacker struct {
	streams []cameradar.Stream // optional replacement streams returned by Attack
	err     error              // error returned by Attack

	calls      int                // number of times Attack was called
	gotStreams []cameradar.Stream // copy of the streams passed to Attack
}
|
||||
|
||||
func (f *fakeAttacker) Attack(_ context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
f.calls++
|
||||
f.gotStreams = append([]cameradar.Stream(nil), streams...)
|
||||
if f.err != nil {
|
||||
return streams, f.err
|
||||
}
|
||||
if f.streams != nil {
|
||||
return f.streams, nil
|
||||
}
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
// recordingReporter is a mutex-guarded reporter test double that counts
// lifecycle calls and captures the arguments of the final Summary call.
type recordingReporter struct {
	mu             sync.Mutex
	startCalls     int                // number of Start calls
	doneCalls      int                // number of Done calls
	errorCalls     int                // number of Error calls
	summaryCalls   int                // number of Summary calls
	summaryStreams []cameradar.Stream // copy of the streams passed to Summary
	summaryErr     error              // error passed to Summary
}
|
||||
|
||||
// Start counts step-start notifications; the step and message are ignored.
func (r *recordingReporter) Start(cameradar.Step, string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.startCalls++
}
|
||||
|
||||
// Done counts step-completion notifications; the step and message are ignored.
func (r *recordingReporter) Done(cameradar.Step, string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.doneCalls++
}
|
||||
|
||||
// Progress is a no-op; progress updates are not recorded by this double.
func (r *recordingReporter) Progress(cameradar.Step, string) {}
|
||||
|
||||
// Debug is a no-op; debug messages are not recorded by this double.
func (r *recordingReporter) Debug(cameradar.Step, string) {}
|
||||
|
||||
// Error counts error notifications; the step and error value are ignored.
func (r *recordingReporter) Error(cameradar.Step, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.errorCalls++
}
|
||||
|
||||
// Summary records a defensive copy of the final streams and the final
// error; the tests in this file assert it is called exactly once per run.
func (r *recordingReporter) Summary(streams []cameradar.Stream, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.summaryCalls++
	r.summaryStreams = append([]cameradar.Stream(nil), streams...)
	r.summaryErr = err
}
|
||||
|
||||
// Close is a no-op; the double holds no resources.
func (r *recordingReporter) Close() {}
|
||||
@@ -0,0 +1,234 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/attack"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/dict"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/output"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/urfave/cli/v3"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
//nolint:cyclop // Splitting this function does not make it clearer.
|
||||
func runCameradar(ctx context.Context, cmd *cli.Command) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
targetInputs := cmd.StringSlice(flagTargets)
|
||||
if len(targetInputs) == 0 {
|
||||
return errors.New("at least one target must be specified")
|
||||
}
|
||||
|
||||
targets, err := loadTargets(targetInputs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading targets: %w", err)
|
||||
}
|
||||
if len(targets) == 0 {
|
||||
return errors.New("no valid targets provided")
|
||||
}
|
||||
|
||||
ports := cmd.StringSlice(flagPorts)
|
||||
if len(ports) == 0 {
|
||||
return errors.New("at least one port must be specified")
|
||||
}
|
||||
|
||||
var credsPath, routesPath string
|
||||
if cmd.IsSet(flagCustomCredentials) {
|
||||
credsPath = os.ExpandEnv(cmd.String(flagCustomCredentials))
|
||||
}
|
||||
if cmd.IsSet(flagCustomRoutes) {
|
||||
routesPath = os.ExpandEnv(cmd.String(flagCustomRoutes))
|
||||
}
|
||||
|
||||
dictionary, err := dict.New(credsPath, routesPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading dictionaries: %w", err)
|
||||
}
|
||||
|
||||
mode, err := cameradar.ParseMode(cmd.String(flagUI))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var outputPath string
|
||||
if cmd.IsSet(flagOutput) {
|
||||
outputPath = os.ExpandEnv(cmd.String(flagOutput))
|
||||
}
|
||||
|
||||
interactive := isInteractiveTerminal()
|
||||
buildInfo := ui.BuildInfo{Version: version, Commit: commit, Date: date}
|
||||
reporter, err := ui.NewReporter(mode, cmd.Bool(flagDebug), os.Stdout, interactive, buildInfo, cancel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if plainReporter, ok := reporter.(*ui.PlainReporter); ok {
|
||||
resolvedMode := resolveMode(mode, interactive)
|
||||
plainReporter.PrintStartup(buildInfo, buildStartupOptions(
|
||||
targets,
|
||||
ports,
|
||||
routesPath,
|
||||
credsPath,
|
||||
outputPath,
|
||||
cmd.String(flagScanner),
|
||||
cmd.Int16(flagScanSpeed),
|
||||
cmd.Duration(flagAttackInterval),
|
||||
cmd.Duration(flagTimeout),
|
||||
cmd.Bool(flagSkipScan),
|
||||
cmd.Bool(flagDebug),
|
||||
resolvedMode,
|
||||
))
|
||||
}
|
||||
if outputPath != "" {
|
||||
reporter = output.NewM3UReporter(reporter, outputPath)
|
||||
}
|
||||
defer reporter.Close()
|
||||
|
||||
config := scan.Config{
|
||||
SkipScan: cmd.Bool(flagSkipScan),
|
||||
Targets: targets,
|
||||
Ports: ports,
|
||||
ScanSpeed: cmd.Int16(flagScanSpeed),
|
||||
Scanner: cmd.String(flagScanner),
|
||||
}
|
||||
var scanner cameradar.StreamScanner
|
||||
scanner, err = scan.New(config, reporter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating stream scanner: %w", err)
|
||||
}
|
||||
|
||||
interval := cmd.Duration(flagAttackInterval)
|
||||
timeout := cmd.Duration(flagTimeout)
|
||||
attacker, err := attack.New(dictionary, interval, timeout, reporter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating attacker: %w", err)
|
||||
}
|
||||
|
||||
c, err := cameradar.New(
|
||||
scanner,
|
||||
attacker,
|
||||
targets,
|
||||
ports,
|
||||
reporter,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating scanner: %w", err)
|
||||
}
|
||||
|
||||
return c.Run(ctx)
|
||||
}
|
||||
|
||||
func resolveMode(mode cameradar.Mode, interactive bool) cameradar.Mode {
|
||||
if mode != cameradar.ModeAuto {
|
||||
return mode
|
||||
}
|
||||
if interactive {
|
||||
return cameradar.ModeTUI
|
||||
}
|
||||
return cameradar.ModePlain
|
||||
}
|
||||
|
||||
func buildStartupOptions(
|
||||
targets []string,
|
||||
ports []string,
|
||||
routesPath string,
|
||||
credsPath string,
|
||||
outputPath string,
|
||||
scanner string,
|
||||
scanSpeed int16,
|
||||
attackInterval time.Duration,
|
||||
timeout time.Duration,
|
||||
skipScan bool,
|
||||
debug bool,
|
||||
mode cameradar.Mode,
|
||||
) []string {
|
||||
options := []string{
|
||||
"targets: " + strings.Join(targets, ", "),
|
||||
"ports: " + strings.Join(ports, ", "),
|
||||
"custom-routes: " + fallbackValue(routesPath, "builtin"),
|
||||
"custom-credentials: " + fallbackValue(credsPath, "builtin"),
|
||||
"scanner: " + fallbackValue(scanner, "nmap"),
|
||||
"scan-speed: " + strconv.FormatInt(int64(scanSpeed), 10),
|
||||
"skip-scan: " + strconv.FormatBool(skipScan),
|
||||
"attack-interval: " + attackInterval.String(),
|
||||
"timeout: " + timeout.String(),
|
||||
"debug: " + strconv.FormatBool(debug),
|
||||
"ui: " + string(mode),
|
||||
"output: " + fallbackValue(outputPath, "disabled"),
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
// fallbackValue returns value with surrounding whitespace stripped, or
// fallback when the trimmed value is empty.
func fallbackValue(value, fallback string) string {
	if trimmed := strings.TrimSpace(value); trimmed != "" {
		return trimmed
	}
	return fallback
}
|
||||
|
||||
func isInteractiveTerminal() bool {
|
||||
if !term.IsTerminal(int(os.Stdout.Fd())) {
|
||||
return false
|
||||
}
|
||||
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
||||
return false
|
||||
}
|
||||
|
||||
termEnv := strings.TrimSpace(os.Getenv("TERM"))
|
||||
if termEnv == "" || termEnv == "dumb" {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// loadTargets merges targets from command line and file paths.
|
||||
// Valid targets are:
|
||||
// - Single IP addresses (e.g., 192.168.1.10)
|
||||
// - CIDR notations (e.g., 192.168.1.0/24)
|
||||
// - Hostnames (e.g., localhost)
|
||||
// - IP Ranges (e.g., 192.168.1.10-20)
|
||||
func loadTargets(targets []string) ([]string, error) {
|
||||
if len(targets) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var merged []string
|
||||
for _, target := range targets {
|
||||
trimmed := strings.TrimSpace(target)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := os.Stat(trimmed)
|
||||
if err != nil {
|
||||
merged = append(merged, trimmed)
|
||||
continue
|
||||
}
|
||||
|
||||
bytes, err := os.ReadFile(trimmed)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading targets file %q: %w", trimmed, err)
|
||||
}
|
||||
|
||||
for line := range strings.SplitSeq(string(bytes), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
merged = append(merged, line)
|
||||
}
|
||||
}
|
||||
|
||||
return merged, nil
|
||||
}
|
||||
@@ -0,0 +1,157 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/debug"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ettle/strcase"
|
||||
"github.com/hamba/cmd/v3"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
// Names of the CLI flags; also used to derive each flag's
// SCREAMING_SNAKE environment-variable source via strcase.ToSNAKE.
const (
	flagTargets           = "targets"
	flagPorts             = "ports"
	flagCustomRoutes      = "custom-routes"
	flagCustomCredentials = "custom-credentials"
	flagScanner           = "scanner"
	flagScanSpeed         = "scan-speed"
	flagAttackInterval    = "attack-interval"
	flagTimeout           = "timeout"
	flagSkipScan          = "skip-scan"
	flagDebug             = "debug"
	flagUI                = "ui"
	flagOutput            = "output"
)
|
||||
|
||||
// Build metadata, intended to be overridden at link time (e.g. via
// -ldflags); the defaults below apply to plain `go build` binaries.
var (
	version = "dev"
	commit  = "none"
	date    = "unknown"
)
|
||||
|
||||
// flags declares every CLI flag for the root command. Each flag can also
// be supplied through an environment variable derived from its name
// (SCREAMING_SNAKE case via strcase.ToSNAKE).
var flags = cmd.Flags{
	&cli.StringSliceFlag{
		Name:    flagTargets,
		Usage:   "The targets on which to scan for open RTSP streams in a network range format",
		Aliases: []string{"t"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagTargets)),
	},
	&cli.StringSliceFlag{
		Name:    flagPorts,
		Usage:   "The ports on which to search for RTSP streams",
		Aliases: []string{"p"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagPorts)),
		Value:   []string{"554", "5554", "8554", "http"},
	},
	&cli.StringFlag{
		Name:    flagCustomRoutes,
		Usage:   "The path on which to load a custom routes dictionary",
		Aliases: []string{"r"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagCustomRoutes)),
	},
	&cli.StringFlag{
		Name:    flagCustomCredentials,
		Usage:   "The path on which to load a custom credentials JSON dictionary",
		Aliases: []string{"c"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagCustomCredentials)),
	},
	&cli.StringFlag{
		Name:    flagScanner,
		Usage:   "Discovery scanner backend: nmap or masscan",
		Sources: cli.EnvVars(strcase.ToSNAKE(flagScanner)),
		Value:   "nmap",
	},
	&cli.Int16Flag{
		Name:    flagScanSpeed,
		Usage:   "The nmap speed preset to use for scanning (lower is stealthier)",
		Aliases: []string{"s"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagScanSpeed)),
		Value:   4,
	},
	&cli.DurationFlag{
		Name:    flagAttackInterval,
		Usage:   "The interval between each attack (i.e: 2000ms, higher is stealthier)",
		Aliases: []string{"I"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagAttackInterval)),
		Value:   0,
	},
	&cli.DurationFlag{
		Name:    flagTimeout,
		Usage:   "The timeout to use for attack attempts (i.e: 2000ms)",
		Aliases: []string{"T"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagTimeout)),
		Value:   2000 * time.Millisecond,
	},
	&cli.BoolFlag{
		Name:    flagSkipScan,
		Usage:   "Skip discovery and treat every target and port as an RTSP stream",
		Sources: cli.EnvVars(strcase.ToSNAKE(flagSkipScan)),
		Value:   false,
	},
	&cli.BoolFlag{
		Name:    flagDebug,
		Usage:   "Enable debug logs",
		Aliases: []string{"d"},
		Sources: cli.EnvVars(strcase.ToSNAKE(flagDebug)),
		Value:   false,
	},
	&cli.StringFlag{
		Name:    flagUI,
		Usage:   "UI mode: auto, tui, or plain",
		Sources: cli.EnvVars(strcase.ToSNAKE(flagUI)),
		Value:   "auto",
	},
	&cli.StringFlag{
		Name:    flagOutput,
		Usage:   "Write discovered streams to an M3U file at the given path",
		Sources: cli.EnvVars(strcase.ToSNAKE(flagOutput)),
	},
}
|
||||
|
||||
// main delegates to realMain so deferred cleanup and the panic handler
// run before os.Exit terminates the process.
func main() {
	os.Exit(realMain())
}
|
||||
|
||||
// realMain builds the CLI application, runs it until completion or until
// SIGINT/SIGTERM cancels the context, and returns the process exit code.
func realMain() (code int) {
	// Convert panics into a readable message plus stack trace and exit
	// code 1; the named result lets the deferred handler set the code.
	defer func() {
		if v := recover(); v != nil {
			_, _ = fmt.Fprintf(os.Stderr, "Panic: %v\n%s\n", v, debug.Stack())
			code = 1
		}
	}()

	app := &cli.Command{
		Name:    "Cameradar",
		Version: version,
		Usage:   "Scan targets for RTSP streams",
		Flags:   flags,
		Action:  runCameradar,
		Commands: []*cli.Command{
			{
				Name:   "version",
				Usage:  "Print version information",
				Action: printVersion,
			},
		},
	}

	// Cancel the root context on SIGINT/SIGTERM so the pipeline can shut
	// down instead of being killed outright.
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	err := app.Run(ctx, os.Args)
	if err != nil {
		// A canceled context means the run was interrupted; exit
		// non-zero without printing a redundant error message.
		if errors.Is(err, context.Canceled) {
			return 1
		}
		_, _ = fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		return 1
	}
	return 0
}
|
||||
@@ -0,0 +1,49 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
// printVersion implements the `version` subcommand: it writes the build
// metadata and the locally installed nmap version to stdout.
func printVersion(ctx context.Context, _ *cli.Command) error {
	buildInfo := ui.BuildInfo{Version: version, Commit: commit, Date: date}
	nmapVersion := getNmapVersion(ctx)
	// Tab-separated labels so the values line up in a typical terminal.
	_, err := fmt.Fprintf(
		os.Stdout,
		"Version:\t%s\nCommit:\t\t%s\nBuild date:\t%s\nNmap:\t\t%s\n",
		buildInfo.DisplayVersion(),
		buildInfo.ShortCommit(),
		buildInfo.Date,
		nmapVersion,
	)
	return err
}
|
||||
|
||||
// unknownVersion is reported when the nmap version cannot be determined.
const unknownVersion = "unknown"
|
||||
|
||||
func getNmapVersion(ctx context.Context) string {
|
||||
output, err := exec.CommandContext(ctx, "nmap", "--version").Output()
|
||||
if err != nil {
|
||||
return unknownVersion
|
||||
}
|
||||
|
||||
lines := strings.SplitN(string(output), "\n", 2)
|
||||
firstLine := strings.TrimSpace(lines[0])
|
||||
const prefix = "Nmap version "
|
||||
if !strings.HasPrefix(firstLine, prefix) {
|
||||
return unknownVersion
|
||||
}
|
||||
|
||||
versionPart := strings.TrimSpace(strings.TrimPrefix(firstLine, prefix))
|
||||
fields := strings.Fields(versionPart)
|
||||
if len(fields) == 0 {
|
||||
return unknownVersion
|
||||
}
|
||||
return fields[0]
|
||||
}
|
||||
@@ -0,0 +1,5 @@
|
||||
0.0.0.0
|
||||
localhost
|
||||
192.17.0.0/16
|
||||
192.168.1.140-255
|
||||
192.168.2-3.0-255
|
||||
@@ -0,0 +1,91 @@
|
||||
module github.com/Ullaakut/cameradar/v6
|
||||
|
||||
go 1.25.8
|
||||
|
||||
require (
|
||||
github.com/Ullaakut/masscan v1.0.0
|
||||
github.com/Ullaakut/nmap/v4 v4.0.0
|
||||
github.com/bluenviron/gortsplib/v5 v5.5.1
|
||||
github.com/charmbracelet/bubbles v1.0.0
|
||||
github.com/charmbracelet/bubbletea v1.3.10
|
||||
github.com/charmbracelet/lipgloss v1.1.0
|
||||
github.com/ettle/strcase v0.2.0
|
||||
github.com/hamba/cmd/v3 v3.1.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/urfave/cli/v3 v3.8.0
|
||||
golang.org/x/term v0.42.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/VictoriaMetrics/metrics v1.41.2 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bluenviron/mediacommon/v2 v2.8.3 // indirect
|
||||
github.com/cactus/go-statsd-client/v5 v5.1.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.4.1 // indirect
|
||||
github.com/charmbracelet/harmonica v0.2.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.11.6 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.2 // indirect
|
||||
github.com/clipperhouse/displaywidth v0.9.0 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/go4org/hashtriemap v0.0.0-20251130024219-545ba229f689 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/grafana/pyroscope-go v1.2.7 // indirect
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
|
||||
github.com/hamba/logger/v2 v2.9.1 // indirect
|
||||
github.com/hamba/statter/v2 v2.8.1 // indirect
|
||||
github.com/klauspost/compress v1.18.4 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pion/logging v0.2.4 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.16 // indirect
|
||||
github.com/pion/rtp v1.10.1 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.18 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.10 // indirect
|
||||
github.com/pion/transport/v4 v4.0.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.17.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/valyala/fastrand v1.1.0 // indirect
|
||||
github.com/valyala/histogram v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/otel v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.43.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.43.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.10.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.52.0 // indirect
|
||||
golang.org/x/sys v0.43.0 // indirect
|
||||
golang.org/x/text v0.35.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
|
||||
google.golang.org/grpc v1.80.0 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
@@ -0,0 +1,275 @@
|
||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
|
||||
github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
|
||||
github.com/Ullaakut/masscan v1.0.0 h1:+YtpxNcIEaB2lMWNy+oDZF+5pP86S7vSzCKMjW6UDDA=
|
||||
github.com/Ullaakut/masscan v1.0.0/go.mod h1:2LQUQ88hmdXZ+JqQTx6RaszuZDRIAwjEoUL+sVXCAe8=
|
||||
github.com/Ullaakut/nmap/v4 v4.0.0 h1:QwpxX5F+S14ZEvBQKc37xnvpPXcw4vK0rsZkGV4h98s=
|
||||
github.com/Ullaakut/nmap/v4 v4.0.0/go.mod h1:B+MtOtHdb+jR9bc11BNwZX1QVHOtsDjfKkXMCZtRzbw=
|
||||
github.com/VictoriaMetrics/metrics v1.41.2 h1:pLQ4Mw9TqXFq3ZsZVJkz88JHpjL9LY5NHTY3v2gBNAw=
|
||||
github.com/VictoriaMetrics/metrics v1.41.2/go.mod h1:xDM82ULLYCYdFRgQ2JBxi8Uf1+8En1So9YUwlGTOqTc=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY=
|
||||
github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bluenviron/gortsplib/v5 v5.5.1 h1:jZJb1dLsBMhxW1cFz/CwLhUW5diaqHf/617QyQGx4nY=
|
||||
github.com/bluenviron/gortsplib/v5 v5.5.1/go.mod h1:otcPqR836QZej/EYx7njn8vl4TLj8Ya3QAf+GBwh2cQ=
|
||||
github.com/bluenviron/mediacommon/v2 v2.8.3 h1:T6xb7ZK3eBixi/HynzhtGRCEIrazwcmGIeu0WDTVISY=
|
||||
github.com/bluenviron/mediacommon/v2 v2.8.3/go.mod h1:CsYjGgzIz8RbloQf4BHR4uReogZsB4PEKWfePVIzJv8=
|
||||
github.com/cactus/go-statsd-client/v5 v5.1.0 h1:sbbdfIl9PgisjEoXzvXI1lwUKWElngsjJKaZeC021P4=
|
||||
github.com/cactus/go-statsd-client/v5 v5.1.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charmbracelet/bubbles v1.0.0 h1:12J8/ak/uCZEMQ6KU7pcfwceyjLlWsDLAxB5fXonfvc=
|
||||
github.com/charmbracelet/bubbles v1.0.0/go.mod h1:9d/Zd5GdnauMI5ivUIVisuEm3ave1XwXtD1ckyV6r3E=
|
||||
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
|
||||
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
|
||||
github.com/charmbracelet/colorprofile v0.4.1 h1:a1lO03qTrSIRaK8c3JRxJDZOvhvIeSco3ej+ngLk1kk=
|
||||
github.com/charmbracelet/colorprofile v0.4.1/go.mod h1:U1d9Dljmdf9DLegaJ0nGZNJvoXAhayhmidOdcBwAvKk=
|
||||
github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ=
|
||||
github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao=
|
||||
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
|
||||
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
|
||||
github.com/charmbracelet/x/ansi v0.11.6 h1:GhV21SiDz/45W9AnV2R61xZMRri5NlLnl6CVF7ihZW8=
|
||||
github.com/charmbracelet/x/ansi v0.11.6/go.mod h1:2JNYLgQUsyqaiLovhU2Rv/pb8r6ydXKS3NIttu3VGZQ=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15 h1:ur3pZy0o6z/R7EylET877CBxaiE1Sp1GMxoFPAIztPI=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.15/go.mod h1:J1YVbR7MUuEGIFPCaaZ96KDl5NoS0DAWkskup+mOY+Q=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
|
||||
github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk=
|
||||
github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI=
|
||||
github.com/clipperhouse/displaywidth v0.9.0 h1:Qb4KOhYwRiN3viMv1v/3cTBlz3AcAZX3+y9OLhMtAtA=
|
||||
github.com/clipperhouse/displaywidth v0.9.0/go.mod h1:aCAAqTlh4GIVkhQnJpbL0T/WfcrJXHcj8C0yjYcjOZA=
|
||||
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
|
||||
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
|
||||
github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
|
||||
github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes=
|
||||
github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
|
||||
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
|
||||
github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
|
||||
github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go4org/hashtriemap v0.0.0-20251130024219-545ba229f689 h1:0psnKZ+N2IP43/SZC8SKx6OpFJwLmQb9m9QyV9BC2f8=
|
||||
github.com/go4org/hashtriemap v0.0.0-20251130024219-545ba229f689/go.mod h1:OGmRfY/9QEK2P5zCRtmqfbCF283xPkU2dvVA4MvbvpI=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grafana/pyroscope-go v1.2.7 h1:VWBBlqxjyR0Cwk2W6UrE8CdcdD80GOFNutj0Kb1T8ac=
|
||||
github.com/grafana/pyroscope-go v1.2.7/go.mod h1:o/bpSLiJYYP6HQtvcoVKiE9s5RiNgjYTj1DhiddP2Pc=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
|
||||
github.com/hamba/cmd/v3 v3.1.1 h1:/rJj6bK6ew0zM31I6s2mwtYKSu4BSsd4PxB/dKuwJyA=
|
||||
github.com/hamba/cmd/v3 v3.1.1/go.mod h1:w1ZhSByZcrL6oB0gkxLeW8wqX+kAbkKf3GiYz/5Kl7I=
|
||||
github.com/hamba/logger/v2 v2.9.1 h1:NRV+6j0SEdGag1DkjWtV/k3JGOFAByx6IEc/nJNpYLs=
|
||||
github.com/hamba/logger/v2 v2.9.1/go.mod h1:IveSM7xeUVbtmlgXsXoAdNvhQ+JG1CgFMBlKG7hRH/4=
|
||||
github.com/hamba/statter/v2 v2.8.1 h1:Y6mEOXPxBLfBvKzb31BjPhtSLyza/ghFu+Kez7t0CaY=
|
||||
github.com/hamba/statter/v2 v2.8.1/go.mod h1:DTwNCeix6cqciNDhT8CzzKa5k2nCWPWGjIAru4jRtpA=
|
||||
github.com/hamba/testutils v0.7.0 h1:GQ0RJbz4+aFauvEV5AFgPMOKltl8gWZVbzROS5b9qDc=
|
||||
github.com/hamba/testutils v0.7.0/go.mod h1:5rw9ZvxgDegvi9j32U5s5LBDrOBhrCu4g53EM03KOF4=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
|
||||
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw=
|
||||
github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
|
||||
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
|
||||
github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo=
|
||||
github.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo=
|
||||
github.com/pion/rtp v1.10.1 h1:xP1prZcCTUuhO2c83XtxyOHJteISg6o8iPsE2acaMtA=
|
||||
github.com/pion/rtp v1.10.1/go.mod h1:rF5nS1GqbR7H/TCpKwylzeq6yDM+MM6k+On5EgeThEM=
|
||||
github.com/pion/sdp/v3 v3.0.18 h1:l0bAXazKHpepazVdp+tPYnrsy9dfh7ZbT8DxesH5ZnI=
|
||||
github.com/pion/sdp/v3 v3.0.18/go.mod h1:ZREGo6A9ZygQ9XkqAj5xYCQtQpif0i6Pa81HOiAdqQ8=
|
||||
github.com/pion/srtp/v3 v3.0.10 h1:tFirkpBb3XccP5VEXLi50GqXhv5SKPxqrdlhDCJlZrQ=
|
||||
github.com/pion/srtp/v3 v3.0.10/go.mod h1:3mOTIB0cq9qlbn59V4ozvv9ClW/BSEbRp4cY0VtaR7M=
|
||||
github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o=
|
||||
github.com/pion/transport/v4 v4.0.1/go.mod h1:nEuEA4AD5lPdcIegQDpVLgNoDGreqM/YqmEx3ovP4jM=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
|
||||
github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U=
|
||||
github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/urfave/cli/v3 v3.8.0 h1:XqKPrm0q4P0q5JpoclYoCAv0/MIvH/jZ2umzuf8pNTI=
|
||||
github.com/urfave/cli/v3 v3.8.0/go.mod h1:ysVLtOEmg2tOy6PknnYVhDoouyC/6N42TMeoMzskhso=
|
||||
github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
|
||||
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
||||
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
|
||||
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
|
||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
|
||||
go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=
|
||||
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0 h1:zWWrB1U6nqhS/k6zYB74CjRpuiitRtLLi68VcgmOEto=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.42.0/go.mod h1:2qXPNBX1OVRC0IwOnfo1ljoid+RD0QK3443EaqVlsOU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak=
|
||||
go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=
|
||||
go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=
|
||||
go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=
|
||||
go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=
|
||||
go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=
|
||||
go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=
|
||||
go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=
|
||||
go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
|
||||
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
||||
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
||||
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
|
||||
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||
golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
|
||||
golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
|
||||
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
|
||||
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
|
||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
|
||||
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
|
||||
google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=
|
||||
google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 746 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 325 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 1.5 MiB |
@@ -0,0 +1,430 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/bluenviron/gortsplib/v5"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/description"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/liberrors"
|
||||
)
|
||||
|
||||
// Route that should never be a constructor default.
|
||||
const dummyRoute = "0x8b6c42"
|
||||
|
||||
// Dictionary provides dictionaries for routes, usernames and passwords.
|
||||
type Dictionary interface {
|
||||
Routes() []string
|
||||
Usernames() []string
|
||||
Passwords() []string
|
||||
}
|
||||
|
||||
// Reporter reports progress and results of the attacks.
|
||||
type Reporter interface {
|
||||
Start(step cameradar.Step, message string)
|
||||
Done(step cameradar.Step, message string)
|
||||
Progress(step cameradar.Step, message string)
|
||||
Error(step cameradar.Step, err error)
|
||||
Debug(step cameradar.Step, message string)
|
||||
}
|
||||
|
||||
// Attacker attempts to discover routes and credentials for RTSP streams.
|
||||
type Attacker struct {
|
||||
dictionary Dictionary
|
||||
reporter Reporter
|
||||
attackInterval time.Duration
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// New builds an Attacker with the provided dependencies.
|
||||
func New(dict Dictionary, attackInterval, timeout time.Duration, reporter Reporter) (Attacker, error) {
|
||||
if dict == nil {
|
||||
return Attacker{}, errors.New("dictionary is required")
|
||||
}
|
||||
|
||||
return Attacker{
|
||||
dictionary: dict,
|
||||
attackInterval: attackInterval,
|
||||
timeout: timeout,
|
||||
reporter: reporter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Attack attacks the given targets and returns the accessed streams.
|
||||
func (a Attacker) Attack(ctx context.Context, targets []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
if len(targets) == 0 {
|
||||
return nil, errors.New("no stream found")
|
||||
}
|
||||
|
||||
streams, err := a.attackRoutesPhase(ctx, targets)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
streams, err = a.detectAuthPhase(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
streams, err = a.attackCredentialsPhase(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
streams, err = a.validateStreamsPhase(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
// Some cameras run an inaccurate version of the RTSP protocol which prioritizes 401 over 404.
|
||||
// For these cameras, running another route attack solves the problem.
|
||||
if !needsReattack(streams) {
|
||||
return streams, nil
|
||||
}
|
||||
streams, err = a.reattackRoutes(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) attackRoutesPhase(ctx context.Context, targets []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepAttackRoutes, "Attacking RTSP routes")
|
||||
routeAttempts := (len(a.dictionary.Routes()) + 1) * len(targets)
|
||||
if routeAttempts > 0 {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, cameradar.ProgressTotalMessage(routeAttempts))
|
||||
}
|
||||
|
||||
streams, err := runParallel(ctx, targets, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.attackRoutesForStream(ctx, target, true)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepAttackRoutes, err)
|
||||
return streams, fmt.Errorf("attacking routes: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepAttackRoutes, "Finished route attacks")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) detectAuthPhase(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepDetectAuth, "Detecting authentication methods")
|
||||
if len(streams) > 0 {
|
||||
a.reporter.Progress(cameradar.StepDetectAuth, cameradar.ProgressTotalMessage(len(streams)))
|
||||
}
|
||||
streams, err := a.detectAuthMethods(ctx, streams)
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepDetectAuth, err)
|
||||
return streams, fmt.Errorf("detecting authentication methods: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepDetectAuth, "Authentication detection complete")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) attackCredentialsPhase(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepAttackCredentials, "Attacking credentials")
|
||||
credentialsAttempts := len(streams) * len(a.dictionary.Usernames()) * len(a.dictionary.Passwords())
|
||||
if credentialsAttempts > 0 {
|
||||
a.reporter.Progress(cameradar.StepAttackCredentials, cameradar.ProgressTotalMessage(credentialsAttempts))
|
||||
}
|
||||
streams, err := runParallel(ctx, streams, a.attackCredentialsForStream)
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepAttackCredentials, err)
|
||||
return streams, fmt.Errorf("attacking credentials: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepAttackCredentials, "Credential attacks complete")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) validateStreamsPhase(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepValidateStreams, "Validating streams")
|
||||
if len(streams) > 0 {
|
||||
a.reporter.Progress(cameradar.StepValidateStreams, cameradar.ProgressTotalMessage(len(streams)))
|
||||
}
|
||||
streams, err := runParallel(ctx, streams, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.validateStream(ctx, target, true)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepValidateStreams, err)
|
||||
return streams, fmt.Errorf("validating streams: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepValidateStreams, "Stream validation complete")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) reattackRoutes(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, "Re-attacking routes for partial results")
|
||||
updated, err := runParallel(ctx, streams, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.attackRoutesForStream(ctx, target, false)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepAttackRoutes, err)
|
||||
return streams, fmt.Errorf("attacking routes: %w", err)
|
||||
}
|
||||
|
||||
updated, err = runParallel(ctx, updated, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.validateStream(ctx, target, false)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepValidateStreams, err)
|
||||
return updated, fmt.Errorf("validating streams: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, updated)
|
||||
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
func needsReattack(streams []cameradar.Stream) bool {
|
||||
for _, stream := range streams {
|
||||
if stream.RouteFound && stream.CredentialsFound && stream.Available {
|
||||
// This stream is fully discovered, no need to re-attack.
|
||||
continue
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type summaryUpdater interface {
|
||||
UpdateSummary(streams []cameradar.Stream)
|
||||
}
|
||||
|
||||
func updateSummary(reporter Reporter, streams []cameradar.Stream) {
|
||||
updater, ok := reporter.(summaryUpdater)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
updater.UpdateSummary(streams)
|
||||
}
|
||||
|
||||
func (a Attacker) attackCredentialsForStream(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
for _, username := range a.dictionary.Usernames() {
|
||||
for _, password := range a.dictionary.Passwords() {
|
||||
if ctx.Err() != nil {
|
||||
return target, ctx.Err()
|
||||
}
|
||||
|
||||
a.reporter.Progress(cameradar.StepAttackCredentials, cameradar.ProgressTickMessage())
|
||||
ok, err := a.credAttack(target, username, password)
|
||||
if err != nil {
|
||||
target.CredentialsFound = false
|
||||
|
||||
msg := fmt.Sprintf("credential attempt failed for %s:%d (%s:%s): %v", target.Address.String(), target.Port, username, password, err)
|
||||
a.reporter.Debug(cameradar.StepAttackCredentials, msg)
|
||||
|
||||
return target, nil
|
||||
}
|
||||
|
||||
if ok {
|
||||
target.CredentialsFound = true
|
||||
target.Username = username
|
||||
target.Password = password
|
||||
|
||||
msg := fmt.Sprintf("Credentials found for %s:%d", target.Address.String(), target.Port)
|
||||
a.reporter.Progress(cameradar.StepAttackCredentials, msg)
|
||||
|
||||
return target, nil
|
||||
}
|
||||
time.Sleep(a.attackInterval)
|
||||
}
|
||||
}
|
||||
|
||||
target.CredentialsFound = false
|
||||
return target, nil
|
||||
}
|
||||
|
||||
func (a Attacker) attackRoutesForStream(ctx context.Context, target cameradar.Stream, emitProgress bool) (cameradar.Stream, error) {
|
||||
if target.RouteFound {
|
||||
return target, nil
|
||||
}
|
||||
|
||||
if emitProgress {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, cameradar.ProgressTickMessage())
|
||||
}
|
||||
ok, err := a.routeAttack(target, dummyRoute)
|
||||
if err != nil {
|
||||
a.reporter.Debug(cameradar.StepAttackRoutes, fmt.Sprintf("route probe failed for %s:%d: %v", target.Address.String(), target.Port, err))
|
||||
return target, nil
|
||||
}
|
||||
if ok {
|
||||
target.RouteFound = true
|
||||
target.Routes = append(target.Routes, "") // Add empty route for default.
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, fmt.Sprintf("Default route accepted for %s:%d", target.Address.String(), target.Port))
|
||||
return target, nil
|
||||
}
|
||||
|
||||
for _, route := range a.dictionary.Routes() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return target, ctx.Err()
|
||||
case <-time.After(a.attackInterval):
|
||||
}
|
||||
|
||||
if emitProgress {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, cameradar.ProgressTickMessage())
|
||||
}
|
||||
ok, err := a.routeAttack(target, route)
|
||||
if err != nil {
|
||||
a.reporter.Debug(cameradar.StepAttackRoutes, fmt.Sprintf("route attempt failed for %s:%d (%s): %v", target.Address.String(), target.Port, route, err))
|
||||
return target, nil
|
||||
}
|
||||
if ok {
|
||||
target.RouteFound = true
|
||||
target.Routes = append(target.Routes, route)
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, fmt.Sprintf("Route found for %s:%d -> %s", target.Address.String(), target.Port, route))
|
||||
}
|
||||
}
|
||||
|
||||
return target, nil
|
||||
}
|
||||
|
||||
func (a Attacker) routeAttack(stream cameradar.Stream, route string) (bool, error) {
|
||||
stream.Routes = []string{route}
|
||||
code, err := a.describeStatus(stream)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("performing describe request at %q: %w", stream, err)
|
||||
}
|
||||
|
||||
a.reporter.Debug(cameradar.StepAttackRoutes, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", stream, code))
|
||||
access := code == base.StatusOK || code == base.StatusUnauthorized || code == base.StatusForbidden
|
||||
return access, nil
|
||||
}
|
||||
|
||||
func (a Attacker) credAttack(stream cameradar.Stream, username, password string) (bool, error) {
|
||||
stream.Username = username
|
||||
stream.Password = password
|
||||
code, err := a.describeStatus(stream)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("performing describe request at %q: %w", stream, err)
|
||||
}
|
||||
|
||||
a.reporter.Debug(cameradar.StepAttackCredentials, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", stream, code))
|
||||
return code == base.StatusOK || code == base.StatusNotFound, nil
|
||||
}
|
||||
|
||||
func (a Attacker) validateStream(ctx context.Context, stream cameradar.Stream, emitProgress bool) (cameradar.Stream, error) {
|
||||
if emitProgress {
|
||||
defer a.reporter.Progress(cameradar.StepValidateStreams, cameradar.ProgressTickMessage())
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return stream, ctx.Err()
|
||||
}
|
||||
|
||||
client, err := a.newRTSPClient(stream)
|
||||
if err != nil {
|
||||
return stream, fmt.Errorf("starting rtsp client: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
desc, res, err := a.describeWithRetry(ctx, client, stream)
|
||||
if err != nil {
|
||||
return a.handleDescribeError(stream, err)
|
||||
}
|
||||
a.logDescribeResponse(stream.String(), res)
|
||||
|
||||
if desc == nil || len(desc.Medias) == 0 {
|
||||
return stream, fmt.Errorf("no media tracks found for %q", stream)
|
||||
}
|
||||
|
||||
res, err = client.Setup(desc.BaseURL, desc.Medias[0], 0, 0)
|
||||
if err != nil {
|
||||
return a.handleSetupError(stream, err)
|
||||
}
|
||||
a.logSetupResponse(stream.String(), res)
|
||||
|
||||
stream.Available = res != nil && res.StatusCode == base.StatusOK
|
||||
if stream.Available {
|
||||
a.reporter.Progress(cameradar.StepValidateStreams, fmt.Sprintf("Stream validated for %s:%d", stream.Address.String(), stream.Port))
|
||||
}
|
||||
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
func (a Attacker) describeWithRetry(ctx context.Context, client *gortsplib.Client, stream cameradar.Stream) (*description.Session, *base.Response, error) {
|
||||
u, err := stream.URL()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("building rtsp url: %w", err)
|
||||
}
|
||||
|
||||
var (
|
||||
desc *description.Session
|
||||
res *base.Response
|
||||
)
|
||||
for range 5 {
|
||||
desc, res, err = client.Describe(u)
|
||||
if err == nil {
|
||||
return desc, res, nil
|
||||
}
|
||||
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) && badStatus.Code == base.StatusServiceUnavailable {
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d (retrying)", stream, badStatus.Code))
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return nil, nil, fmt.Errorf("describe retries exhausted for %q: %w", stream, err)
|
||||
}
|
||||
|
||||
func (a Attacker) handleDescribeError(stream cameradar.Stream, err error) (cameradar.Stream, error) {
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) && badStatus.Code == base.StatusServiceUnavailable {
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", stream, badStatus.Code))
|
||||
a.reporter.Progress(cameradar.StepValidateStreams, fmt.Sprintf("Stream unavailable for %s:%d (RTSP %d)",
|
||||
stream.Address.String(),
|
||||
stream.Port,
|
||||
badStatus.Code,
|
||||
))
|
||||
stream.Available = false
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > error: %v", stream, err))
|
||||
|
||||
return stream, fmt.Errorf("performing describe request at %q: %w", stream, err)
|
||||
}
|
||||
|
||||
func (a Attacker) handleSetupError(stream cameradar.Stream, err error) (cameradar.Stream, error) {
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) {
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("SETUP %s RTSP/1.0 > %d", stream, badStatus.Code))
|
||||
stream.Available = badStatus.Code == base.StatusOK
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
return stream, fmt.Errorf("performing setup request at %q: %w", stream, err)
|
||||
}
|
||||
|
||||
func (a Attacker) logDescribeResponse(urlStr string, res *base.Response) {
|
||||
if res == nil {
|
||||
return
|
||||
}
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", urlStr, res.StatusCode))
|
||||
}
|
||||
|
||||
func (a Attacker) logSetupResponse(urlStr string, res *base.Response) {
|
||||
if res == nil {
|
||||
return
|
||||
}
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("SETUP %s RTSP/1.0 > %d", urlStr, res.StatusCode))
|
||||
}
|
||||
@@ -0,0 +1,388 @@
|
||||
package attack_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/attack"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
dict attack.Dictionary
|
||||
wantErr require.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "rejects nil dictionary",
|
||||
dict: nil,
|
||||
wantErr: require.Error,
|
||||
},
|
||||
{
|
||||
name: "accepts dictionary",
|
||||
dict: testDictionary{
|
||||
routes: []string{"stream"},
|
||||
usernames: []string{"user"},
|
||||
passwords: []string{"pass"},
|
||||
},
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
attacker, err := attack.New(test.dict, 10*time.Millisecond, time.Second, ui.NopReporter{})
|
||||
test.wantErr(t, err)
|
||||
if err != nil {
|
||||
assert.NotNil(t, attacker)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestAttacker_Attack_BasicAuth runs the full attack pipeline against a local
// RTSP server requiring Basic auth and checks that the route, credentials and
// availability are all discovered.
func TestAttacker_Attack_BasicAuth(t *testing.T) {
	addr, port := startRTSPServer(t, rtspServerConfig{
		allowedRoute: "stream",
		requireAuth:  true,
		username:     "user",
		password:     "pass",
		authMethod:   headers.AuthMethodBasic,
	})

	// Includes decoy entries so the brute force has to skip wrong pairs.
	dict := testDictionary{
		routes:    []string{"stream"},
		usernames: []string{"user", "other"},
		passwords: []string{"pass", "bad"},
	}

	testInterval := time.Millisecond
	testRequestTimeout := time.Second
	attacker, err := attack.New(dict, testInterval, testRequestTimeout, ui.NopReporter{})
	require.NoError(t, err)

	streams := []cameradar.Stream{{
		Address: addr,
		Port:    port,
	}}

	got, err := attacker.Attack(t.Context(), streams)
	require.NoError(t, err)
	require.Len(t, got, 1)

	assert.True(t, got[0].RouteFound)
	assert.True(t, got[0].CredentialsFound)
	assert.True(t, got[0].Available)
	assert.Equal(t, cameradar.AuthBasic, got[0].AuthenticationType)
	assert.Equal(t, "user", got[0].Username)
	assert.Equal(t, "pass", got[0].Password)
	assert.Contains(t, got[0].Routes, "stream")
}
|
||||
|
||||
// TestAttacker_Attack_AuthVariants runs the pipeline against servers with
// different auth configurations and checks the detected auth type plus the
// route/credential/availability flags for each.
func TestAttacker_Attack_AuthVariants(t *testing.T) {
	tests := []struct {
		name         string
		config       rtspServerConfig
		dict         testDictionary
		wantAuthType cameradar.AuthType
		wantRoute    bool
		wantCreds    bool
		wantAvail    bool
		wantErr      require.ErrorAssertionFunc
		errContains  string
	}{
		{
			name: "no authentication",
			config: rtspServerConfig{
				allowedRoute: "stream",
				requireAuth:  false,
				authMethod:   headers.AuthMethodBasic,
			},
			dict: testDictionary{
				routes: []string{"stream"},
			},
			wantAuthType: cameradar.AuthNone,
			wantRoute:    true,
			// No credentials in the dictionary, so none can be "found".
			wantCreds: false,
			wantAvail: true,
			wantErr:   require.NoError,
		},
		{
			name: "digest authentication",
			config: rtspServerConfig{
				allowedRoute: "stream",
				requireAuth:  true,
				username:     "user",
				password:     "pass",
				authMethod:   headers.AuthMethodDigest,
			},
			dict: testDictionary{
				routes:    []string{"stream"},
				usernames: []string{"user"},
				passwords: []string{"pass"},
			},
			wantAuthType: cameradar.AuthDigest,
			wantRoute:    true,
			wantCreds:    true,
			wantAvail:    true,
			wantErr:      require.NoError,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			addr, port := startRTSPServer(t, test.config)

			// Zero interval: no throttling needed against the local server.
			attacker, err := attack.New(test.dict, 0, time.Second, ui.NopReporter{})
			require.NoError(t, err)

			streams := []cameradar.Stream{{
				Address: addr,
				Port:    port,
			}}

			got, err := attacker.Attack(t.Context(), streams)
			test.wantErr(t, err)

			if test.errContains != "" {
				assert.ErrorContains(t, err, test.errContains)
			}

			require.Len(t, got, 1)
			assert.Equal(t, test.wantAuthType, got[0].AuthenticationType)
			assert.Equal(t, test.wantRoute, got[0].RouteFound)
			assert.Equal(t, test.wantCreds, got[0].CredentialsFound)
			assert.Equal(t, test.wantAvail, got[0].Available)
		})
	}
}
|
||||
|
||||
// TestAttacker_Attack_ValidationErrors covers input-validation failures of
// Attack that never reach the network.
func TestAttacker_Attack_ValidationErrors(t *testing.T) {
	attacker, err := attack.New(testDictionary{routes: []string{"stream"}}, 0, time.Second, ui.NopReporter{})
	require.NoError(t, err)

	tests := []struct {
		name     string
		attacker attack.Attacker
		targets  []cameradar.Stream
		wantErr  string
	}{
		{
			name:     "fails with no targets",
			attacker: attacker,
			targets:  nil,
			wantErr:  "no stream found",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			_, err := test.attacker.Attack(t.Context(), test.targets)
			require.Error(t, err)
			assert.ErrorContains(t, err, test.wantErr)
		})
	}
}
|
||||
|
||||
// TestAttacker_Attack_ReturnsErrorWhenRouteMissing checks that the pipeline
// surfaces a validation error when the dictionary contains no working route.
func TestAttacker_Attack_ReturnsErrorWhenRouteMissing(t *testing.T) {
	addr, port := startRTSPServer(t, rtspServerConfig{
		allowedRoute: "stream",
		requireAuth:  false,
		authMethod:   headers.AuthMethodBasic,
	})

	// The only dictionary route does not exist on the server.
	dict := testDictionary{
		routes:    []string{"missing"},
		usernames: []string{"user"},
		passwords: []string{"pass"},
	}

	attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
	require.NoError(t, err)

	streams := []cameradar.Stream{{
		Address: addr,
		Port:    port,
	}}

	got, err := attacker.Attack(t.Context(), streams)
	require.Error(t, err)
	assert.ErrorContains(t, err, "validating streams")
	require.Len(t, got, 1)
	assert.False(t, got[0].RouteFound)
}
|
||||
|
||||
// TestAttacker_Attack_ReturnsErrorWhenCredentialsMissing checks that the
// pipeline surfaces a validation error when no dictionary password works,
// while auth detection still identifies the scheme.
func TestAttacker_Attack_ReturnsErrorWhenCredentialsMissing(t *testing.T) {
	addr, port := startRTSPServer(t, rtspServerConfig{
		allowedRoute: "stream",
		requireAuth:  true,
		username:     "user",
		password:     "pass",
		authMethod:   headers.AuthMethodBasic,
	})

	// Correct username, wrong password: the brute force cannot succeed.
	dict := testDictionary{
		routes:    []string{"stream"},
		usernames: []string{"user"},
		passwords: []string{"wrong"},
	}

	attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
	require.NoError(t, err)

	streams := []cameradar.Stream{{
		Address: addr,
		Port:    port,
	}}

	got, err := attacker.Attack(t.Context(), streams)
	require.Error(t, err)
	assert.ErrorContains(t, err, "validating streams")
	require.Len(t, got, 1)
	assert.Equal(t, cameradar.AuthBasic, got[0].AuthenticationType)
	assert.False(t, got[0].CredentialsFound)
}
|
||||
|
||||
// TestAttacker_Attack_CredentialAttemptFails checks that a server that errors
// during the auth exchange leaves the stream without credentials and makes
// the pipeline fail at validation.
func TestAttacker_Attack_CredentialAttemptFails(t *testing.T) {
	// NOTE(review): reporter is captured but never asserted on; consider
	// checking reporter.HasDebugContaining for the credential-failure debug
	// line, or passing ui.NopReporter{} instead — confirm intent.
	reporter := &recordingReporter{}

	addr, port := startRTSPServer(t, rtspServerConfig{
		allowedRoute: "stream",
		requireAuth:  true,
		username:     "user",
		password:     "pass",
		authMethod:   headers.AuthMethodBasic,
		failOnAuth:   true,
	})

	dict := testDictionary{
		routes:    []string{"stream"},
		usernames: []string{"user"},
		passwords: []string{"pass"},
	}

	attacker, err := attack.New(dict, 0, time.Second, reporter)
	require.NoError(t, err)

	streams := []cameradar.Stream{{
		Address: addr,
		Port:    port,
	}}

	got, err := attacker.Attack(t.Context(), streams)
	require.Error(t, err)
	assert.ErrorContains(t, err, "validating streams")
	require.Len(t, got, 1)
	assert.False(t, got[0].CredentialsFound)
}
|
||||
|
||||
// TestAttacker_Attack_AllowsDummyRoute checks that a server accepting any
// route is discovered via the dummy-route probe alone (empty dictionary),
// recording the empty default route.
func TestAttacker_Attack_AllowsDummyRoute(t *testing.T) {
	addr, port := startRTSPServer(t, rtspServerConfig{
		allowAll:    true,
		requireAuth: false,
		authMethod:  headers.AuthMethodBasic,
	})

	// Empty dictionary: only the built-in dummy route can match.
	dict := testDictionary{}

	attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
	require.NoError(t, err)

	streams := []cameradar.Stream{{
		Address: addr,
		Port:    port,
	}}

	got, err := attacker.Attack(t.Context(), streams)
	require.NoError(t, err)
	require.Len(t, got, 1)
	assert.True(t, got[0].RouteFound)
	assert.Equal(t, []string{""}, got[0].Routes)
	assert.True(t, got[0].Available)
}
|
||||
|
||||
// TestAttacker_Attack_ValidationFailsWhenSetupErrors checks that a SETUP
// failure marks the stream unavailable without failing the overall attack:
// the route is found but validation reports the stream as not consumable.
func TestAttacker_Attack_ValidationFailsWhenSetupErrors(t *testing.T) {
	addr, port := startRTSPServer(t, rtspServerConfig{
		allowedRoute: "stream",
		requireAuth:  false,
		authMethod:   headers.AuthMethodBasic,
		// Force SETUP to be rejected with a non-OK status.
		setupStatus: base.StatusUnsupportedTransport,
	})

	dict := testDictionary{
		routes: []string{"stream"},
	}

	attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
	require.NoError(t, err)

	streams := []cameradar.Stream{{
		Address: addr,
		Port:    port,
	}}

	got, err := attacker.Attack(t.Context(), streams)
	require.NoError(t, err)
	require.Len(t, got, 1)
	assert.False(t, got[0].Available)
	assert.True(t, got[0].RouteFound)
}
|
||||
|
||||
// testDictionary is an in-memory attack.Dictionary implementation for tests.
type testDictionary struct {
	routes    []string // candidate RTSP routes
	usernames []string // candidate usernames
	passwords []string // candidate passwords
}

// Routes returns the configured candidate routes.
func (d testDictionary) Routes() []string {
	return d.routes
}

// Usernames returns the configured candidate usernames.
func (d testDictionary) Usernames() []string {
	return d.usernames
}

// Passwords returns the configured candidate passwords.
func (d testDictionary) Passwords() []string {
	return d.passwords
}
|
||||
|
||||
// recordingReporter is a Reporter test double that records Debug messages
// (under a mutex, since the attacker reports from multiple goroutines) and
// discards everything else.
type recordingReporter struct {
	mu            sync.Mutex
	debugMessages []string // all messages passed to Debug, in arrival order
}

func (r *recordingReporter) Start(cameradar.Step, string) {}

func (r *recordingReporter) Done(cameradar.Step, string) {}

func (r *recordingReporter) Progress(cameradar.Step, string) {}

// Debug records the message for later inspection via HasDebugContaining.
func (r *recordingReporter) Debug(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.debugMessages = append(r.debugMessages, message)
}

func (r *recordingReporter) Error(cameradar.Step, error) {}

func (r *recordingReporter) Summary([]cameradar.Stream, error) {}

func (r *recordingReporter) Close() {}

// HasDebugContaining reports whether any recorded debug message contains the
// given substring.
func (r *recordingReporter) HasDebugContaining(value string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	for _, message := range r.debugMessages {
		if strings.Contains(message, value) {
			return true
		}
	}
	return false
}
|
||||
@@ -0,0 +1,84 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
)
|
||||
|
||||
func (a Attacker) detectAuthMethods(ctx context.Context, targets []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
streams, err := runParallel(ctx, targets, a.detectAuthMethod)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
for i := range streams {
|
||||
a.reporter.Progress(cameradar.StepDetectAuth, cameradar.ProgressTickMessage())
|
||||
|
||||
var authMethod string
|
||||
switch streams[i].AuthenticationType {
|
||||
case cameradar.AuthNone:
|
||||
authMethod = "no"
|
||||
case cameradar.AuthBasic:
|
||||
authMethod = "basic"
|
||||
case cameradar.AuthDigest:
|
||||
authMethod = "digest"
|
||||
case cameradar.AuthUnknown:
|
||||
authMethod = "unknown"
|
||||
default:
|
||||
authMethod = fmt.Sprintf("unknown (%d)", streams[i].AuthenticationType)
|
||||
}
|
||||
|
||||
a.reporter.Progress(cameradar.StepDetectAuth, fmt.Sprintf("Detected %s authentication for %s:%d", authMethod, streams[i].Address.String(), streams[i].Port))
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
// detectAuthMethod issues a DESCRIBE to one stream and classifies its
// authentication scheme from the status code and WWW-Authenticate headers.
// For HTTP(S)-tunneled streams a probe failure falls back to a plain status
// check, and is never fatal; for other schemes it is returned as an error.
func (a Attacker) detectAuthMethod(ctx context.Context, stream cameradar.Stream) (cameradar.Stream, error) {
	if ctx.Err() != nil {
		return stream, ctx.Err()
	}
	u, err := stream.URL()
	if err != nil {
		return stream, fmt.Errorf("building rtsp url: %w", err)
	}

	statusCode, headers, err := a.probeDescribeHeaders(ctx, u)
	if err != nil {
		a.reporter.Debug(cameradar.StepDetectAuth, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > error: %v", u, err))
		if stream.Scheme == schemeHTTP || stream.Scheme == schemeHTTPS {
			// Fallback: a status-only describe, without header inspection.
			statusCode, statusErr := a.describeStatus(stream)
			if statusErr == nil {
				a.reporter.Debug(cameradar.StepDetectAuth, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d (fallback)", u, statusCode))
				// No headers available here, so 401 classifies as AuthUnknown.
				stream.AuthenticationType = authTypeFromStatus(statusCode, nil)
				return stream, nil
			}

			// Both probes failed; record the stream as unclassified but
			// do not fail the run for tunneled schemes.
			stream.AuthenticationType = cameradar.AuthUnknown
			return stream, nil
		}

		stream.AuthenticationType = cameradar.AuthUnknown
		return stream, fmt.Errorf("performing describe request at %q: %w", u, err)
	}

	a.reporter.Debug(cameradar.StepDetectAuth, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", u, statusCode))
	values := headerValues(headers, "WWW-Authenticate")
	stream.AuthenticationType = authTypeFromStatus(statusCode, values)

	return stream, nil
}
|
||||
|
||||
func authTypeFromStatus(statusCode base.StatusCode, wwwAuthenticate base.HeaderValue) cameradar.AuthType {
|
||||
switch statusCode {
|
||||
case base.StatusOK:
|
||||
return cameradar.AuthNone
|
||||
case base.StatusUnauthorized:
|
||||
return authTypeFromHeaders(wwwAuthenticate)
|
||||
default:
|
||||
return cameradar.AuthUnknown
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,379 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testDictionary is an in-memory Dictionary implementation for the package's
// internal tests.
type testDictionary struct {
	routes    []string // candidate RTSP routes
	usernames []string // candidate usernames
	passwords []string // candidate passwords
}

// Routes returns the configured candidate routes.
func (d testDictionary) Routes() []string {
	return d.routes
}

// Usernames returns the configured candidate usernames.
func (d testDictionary) Usernames() []string {
	return d.usernames
}

// Passwords returns the configured candidate passwords.
func (d testDictionary) Passwords() []string {
	return d.passwords
}
|
||||
|
||||
// TestAuthTypeFromHeaders checks WWW-Authenticate header classification,
// including digest taking precedence when both schemes are advertised.
func TestAuthTypeFromHeaders(t *testing.T) {
	tests := []struct {
		name   string
		values base.HeaderValue
		want   cameradar.AuthType
	}{
		{
			name: "digest wins over basic",
			values: base.HeaderValue{
				headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal()[0],
				headers.Authenticate{Method: headers.AuthMethodDigest, Realm: "cam", Nonce: "nonce"}.Marshal()[0],
			},
			want: cameradar.AuthDigest,
		},
		{
			name:   "basic auth",
			values: headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
			want:   cameradar.AuthBasic,
		},
		{
			name:   "digest auth",
			values: headers.Authenticate{Method: headers.AuthMethodDigest, Realm: "cam", Nonce: "nonce"}.Marshal(),
			want:   cameradar.AuthDigest,
		},
		{
			name:   "unknown with empty values",
			values: nil,
			want:   cameradar.AuthUnknown,
		},
		{
			name:   "unknown with unsupported header",
			values: base.HeaderValue{"Bearer abc"},
			want:   cameradar.AuthUnknown,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			assert.Equal(t, test.want, authTypeFromHeaders(test.values))
		})
	}
}
|
||||
|
||||
// TestAuthTypeFromStatus checks the status-code to auth-type mapping: 200 is
// no auth, 401 defers to the headers, everything else is unknown.
func TestAuthTypeFromStatus(t *testing.T) {
	tests := []struct {
		name         string
		statusCode   base.StatusCode
		headers      base.HeaderValue
		wantAuthType cameradar.AuthType
	}{
		{
			name:         "status ok means no auth",
			statusCode:   base.StatusOK,
			wantAuthType: cameradar.AuthNone,
		},
		{
			name:         "status unauthorized with basic",
			statusCode:   base.StatusUnauthorized,
			headers:      headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
			wantAuthType: cameradar.AuthBasic,
		},
		{
			name:         "status unauthorized with digest",
			statusCode:   base.StatusUnauthorized,
			headers:      headers.Authenticate{Method: headers.AuthMethodDigest, Realm: "cam", Nonce: "nonce"}.Marshal(),
			wantAuthType: cameradar.AuthDigest,
		},
		{
			name:         "status unauthorized without auth headers",
			statusCode:   base.StatusUnauthorized,
			wantAuthType: cameradar.AuthUnknown,
		},
		{
			name:         "status not found is unknown",
			statusCode:   base.StatusNotFound,
			wantAuthType: cameradar.AuthUnknown,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			assert.Equal(t, test.wantAuthType, authTypeFromStatus(test.statusCode, test.headers))
		})
	}
}
|
||||
|
||||
// TestDetectAuthMethod exercises detectAuthMethod end-to-end against a local
// RTSP probe server replying with a fixed status and headers.
func TestDetectAuthMethod(t *testing.T) {
	tests := []struct {
		name       string
		statusCode base.StatusCode
		headers    base.Header
		want       cameradar.AuthType
	}{
		{
			name:       "no auth when status ok",
			statusCode: base.StatusOK,
			// Headers present but irrelevant: a 200 short-circuits to AuthNone.
			headers: base.Header{
				"WWW-Authenticate": headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
			},
			want: cameradar.AuthNone,
		},
		{
			name:       "basic auth on unauthorized",
			statusCode: base.StatusUnauthorized,
			headers: base.Header{
				"WWW-Authenticate": headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
			},
			want: cameradar.AuthBasic,
		},
		{
			name:       "digest auth on unauthorized",
			statusCode: base.StatusUnauthorized,
			headers: base.Header{
				"WWW-Authenticate": headers.Authenticate{Method: headers.AuthMethodDigest, Realm: "cam", Nonce: "nonce"}.Marshal(),
			},
			want: cameradar.AuthDigest,
		},
		{
			name:       "unknown auth on unauthorized without www-authenticate",
			statusCode: base.StatusUnauthorized,
			headers:    nil,
			want:       cameradar.AuthUnknown,
		},
		{
			name:       "unknown auth on other status",
			statusCode: base.StatusNotFound,
			headers:    nil,
			want:       cameradar.AuthUnknown,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			addr, port := startRTSPProbeServer(t, test.statusCode, test.headers)

			attacker, err := New(testDictionary{}, 0, time.Second, ui.NopReporter{})
			require.NoError(t, err)

			stream := cameradar.Stream{
				Address: addr,
				Port:    port,
			}

			got, err := attacker.detectAuthMethod(t.Context(), stream)
			require.NoError(t, err)
			assert.Equal(t, test.want, got.AuthenticationType)
		})
	}
}
|
||||
|
||||
func TestDetectAuthMethod_HTTPTunnel_NonFatal(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
scheme string
|
||||
}{
|
||||
{name: "http tunnel", scheme: "http"},
|
||||
{name: "https tunnel", scheme: "https"},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
attacker, err := New(testDictionary{}, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
stream := cameradar.Stream{
|
||||
Address: netip.MustParseAddr("127.0.0.1"),
|
||||
Port: 1,
|
||||
Scheme: test.scheme,
|
||||
}
|
||||
|
||||
got, err := attacker.detectAuthMethod(t.Context(), stream)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cameradar.AuthUnknown, got.AuthenticationType)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAuthMethod_RTSPS(t *testing.T) {
|
||||
addr, port := startRTSPTLSProbeServer(t, base.StatusUnauthorized, base.Header{
|
||||
"WWW-Authenticate": headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
|
||||
})
|
||||
|
||||
attacker, err := New(testDictionary{}, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
stream := cameradar.Stream{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
Scheme: "rtsps",
|
||||
}
|
||||
|
||||
got, err := attacker.detectAuthMethod(t.Context(), stream)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, cameradar.AuthBasic, got.AuthenticationType)
|
||||
}
|
||||
|
||||
func startRTSPProbeServer(t *testing.T, statusCode base.StatusCode, headers base.Header) (netip.Addr, uint16) {
|
||||
t.Helper()
|
||||
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = listener.Close()
|
||||
})
|
||||
|
||||
go func() {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
_ = conn.SetDeadline(time.Now().Add(time.Second))
|
||||
|
||||
reader := bufio.NewReader(conn)
|
||||
for {
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if strings.TrimSpace(line) == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
statusText := statusTextFromCode(statusCode)
|
||||
|
||||
var builder strings.Builder
|
||||
_, _ = fmt.Fprintf(&builder, "RTSP/1.0 %d %s\r\n", statusCode, statusText)
|
||||
builder.WriteString("CSeq: 1\r\n")
|
||||
for key, values := range headers {
|
||||
for _, value := range values {
|
||||
_, _ = fmt.Fprintf(&builder, "%s: %s\r\n", key, value)
|
||||
}
|
||||
}
|
||||
builder.WriteString("Content-Length: 0\r\n\r\n")
|
||||
|
||||
_, _ = conn.Write([]byte(builder.String()))
|
||||
}()
|
||||
|
||||
tcpAddr, ok := listener.Addr().(*net.TCPAddr)
|
||||
require.True(t, ok)
|
||||
|
||||
return netip.MustParseAddr("127.0.0.1"), uint16(tcpAddr.Port)
|
||||
}
|
||||
|
||||
func startRTSPTLSProbeServer(t *testing.T, statusCode base.StatusCode, headers base.Header) (netip.Addr, uint16) {
|
||||
t.Helper()
|
||||
|
||||
listener, err := tls.Listen("tcp", "127.0.0.1:0", testTLSConfig(t))
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = listener.Close()
|
||||
})
|
||||
|
||||
go func() {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
_ = conn.SetDeadline(time.Now().Add(time.Second))
|
||||
|
||||
reader := bufio.NewReader(conn)
|
||||
for {
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if strings.TrimSpace(line) == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
statusText := statusTextFromCode(statusCode)
|
||||
|
||||
var builder strings.Builder
|
||||
_, _ = fmt.Fprintf(&builder, "RTSP/1.0 %d %s\r\n", statusCode, statusText)
|
||||
builder.WriteString("CSeq: 1\r\n")
|
||||
for key, values := range headers {
|
||||
for _, value := range values {
|
||||
_, _ = fmt.Fprintf(&builder, "%s: %s\r\n", key, value)
|
||||
}
|
||||
}
|
||||
builder.WriteString("Content-Length: 0\r\n\r\n")
|
||||
|
||||
_, _ = conn.Write([]byte(builder.String()))
|
||||
}()
|
||||
|
||||
tcpAddr, ok := listener.Addr().(*net.TCPAddr)
|
||||
require.True(t, ok)
|
||||
|
||||
return netip.MustParseAddr("127.0.0.1"), uint16(tcpAddr.Port)
|
||||
}
|
||||
|
||||
func testTLSConfig(t *testing.T) *tls.Config {
|
||||
t.Helper()
|
||||
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
require.NoError(t, err)
|
||||
|
||||
template := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
NotBefore: time.Now().Add(-time.Hour),
|
||||
NotAfter: time.Now().Add(time.Hour),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
|
||||
}
|
||||
|
||||
der, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &tls.Config{
|
||||
Certificates: []tls.Certificate{{
|
||||
Certificate: [][]byte{der},
|
||||
PrivateKey: key,
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
func statusTextFromCode(code base.StatusCode) string {
|
||||
switch code {
|
||||
case base.StatusOK:
|
||||
return "OK"
|
||||
case base.StatusUnauthorized:
|
||||
return "Unauthorized"
|
||||
case base.StatusNotFound:
|
||||
return "Not Found"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,217 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/bluenviron/gortsplib/v5"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/liberrors"
|
||||
)
|
||||
|
||||
// Scheme names recognized in stream URLs and transport overrides.
const (
	schemeRTSP  = "rtsp"
	schemeRTSPS = "rtsps"
	schemeHTTP  = "http"
	schemeHTTPS = "https"
)
|
||||
|
||||
func (a Attacker) newRTSPClient(stream cameradar.Stream) (*gortsplib.Client, error) {
|
||||
u, err := stream.URL()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("building rtsp url: %w", err)
|
||||
}
|
||||
if u.Scheme != schemeRTSP && u.Scheme != schemeRTSPS {
|
||||
return nil, fmt.Errorf("unsupported rtsp url scheme: %q", u.Scheme)
|
||||
}
|
||||
|
||||
client := &gortsplib.Client{
|
||||
ReadTimeout: a.timeout,
|
||||
WriteTimeout: a.timeout,
|
||||
Scheme: u.Scheme,
|
||||
Host: u.Host,
|
||||
}
|
||||
|
||||
switch stream.Scheme {
|
||||
case "":
|
||||
// No explicit transport was requested. Use plain RTSP/RTSPS from the URL.
|
||||
case schemeRTSP, schemeRTSPS:
|
||||
// Nothing to do.
|
||||
case schemeHTTP:
|
||||
client.Scheme = schemeRTSP
|
||||
client.Tunnel = gortsplib.TunnelHTTP
|
||||
case schemeHTTPS:
|
||||
client.Scheme = schemeRTSPS
|
||||
client.Tunnel = gortsplib.TunnelHTTP
|
||||
client.TLSConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported stream transport scheme: %q", stream.Scheme)
|
||||
}
|
||||
|
||||
err = client.Start()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (a Attacker) describeStatus(stream cameradar.Stream) (base.StatusCode, error) {
|
||||
u, err := stream.URL()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("building rtsp url: %w", err)
|
||||
}
|
||||
|
||||
client, err := a.newRTSPClient(stream)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
_, res, err := client.Describe(u)
|
||||
if err != nil {
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) {
|
||||
return badStatus.Code, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if res == nil {
|
||||
return 0, errors.New("no response received")
|
||||
}
|
||||
|
||||
return res.StatusCode, nil
|
||||
}
|
||||
|
||||
// probeDescribeHeaders performs a manual DESCRIBE request and returns the status code and headers.
//
// NOTE: We do not use gortsplib here because it does not expose response headers when the status code is 401 Unauthorized,
// which is exactly what we need in order to detect authentication methods.
func (a Attacker) probeDescribeHeaders(ctx context.Context, u *base.URL) (base.StatusCode, base.Header, error) {
	dialer := &net.Dialer{Timeout: a.timeout}

	var (
		conn net.Conn
		err  error
	)
	switch u.Scheme {
	case schemeRTSP:
		conn, err = dialer.DialContext(ctx, "tcp", u.Host)
	case schemeRTSPS:
		// Cameras typically use self-signed certificates, so skip verification.
		tlsDialer := &tls.Dialer{
			NetDialer: dialer,
			Config:    &tls.Config{InsecureSkipVerify: true},
		}
		conn, err = tlsDialer.DialContext(ctx, "tcp", u.Host)
	default:
		return 0, nil, fmt.Errorf("unsupported rtsp url scheme: %q", u.Scheme)
	}
	if err != nil {
		return 0, nil, err
	}
	defer conn.Close()

	// Prefer the context deadline; fall back to the attacker timeout so the
	// exchange is always bounded.
	deadline, ok := ctx.Deadline()
	if !ok {
		deadline = time.Now().Add(a.timeout)
	}

	err = conn.SetDeadline(deadline)
	if err != nil {
		return 0, nil, err
	}

	// Hand-rolled RTSP/1.0 DESCRIBE request; CRLF line endings are mandatory.
	request := fmt.Sprintf(
		"DESCRIBE %s RTSP/1.0\r\nCSeq: 1\r\nUser-Agent: cameradar\r\nAccept: application/sdp\r\nHost: %s\r\n\r\n",
		u,
		u.Host,
	)
	_, err = conn.Write([]byte(request))
	if err != nil {
		return 0, nil, err
	}

	// Status line has the shape "RTSP/1.0 <code> <reason>"; field 1 is the code.
	reader := textproto.NewReader(bufio.NewReader(conn))
	statusLine, err := reader.ReadLine()
	if err != nil {
		return 0, nil, err
	}
	fields := strings.Fields(statusLine)
	if len(fields) < 2 {
		return 0, nil, fmt.Errorf("invalid RTSP status line: %q", statusLine)
	}

	code, err := strconv.Atoi(fields[1])
	if err != nil {
		return 0, nil, fmt.Errorf("parsing RTSP status code %q: %w", fields[1], err)
	}

	// RTSP headers follow MIME conventions, so textproto can parse them.
	mimeHeader, err := reader.ReadMIMEHeader()
	if err != nil {
		return 0, nil, err
	}

	// Copy into a base.Header with detached value slices.
	headers := make(base.Header)
	for key, values := range mimeHeader {
		headers[key] = append(base.HeaderValue(nil), values...)
	}

	return base.StatusCode(code), headers, nil
}
|
||||
|
||||
func authTypeFromHeaders(values base.HeaderValue) cameradar.AuthType {
|
||||
if len(values) == 0 {
|
||||
return cameradar.AuthUnknown
|
||||
}
|
||||
|
||||
var hasBasic bool
|
||||
var hasDigest bool
|
||||
|
||||
for _, value := range values {
|
||||
var authHeader headers.Authenticate
|
||||
err := authHeader.Unmarshal(base.HeaderValue{value})
|
||||
if err != nil {
|
||||
lower := strings.ToLower(value)
|
||||
hasDigest = hasDigest || strings.Contains(lower, "digest")
|
||||
hasBasic = hasBasic || strings.Contains(lower, "basic")
|
||||
continue
|
||||
}
|
||||
|
||||
switch authHeader.Method {
|
||||
case headers.AuthMethodDigest:
|
||||
hasDigest = true
|
||||
case headers.AuthMethodBasic:
|
||||
hasBasic = true
|
||||
}
|
||||
}
|
||||
|
||||
if hasDigest {
|
||||
return cameradar.AuthDigest
|
||||
}
|
||||
if hasBasic {
|
||||
return cameradar.AuthBasic
|
||||
}
|
||||
return cameradar.AuthUnknown
|
||||
}
|
||||
|
||||
func headerValues(header base.Header, name string) base.HeaderValue {
|
||||
if header == nil {
|
||||
return nil
|
||||
}
|
||||
for key, values := range header {
|
||||
if strings.EqualFold(key, name) {
|
||||
return values
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -0,0 +1,166 @@
|
||||
package attack_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/bluenviron/gortsplib/v5"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/auth"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/description"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/format"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/liberrors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// rtspServerConfig configures the test RTSP server created by startRTSPServer.
type rtspServerConfig struct {
	allowAll     bool               // serve any route instead of only allowedRoute
	allowedRoute string             // single route served when allowAll is false
	requireAuth  bool               // demand credentials on DESCRIBE/SETUP
	username     string             // expected username when requireAuth is set
	password     string             // expected password when requireAuth is set
	authMethod   headers.AuthMethod // method used to build the default challenge
	authHeader   base.HeaderValue   // explicit WWW-Authenticate value; overrides authMethod
	failOnAuth   bool               // reject any Authorization header with 400
	setupStatus  base.StatusCode    // overrides the SETUP success status when non-zero
}

// testServerHandler implements the gortsplib server callbacks, mirroring the
// fields of rtspServerConfig plus the stream served on success.
type testServerHandler struct {
	stream       *gortsplib.ServerStream // returned on successful DESCRIBE/SETUP
	allowAll     bool
	allowedRoute string
	requireAuth  bool
	username     string
	password     string
	authHeader   base.HeaderValue
	failOnAuth   bool
	setupStatus  base.StatusCode
}
|
||||
|
||||
func (h *testServerHandler) OnDescribe(ctx *gortsplib.ServerHandlerOnDescribeCtx) (*base.Response, *gortsplib.ServerStream, error) {
|
||||
if !h.routeAllowed(ctx.Path) {
|
||||
return &base.Response{StatusCode: base.StatusNotFound}, nil, nil
|
||||
}
|
||||
|
||||
if h.failOnAuth && len(ctx.Request.Header["Authorization"]) > 0 {
|
||||
return &base.Response{StatusCode: base.StatusBadRequest}, nil, errors.New("forced auth failure")
|
||||
}
|
||||
|
||||
if h.requireAuth && !ctx.Conn.VerifyCredentials(ctx.Request, h.username, h.password) {
|
||||
return &base.Response{
|
||||
StatusCode: base.StatusUnauthorized,
|
||||
Header: base.Header{
|
||||
"WWW-Authenticate": h.authHeader,
|
||||
},
|
||||
}, nil, liberrors.ErrServerAuth{}
|
||||
}
|
||||
|
||||
return &base.Response{StatusCode: base.StatusOK}, h.stream, nil
|
||||
}
|
||||
|
||||
func (h *testServerHandler) OnSetup(ctx *gortsplib.ServerHandlerOnSetupCtx) (*base.Response, *gortsplib.ServerStream, error) {
|
||||
if !h.routeAllowed(ctx.Path) {
|
||||
return &base.Response{StatusCode: base.StatusNotFound}, nil, nil
|
||||
}
|
||||
|
||||
if h.requireAuth && !ctx.Conn.VerifyCredentials(ctx.Request, h.username, h.password) {
|
||||
return &base.Response{
|
||||
StatusCode: base.StatusUnauthorized,
|
||||
Header: base.Header{
|
||||
"WWW-Authenticate": h.authHeader,
|
||||
},
|
||||
}, nil, liberrors.ErrServerAuth{}
|
||||
}
|
||||
|
||||
status := base.StatusOK
|
||||
if h.setupStatus != 0 {
|
||||
status = h.setupStatus
|
||||
}
|
||||
|
||||
return &base.Response{StatusCode: status}, h.stream, nil
|
||||
}
|
||||
|
||||
func (h *testServerHandler) routeAllowed(path string) bool {
|
||||
path = strings.TrimLeft(path, "/")
|
||||
return h.allowAll || path == h.allowedRoute
|
||||
}
|
||||
|
||||
// startRTSPServer starts a full gortsplib RTSP server driven by cfg and
// returns its loopback address and bound port. Server and stream teardown is
// registered via t.Cleanup.
func startRTSPServer(t *testing.T, cfg rtspServerConfig) (netip.Addr, uint16) {
	t.Helper()

	handler := &testServerHandler{
		allowAll:     cfg.allowAll,
		allowedRoute: cfg.allowedRoute,
		requireAuth:  cfg.requireAuth,
		username:     cfg.username,
		password:     cfg.password,
		failOnAuth:   cfg.failOnAuth,
		setupStatus:  cfg.setupStatus,
	}

	// Use the explicit challenge when provided; otherwise build one from the
	// configured auth method (digest challenges need a nonce).
	if len(cfg.authHeader) > 0 {
		handler.authHeader = cfg.authHeader
	} else {
		authHeader := headers.Authenticate{
			Method: cfg.authMethod,
			Realm:  "cameradar",
		}
		if cfg.authMethod == headers.AuthMethodDigest {
			authHeader.Nonce = "nonce"
		}
		handler.authHeader = authHeader.Marshal()
	}

	server := &gortsplib.Server{
		Handler:     handler,
		RTSPAddress: "127.0.0.1:0", // port 0: the OS picks a free port
		AuthMethods: authMethods(cfg.authMethod),
	}

	err := server.Start()
	require.NoError(t, err)
	t.Cleanup(server.Close)

	// Minimal single-media H264 session description served to clients.
	desc := &description.Session{
		Medias: []*description.Media{{
			Type: description.MediaTypeVideo,
			Formats: []format.Format{&format.H264{
				PayloadTyp:        96,
				PacketizationMode: 1,
			}},
		}},
	}

	stream := &gortsplib.ServerStream{
		Server: server,
		Desc:   desc,
	}
	err = stream.Initialize()
	require.NoError(t, err)
	t.Cleanup(stream.Close)

	// Wire the stream in only after initialization so handlers never see a
	// half-initialized stream.
	handler.stream = stream

	listener := server.NetListener()
	require.NotNil(t, listener)

	tcpAddr, ok := listener.Addr().(*net.TCPAddr)
	require.True(t, ok)

	return netip.MustParseAddr("127.0.0.1"), uint16(tcpAddr.Port)
}
|
||||
|
||||
func authMethods(method headers.AuthMethod) []auth.VerifyMethod {
|
||||
switch method {
|
||||
case headers.AuthMethodDigest:
|
||||
return []auth.VerifyMethod{auth.VerifyMethodDigestMD5}
|
||||
case headers.AuthMethodBasic:
|
||||
return []auth.VerifyMethod{auth.VerifyMethodBasic}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,164 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestStreamURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
stream cameradar.Stream
|
||||
wantURL string
|
||||
wantParsedScheme string
|
||||
}{
|
||||
{
|
||||
name: "empty route",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
},
|
||||
wantURL: "rtsp://192.168.0.10:554/",
|
||||
},
|
||||
{
|
||||
name: "root route",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"/"},
|
||||
},
|
||||
wantURL: "rtsp://192.168.0.10:554/",
|
||||
},
|
||||
{
|
||||
name: "multiple leading slashes",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"////"},
|
||||
},
|
||||
wantURL: "rtsp://192.168.0.10:554/",
|
||||
},
|
||||
{
|
||||
name: "route with no leading slash",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream"},
|
||||
},
|
||||
wantURL: "rtsp://192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "route with leading slash",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"/stream"},
|
||||
},
|
||||
wantURL: "rtsp://192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "route with trailing slash",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream/"},
|
||||
},
|
||||
wantURL: "rtsp://192.168.0.10:554/stream/",
|
||||
},
|
||||
{
|
||||
name: "route with spaces",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{" /stream "},
|
||||
},
|
||||
wantURL: "rtsp://192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "username and password",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream"},
|
||||
Username: "admin",
|
||||
Password: "admin123",
|
||||
},
|
||||
wantURL: "rtsp://admin:admin123@192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "empty username with password",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream"},
|
||||
Password: "pass",
|
||||
},
|
||||
wantURL: "rtsp://:pass@192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "username only",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream"},
|
||||
Username: "user",
|
||||
},
|
||||
wantURL: "rtsp://user:@192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "http scheme",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream"},
|
||||
Scheme: "http",
|
||||
},
|
||||
wantURL: "http://192.168.0.10:554/stream",
|
||||
wantParsedScheme: "rtsp",
|
||||
},
|
||||
{
|
||||
name: "https scheme",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream"},
|
||||
Scheme: "https",
|
||||
},
|
||||
wantURL: "https://192.168.0.10:554/stream",
|
||||
wantParsedScheme: "rtsps",
|
||||
},
|
||||
{
|
||||
name: "rtsps scheme",
|
||||
stream: cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
Routes: []string{"stream"},
|
||||
Scheme: "rtsps",
|
||||
},
|
||||
wantURL: "rtsps://192.168.0.10:554/stream",
|
||||
wantParsedScheme: "rtsps",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
gotURL := test.stream.String()
|
||||
require.Equal(t, test.wantURL, gotURL)
|
||||
|
||||
parsedURL, err := test.stream.URL()
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedURL, err := url.Parse(test.wantURL)
|
||||
require.NoError(t, err)
|
||||
wantParsedScheme := test.wantParsedScheme
|
||||
if wantParsedScheme == "" {
|
||||
wantParsedScheme = expectedURL.Scheme
|
||||
}
|
||||
require.Equal(t, wantParsedScheme, parsedURL.Scheme)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// attackFn is a single-stream attack step executed by the parallel worker pool.
type attackFn func(context.Context, cameradar.Stream) (cameradar.Stream, error)
|
||||
|
||||
// runParallel applies fn to every target concurrently over a bounded worker
// pool and returns the updated streams. On the first error the remaining work
// is cancelled and that error is returned alongside the partially updated
// slice.
func runParallel(ctx context.Context, targets []cameradar.Stream, fn attackFn) ([]cameradar.Stream, error) {
	if len(targets) == 0 {
		return targets, nil
	}

	workerCount := parallelWorkerCount(len(targets))
	if workerCount == 0 {
		return targets, nil
	}

	// Capacity 1 so the first failing worker never blocks; later errors are
	// dropped by the workers' non-blocking sends.
	errCh := make(chan error, 1)
	jobs := make(chan attackJob)

	// Work on a copy so the caller's slice is left untouched on failure.
	updated := make([]cameradar.Stream, len(targets))
	copy(updated, targets)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for range workerCount {
		wg.Go(func() {
			runWorker(ctx, jobs, cancel, fn, updated, errCh)
		})
	}

	// Feed all jobs, then close so idle workers drain and exit.
	queueJobs(ctx, jobs, targets)
	close(jobs)

	wg.Wait()

	// Non-blocking read: report the first recorded error, if any.
	select {
	case err := <-errCh:
		return updated, err
	default:
	}

	return updated, nil
}
|
||||
|
||||
type attackJob struct {
|
||||
index int
|
||||
stream cameradar.Stream
|
||||
}
|
||||
|
||||
func queueJobs(ctx context.Context, jobs chan<- attackJob, targets []cameradar.Stream) {
|
||||
for i, stream := range targets {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case jobs <- attackJob{index: i, stream: stream}:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// runWorker consumes jobs until the channel is drained or the context is
// cancelled, writing each result into updated at the job's index. The first
// error is pushed to errCh (best effort) and cancels the whole run.
//
// Each index is owned by exactly one job, so concurrent writes to
// updated[job.index] from different workers never overlap.
func runWorker(ctx context.Context, jobs <-chan attackJob, cancelFn func(), fn attackFn, updated []cameradar.Stream, errCh chan error) {
	for {
		select {
		case <-ctx.Done():
			return
		case job, ok := <-jobs:
			if !ok {
				// Channel closed: all jobs handed out, worker is done.
				return
			}

			stream, err := fn(ctx, job.stream)
			if err != nil {
				// Best-effort send: errCh has capacity 1 and only the first
				// error is kept; later ones are dropped.
				select {
				case errCh <- err:
				default:
				}

				cancelFn()
				return
			}

			updated[job.index] = stream
		}
	}
}
|
||||
|
||||
// parallelWorkerCount returns the number of workers to spawn for targetCount
// jobs: zero for no work, otherwise the target count capped at GOMAXPROCS
// (minimum one).
func parallelWorkerCount(targetCount int) int {
	if targetCount <= 0 {
		return 0
	}

	workers := runtime.GOMAXPROCS(0)
	if workers < 1 {
		workers = 1
	}

	return min(workers, targetCount)
}
|
||||
@@ -0,0 +1,81 @@
|
||||
{
|
||||
"usernames": [
|
||||
"",
|
||||
"666666",
|
||||
"888888",
|
||||
"Admin",
|
||||
"admin",
|
||||
"admin1",
|
||||
"administrator",
|
||||
"Administrator",
|
||||
"aiphone",
|
||||
"Dinion",
|
||||
"none",
|
||||
"root",
|
||||
"Root",
|
||||
"service",
|
||||
"supervisor",
|
||||
"ubnt"
|
||||
],
|
||||
"passwords": [
|
||||
"",
|
||||
"0000",
|
||||
"00000",
|
||||
"1111",
|
||||
"111111",
|
||||
"1111111",
|
||||
"123",
|
||||
"1234",
|
||||
"12345",
|
||||
"123456",
|
||||
"1234567",
|
||||
"12345678",
|
||||
"123456789",
|
||||
"12345678910",
|
||||
"4321",
|
||||
"666666",
|
||||
"6fJjMKYx",
|
||||
"888888",
|
||||
"9999",
|
||||
"admin",
|
||||
"admin123456",
|
||||
"admin pass",
|
||||
"Admin",
|
||||
"admin123",
|
||||
"administrator",
|
||||
"Administrator",
|
||||
"aiphone",
|
||||
"camera",
|
||||
"Camera",
|
||||
"fliradmin",
|
||||
"GRwvcj8j",
|
||||
"hikvision",
|
||||
"hikadmin",
|
||||
"HuaWei123",
|
||||
"ikwd",
|
||||
"jvc",
|
||||
"kj3TqCWv",
|
||||
"meinsm",
|
||||
"pass",
|
||||
"Pass",
|
||||
"password",
|
||||
"password123",
|
||||
"qwerty",
|
||||
"qwerty123",
|
||||
"Recorder",
|
||||
"reolink",
|
||||
"root",
|
||||
"service",
|
||||
"supervisor",
|
||||
"support",
|
||||
"system",
|
||||
"tlJwpbo6",
|
||||
"toor",
|
||||
"tp-link",
|
||||
"ubnt",
|
||||
"user",
|
||||
"wbox",
|
||||
"wbox123",
|
||||
"Y5eIMz3C"
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,197 @@
|
||||
|
||||
live/ch01_0
|
||||
0/1:1/main
|
||||
0/usrnm:pwd/main
|
||||
0/video1
|
||||
1
|
||||
1.AMP
|
||||
1/h264major
|
||||
1/stream1
|
||||
11
|
||||
12
|
||||
125
|
||||
1080p
|
||||
1440p
|
||||
480p
|
||||
4K
|
||||
666
|
||||
720p
|
||||
AVStream1_1
|
||||
CAM_ID.password.mp2
|
||||
CH001.sdp
|
||||
GetData.cgi
|
||||
HD
|
||||
HighResolutionVideo
|
||||
LowResolutionVideo
|
||||
MediaInput/h264
|
||||
MediaInput/mpeg4
|
||||
ONVIF/MediaInput
|
||||
ONVIF/MediaInput?profile=4_def_profile6
|
||||
StdCh1
|
||||
Streaming/Channels/1
|
||||
Streaming/Unicast/channels/101
|
||||
StreamingSetting?version=1.0&action=getRTSPStream&ChannelID=1&ChannelName=Channel1
|
||||
VideoInput/1/h264/1
|
||||
VideoInput/1/mpeg4/1
|
||||
access_code
|
||||
access_name_for_stream_1_to_5
|
||||
api/mjpegvideo.cgi
|
||||
av0_0
|
||||
av2
|
||||
avc
|
||||
avn=2
|
||||
axis-media/media.amp
|
||||
axis-media/media.amp?camera=1
|
||||
axis-media/media.amp?videocodec=h264
|
||||
cam
|
||||
cam/realmonitor
|
||||
cam/realmonitor?channel=0&subtype=0
|
||||
cam/realmonitor?channel=1&subtype=0
|
||||
cam/realmonitor?channel=1&subtype=1
|
||||
cam/realmonitor?channel=1&subtype=1&unicast=true&proto=Onvif
|
||||
cam0
|
||||
cam0_0
|
||||
cam0_1
|
||||
cam1
|
||||
cam1/h264
|
||||
cam1/h264/multicast
|
||||
cam1/mjpeg
|
||||
cam1/mpeg4
|
||||
cam1/mpeg4?user='username'&pwd='password'
|
||||
cam1/onvif-h264
|
||||
camera.stm
|
||||
ch0
|
||||
ch00/0
|
||||
ch001.sdp
|
||||
ch01.264
|
||||
ch01.264?
|
||||
ch01.264?ptype=tcp
|
||||
ch1_0
|
||||
ch2_0
|
||||
ch3_0
|
||||
ch4_0
|
||||
ch1/0
|
||||
ch2/0
|
||||
ch3/0
|
||||
ch4/0
|
||||
ch0_0.h264
|
||||
ch0_unicast_firststream
|
||||
ch0_unicast_secondstream
|
||||
ch1-s1
|
||||
channel1
|
||||
gnz_media/main
|
||||
h264
|
||||
h264.sdp
|
||||
h264/ch1/sub/av_stream
|
||||
h264/media.amp
|
||||
h264Preview_01_main
|
||||
h264Preview_01_sub
|
||||
h264_vga.sdp
|
||||
h264_stream
|
||||
image.mpg
|
||||
img/media.sav
|
||||
img/media.sav?channel=1
|
||||
img/video.asf
|
||||
img/video.sav
|
||||
ioImage/1
|
||||
ipcam.sdp
|
||||
ipcam_h264.sdp
|
||||
ipcam_mjpeg.sdp
|
||||
live
|
||||
live.sdp
|
||||
live/av0
|
||||
live/ch0
|
||||
live/ch00_0
|
||||
live/ch01_0
|
||||
live/h264
|
||||
live/main
|
||||
live/main0
|
||||
live/mpeg4
|
||||
live1.sdp
|
||||
live3.sdp
|
||||
live_mpeg4.sdp
|
||||
live_st1
|
||||
livestream
|
||||
main
|
||||
media
|
||||
media.amp
|
||||
media.amp?streamprofile=Profile1
|
||||
media/media.amp
|
||||
media/video1
|
||||
medias2
|
||||
mjpeg/media.smp
|
||||
mp4
|
||||
mpeg/media.amp
|
||||
mpeg4
|
||||
mpeg4/1/media.amp
|
||||
mpeg4/media.amp
|
||||
mpeg4/media.smp
|
||||
mpeg4unicast
|
||||
mpg4/rtsp.amp
|
||||
multicaststream
|
||||
now.mp4
|
||||
nph-h264.cgi
|
||||
nphMpeg4/g726-640x
|
||||
nphMpeg4/g726-640x48
|
||||
nphMpeg4/g726-640x480
|
||||
nphMpeg4/nil-320x240
|
||||
onvif-media/media.amp
|
||||
onvif1
|
||||
pass@10.0.0.5:6667/blinkhd
|
||||
play1.sdp
|
||||
play2.sdp
|
||||
profile0
|
||||
profile1
|
||||
profile2
|
||||
profile2/media.smp
|
||||
profile5/media.smp
|
||||
rtpvideo1.sdp
|
||||
rtsp_live0
|
||||
rtsp_live1
|
||||
rtsp_live2
|
||||
rtsp_tunnel
|
||||
rtsph264
|
||||
rtsph2641080p
|
||||
snap.jpg
|
||||
stream
|
||||
stream/0
|
||||
stream/1
|
||||
stream/live.sdp
|
||||
stream.sdp
|
||||
stream1
|
||||
streaming/channels/0
|
||||
streaming/channels/1
|
||||
streaming/channels/101
|
||||
tcp/av0_0
|
||||
test
|
||||
tmpfs/auto.jpg
|
||||
trackID=1
|
||||
ucast/11
|
||||
udp/av0_0
|
||||
udp/unicast/aiphone_H264
|
||||
udpstream
|
||||
user.pin.mp2
|
||||
user=admin&password=&channel=1&stream=0.sdp?
|
||||
user=admin&password=&channel=1&stream=0.sdp?real_stream
|
||||
user=admin_password=?????_channel=1_stream=0.sdp?real_stream
|
||||
user=admin_password=R5XFY888_channel=1_stream=0.sdp?real_stream
|
||||
user_defined
|
||||
v2
|
||||
video
|
||||
video.3gp
|
||||
video.h264
|
||||
video.mjpg
|
||||
video.mp4
|
||||
video.pro1
|
||||
video.pro2
|
||||
video.pro3
|
||||
video0
|
||||
video0.sdp
|
||||
video1
|
||||
video1.sdp
|
||||
video1+audio1
|
||||
videoMain
|
||||
videoinput_1/h264_1/media.stm
|
||||
videostream.asf
|
||||
vis
|
||||
wfov
|
||||
@@ -0,0 +1,11 @@
|
||||
package dict
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
)
|
||||
|
||||
// defaultCredentials holds the embedded default credentials dictionary
// (JSON with "usernames" and "passwords" lists).
//
//go:embed assets/credentials.json
var defaultCredentials []byte

// defaultRoutes holds the embedded default routes dictionary, one route per line.
//
//go:embed assets/routes
var defaultRoutes string
|
||||
@@ -0,0 +1,134 @@
|
||||
package dict
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// credentials holds the username and password candidate lists decoded from a
// JSON credentials dictionary.
type credentials struct {
	Usernames []string `json:"usernames"`
	Passwords []string `json:"passwords"`
}

// routes is a slice of candidate stream routes, one per dictionary line.
type routes []string

// Dictionary groups routes and credentials for attacks.
type Dictionary struct {
	creds  credentials // candidate usernames/passwords
	routes routes      // candidate stream routes
}
|
||||
|
||||
// Usernames returns the usernames list. The slice is shared, not a copy.
func (d Dictionary) Usernames() []string {
	return d.creds.Usernames
}

// Passwords returns the passwords list. The slice is shared, not a copy.
func (d Dictionary) Passwords() []string {
	return d.creds.Passwords
}

// Routes returns the routes list. The slice is shared, not a copy.
func (d Dictionary) Routes() []string {
	return d.routes
}
|
||||
|
||||
// New loads a dictionary using the provided configuration.
|
||||
func New(credentialsPath, routesPath string) (Dictionary, error) {
|
||||
creds, err := loadCredentials(credentialsPath)
|
||||
if err != nil {
|
||||
return Dictionary{}, err
|
||||
}
|
||||
|
||||
routes, err := loadRoutes(routesPath)
|
||||
if err != nil {
|
||||
return Dictionary{}, err
|
||||
}
|
||||
|
||||
return Dictionary{
|
||||
creds: creds,
|
||||
routes: routes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// loadCredentials loads credentials from a custom path or embedded defaults.
|
||||
func loadCredentials(credentialsPath string) (credentials, error) {
|
||||
if strings.TrimSpace(credentialsPath) != "" {
|
||||
content, err := os.ReadFile(credentialsPath)
|
||||
if err != nil {
|
||||
return credentials{}, fmt.Errorf("reading credentials dictionary %q: %w", credentialsPath, err)
|
||||
}
|
||||
|
||||
creds, err := parseCredentials(content)
|
||||
if err != nil {
|
||||
return credentials{}, err
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
creds, err := parseCredentials(defaultCredentials)
|
||||
if err != nil {
|
||||
return credentials{}, err
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// loadRoutes loads routes from a custom path or embedded defaults.
|
||||
func loadRoutes(routesPath string) (routes, error) {
|
||||
if strings.TrimSpace(routesPath) != "" {
|
||||
file, err := os.Open(routesPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("opening routes dictionary %q: %w", routesPath, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
routes, err := parseRoutes(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
reader := strings.NewReader(defaultRoutes)
|
||||
routes, err := parseRoutes(io.NopCloser(reader))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
func parseCredentials(content []byte) (credentials, error) {
|
||||
if len(content) == 0 {
|
||||
return credentials{}, errors.New("credentials dictionary is empty")
|
||||
}
|
||||
|
||||
var creds credentials
|
||||
err := json.Unmarshal(content, &creds)
|
||||
if err != nil {
|
||||
return credentials{}, fmt.Errorf("reading dictionary contents: %w", err)
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
func parseRoutes(reader io.ReadCloser) (routes, error) {
|
||||
defer reader.Close()
|
||||
|
||||
var routes routes
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
routes = append(routes, scanner.Text())
|
||||
}
|
||||
|
||||
return routes, scanner.Err()
|
||||
}
|
||||
@@ -0,0 +1,163 @@
|
||||
package dict_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/dict"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNew_LoadsDictionaryFromPaths verifies that New reads custom
// credential and route files and exposes their contents unchanged.
func TestNew_LoadsDictionaryFromPaths(t *testing.T) {
	tempDir := t.TempDir()
	credsPath := writeTempFile(t, tempDir, "creds.json", `{"usernames":["alice"],"passwords":["secret"]}`)
	routesPath := writeTempFile(t, tempDir, "routes", "stream\nother\n")

	got, err := dict.New(credsPath, routesPath)
	require.NoError(t, err)

	assert.Equal(t, []string{"alice"}, got.Usernames())
	assert.Equal(t, []string{"secret"}, got.Passwords())
	assert.Equal(t, []string{"stream", "other"}, got.Routes())
}
|
||||
|
||||
// TestNew_CustomAndDefaultPaths verifies that each path argument
// independently falls back to the embedded default dictionary when it
// is empty or whitespace-only.
func TestNew_CustomAndDefaultPaths(t *testing.T) {
	tempDir := t.TempDir()
	customCredsPath := writeTempFile(t, tempDir, "creds.json", `{"usernames":["alice"],"passwords":["secret"]}`)
	customRoutesPath := writeTempFile(t, tempDir, "routes", "stream\nother\n")

	tests := []struct {
		name            string
		credentialsPath string
		routesPath      string
		assertFunc      func(t *testing.T, got dict.Dictionary)
	}{
		{
			name:            "custom credentials and routes",
			credentialsPath: customCredsPath,
			routesPath:      customRoutesPath,
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.Equal(t, []string{"alice"}, got.Usernames())
				assert.Equal(t, []string{"secret"}, got.Passwords())
				assert.Equal(t, []string{"stream", "other"}, got.Routes())
			},
		},
		{
			name:            "custom credentials default routes",
			credentialsPath: customCredsPath,
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.Equal(t, []string{"alice"}, got.Usernames())
				assert.Equal(t, []string{"secret"}, got.Passwords())
				// Default routes are embedded; only sanity-check content.
				assert.NotEmpty(t, got.Routes())
				assert.Contains(t, got.Routes(), "stream")
			},
		},
		{
			name:       "default credentials custom routes",
			routesPath: customRoutesPath,
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.NotEmpty(t, got.Usernames())
				assert.Contains(t, got.Usernames(), "admin")
				assert.NotEmpty(t, got.Passwords())
				assert.Contains(t, got.Passwords(), "admin")
				assert.Equal(t, []string{"stream", "other"}, got.Routes())
			},
		},
		{
			name:            "whitespace paths use defaults",
			credentialsPath: " \t\n",
			routesPath:      "\n\t",
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.NotEmpty(t, got.Usernames())
				assert.Contains(t, got.Usernames(), "admin")
				assert.NotEmpty(t, got.Passwords())
				assert.Contains(t, got.Passwords(), "admin")
				assert.NotEmpty(t, got.Routes())
				assert.Contains(t, got.Routes(), "stream")
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, err := dict.New(test.credentialsPath, test.routesPath)
			require.NoError(t, err)
			test.assertFunc(t, got)
		})
	}
}
|
||||
|
||||
// TestNew_Errors verifies that New surfaces descriptive errors for
// missing, malformed, empty, and oversized dictionary files.
func TestNew_Errors(t *testing.T) {
	tempDir := t.TempDir()
	validCredsPath := writeTempFile(t, tempDir, "creds.json", `{"usernames":["alice"],"passwords":["secret"]}`)
	validRoutesPath := writeTempFile(t, tempDir, "routes", "stream\n")
	invalidJSONPath := writeTempFile(t, tempDir, "invalid.json", "{")
	emptyCredsPath := writeTempFile(t, tempDir, "empty.json", "")
	// A single line longer than the scanner's token limit triggers bufio.ErrTooLong.
	longRoute := strings.Repeat("a", bufio.MaxScanTokenSize+1)
	tooLongRoutesPath := writeTempFile(t, tempDir, "routes-too-long", longRoute)

	tests := []struct {
		name            string
		credentialsPath string
		routesPath      string
		wantErrContains string
		wantErrIs       error
	}{
		{
			name:            "missing credentials file",
			credentialsPath: filepath.Join(tempDir, "missing.json"),
			routesPath:      validRoutesPath,
			wantErrContains: "reading credentials dictionary",
		},
		{
			name:            "invalid credentials json",
			credentialsPath: invalidJSONPath,
			routesPath:      validRoutesPath,
			wantErrContains: "reading dictionary contents",
		},
		{
			name:            "empty credentials file",
			credentialsPath: emptyCredsPath,
			routesPath:      validRoutesPath,
			wantErrContains: "credentials dictionary is empty",
		},
		{
			name:            "missing routes file",
			credentialsPath: validCredsPath,
			routesPath:      filepath.Join(tempDir, "missing-routes"),
			wantErrContains: "opening routes dictionary",
		},
		{
			name:            "routes file too long",
			credentialsPath: validCredsPath,
			routesPath:      tooLongRoutesPath,
			wantErrIs:       bufio.ErrTooLong,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			_, err := dict.New(test.credentialsPath, test.routesPath)
			require.Error(t, err)

			if test.wantErrContains != "" {
				assert.ErrorContains(t, err, test.wantErrContains)
			}
			if test.wantErrIs != nil {
				assert.True(t, errors.Is(err, test.wantErrIs))
			}
		})
	}
}
|
||||
|
||||
func writeTempFile(t *testing.T, dir, name, content string) string {
|
||||
t.Helper()
|
||||
path := filepath.Join(dir, name)
|
||||
require.NoError(t, os.WriteFile(path, []byte(content), 0o600))
|
||||
return path
|
||||
}
|
||||
@@ -0,0 +1,123 @@
|
||||
package output
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
)
|
||||
|
||||
// m3uReporter decorates another ui.Reporter and additionally writes an
// M3U playlist of discovered streams when the summary is reported.
type m3uReporter struct {
	delegate   ui.Reporter // receives every event unchanged
	outputPath string      // playlist destination; empty disables writing
}

// NewM3UReporter wraps the provided reporter and writes an M3U playlist on summary.
func NewM3UReporter(delegate ui.Reporter, outputPath string) ui.Reporter {
	return &m3uReporter{
		delegate: delegate,
		// Trimmed once here so a whitespace-only path disables output.
		outputPath: strings.TrimSpace(outputPath),
	}
}
|
||||
|
||||
// Start forwards the event to the wrapped reporter.
func (r *m3uReporter) Start(step cameradar.Step, message string) {
	r.delegate.Start(step, message)
}

// Done forwards the event to the wrapped reporter.
func (r *m3uReporter) Done(step cameradar.Step, message string) {
	r.delegate.Done(step, message)
}

// Progress forwards the event to the wrapped reporter.
func (r *m3uReporter) Progress(step cameradar.Step, message string) {
	r.delegate.Progress(step, message)
}

// Debug forwards the event to the wrapped reporter.
func (r *m3uReporter) Debug(step cameradar.Step, message string) {
	r.delegate.Debug(step, message)
}

// Error forwards the event to the wrapped reporter.
func (r *m3uReporter) Error(step cameradar.Step, err error) {
	r.delegate.Error(step, err)
}
|
||||
|
||||
func (r *m3uReporter) Summary(streams []cameradar.Stream, err error) {
|
||||
r.delegate.Summary(streams, err)
|
||||
if r.outputPath == "" {
|
||||
return
|
||||
}
|
||||
|
||||
writeErr := writeM3UFile(r.outputPath, streams)
|
||||
if writeErr != nil {
|
||||
r.delegate.Error(cameradar.StepSummary, writeErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *m3uReporter) UpdateSummary(streams []cameradar.Stream) {
|
||||
updater, ok := r.delegate.(interface{ UpdateSummary([]cameradar.Stream) })
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
updater.UpdateSummary(streams)
|
||||
}
|
||||
|
||||
func (r *m3uReporter) Close() {
|
||||
r.delegate.Close()
|
||||
}
|
||||
|
||||
func writeM3UFile(path string, streams []cameradar.Stream) error {
|
||||
content := BuildM3U(streams)
|
||||
dir := filepath.Dir(path)
|
||||
if dir != "." {
|
||||
err := os.MkdirAll(dir, 0o750)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating output directory %q: %w", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
err := os.WriteFile(path, []byte(content), 0o600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing m3u output: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BuildM3U creates an M3U playlist with discovered streams.
|
||||
func BuildM3U(streams []cameradar.Stream) string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("#EXTM3U\n")
|
||||
for _, stream := range streams {
|
||||
url := formatRTSPURL(stream)
|
||||
if url == "" {
|
||||
continue
|
||||
}
|
||||
builder.WriteString("#EXTINF:-1,")
|
||||
builder.WriteString(formatStreamLabel(stream))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(url)
|
||||
builder.WriteString("\n")
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func formatStreamLabel(stream cameradar.Stream) string {
|
||||
label := stream.Address.String() + ":" + strconv.FormatUint(uint64(stream.Port), 10)
|
||||
if stream.Device == "" {
|
||||
return label
|
||||
}
|
||||
return label + " (" + stream.Device + ")"
|
||||
}
|
||||
|
||||
func formatRTSPURL(stream cameradar.Stream) string {
|
||||
path := "/" + strings.TrimLeft(strings.TrimSpace(stream.Route()), "/")
|
||||
|
||||
credentials := ""
|
||||
if stream.CredentialsFound && (stream.Username != "" || stream.Password != "") {
|
||||
credentials = stream.Username + ":" + stream.Password + "@"
|
||||
}
|
||||
|
||||
return "rtsp://" + credentials + stream.Address.String() + ":" + strconv.FormatUint(uint64(stream.Port), 10) + path
|
||||
}
|
||||
@@ -0,0 +1,58 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/masscan"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/nmap"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/skip"
|
||||
)
|
||||
|
||||
// Supported discovery backends.
const (
	ScannerNmap    = "nmap"
	ScannerMasscan = "masscan"
)

// Config configures how Cameradar discovers RTSP streams.
type Config struct {
	SkipScan  bool     // when true, pair every target with every port without probing
	Targets   []string // hosts, CIDR blocks, or dash ranges to scan
	Ports     []string // ports or dash-separated port ranges
	ScanSpeed int16    // nmap timing template value (ignored by other backends)
	Scanner   string   // backend name; defaults to nmap when blank
}

// Reporter reports scan progress and debug information.
type Reporter interface {
	Debug(step cameradar.Step, message string)
	Progress(step cameradar.Step, message string)
}
|
||||
|
||||
// New builds a stream scanner based on the provided configuration.
|
||||
func New(config Config, reporter Reporter) (cameradar.StreamScanner, error) {
|
||||
expandedTargets, err := expandTargetsForScan(config.Targets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.SkipScan {
|
||||
return skip.New(expandedTargets, config.Ports), nil
|
||||
}
|
||||
|
||||
scanner := strings.ToLower(strings.TrimSpace(config.Scanner))
|
||||
if scanner == "" {
|
||||
scanner = ScannerNmap
|
||||
}
|
||||
|
||||
switch scanner {
|
||||
case ScannerNmap:
|
||||
return nmap.New(config.ScanSpeed, expandedTargets, config.Ports, reporter)
|
||||
case ScannerMasscan:
|
||||
return masscan.New(expandedTargets, config.Ports, reporter)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported scanner %q", scanner)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,94 @@
|
||||
package scan_test
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNew_UsesSkipScanner verifies that SkipScan expands CIDR and dash
// ranges and yields the full address x port cross product.
func TestNew_UsesSkipScanner(t *testing.T) {
	config := scan.Config{
		SkipScan: true,
		Targets: []string{
			"192.0.2.0/30",
			"192.0.2.10-11",
		},
		Ports:     []string{"554", "8554-8555"},
		ScanSpeed: 4,
	}

	scanner, err := scan.New(config, nil)
	require.NoError(t, err)

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)

	addrs := []netip.Addr{
		netip.MustParseAddr("192.0.2.0"),
		netip.MustParseAddr("192.0.2.1"),
		netip.MustParseAddr("192.0.2.2"),
		netip.MustParseAddr("192.0.2.3"),
		netip.MustParseAddr("192.0.2.10"),
		netip.MustParseAddr("192.0.2.11"),
	}
	portsExpected := []uint16{554, 8554, 8555}

	// Expected result is every address paired with every port, in order.
	var expected []cameradar.Stream
	for _, addr := range addrs {
		for _, port := range portsExpected {
			expected = append(expected, cameradar.Stream{
				Address: addr,
				Port:    port,
			})
		}
	}

	assert.Equal(t, expected, streams)
}
|
||||
|
||||
// TestNew_SkipScanPropagatesErrors verifies that an inverted port range
// surfaces as an error from Scan rather than from New.
func TestNew_SkipScanPropagatesErrors(t *testing.T) {
	config := scan.Config{
		SkipScan: true,
		Targets:  []string{"192.0.2.1"},
		Ports:    []string{"8555-8554"},
	}

	scanner, err := scan.New(config, nil)
	require.NoError(t, err)

	_, err = scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "invalid port range")
}
|
||||
|
||||
// TestNew_UnsupportedScanner verifies that an unknown backend name is
// rejected by New.
func TestNew_UnsupportedScanner(t *testing.T) {
	config := scan.Config{
		Targets: []string{"192.0.2.1"},
		Ports:   []string{"554"},
		Scanner: "unsupported",
	}

	_, err := scan.New(config, nil)
	require.Error(t, err)
	assert.ErrorContains(t, err, "unsupported scanner")
}
|
||||
|
||||
// TestNew_SkipScanIgnoresUnsupportedScanner verifies that SkipScan wins
// over the scanner selection, even when the name would be invalid.
func TestNew_SkipScanIgnoresUnsupportedScanner(t *testing.T) {
	config := scan.Config{
		SkipScan: true,
		Targets:  []string{"192.0.2.1"},
		Ports:    []string{"554"},
		Scanner:  "unsupported",
	}

	scanner, err := scan.New(config, nil)
	require.NoError(t, err)

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)
	assert.Equal(t, []cameradar.Stream{{Address: netip.MustParseAddr("192.0.2.1"), Port: 554}}, streams)
}
|
||||
@@ -0,0 +1,113 @@
|
||||
package masscan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/pkg/ports"
|
||||
masscanlib "github.com/Ullaakut/masscan"
|
||||
)
|
||||
|
||||
// Reporter reports scan progress and debug information.
type Reporter interface {
	Debug(step cameradar.Step, message string)
	Progress(step cameradar.Step, message string)
}

// Runner is something that can run a masscan scan.
type Runner interface {
	Run(ctx context.Context) (*masscanlib.Run, error)
}

// Scanner scans targets and ports for RTSP streams.
type Scanner struct {
	runner   Runner   // executes the underlying masscan run
	reporter Reporter // receives progress and warning messages
}
|
||||
|
||||
// New returns a Scanner configured with the provided targets and ports.
// masscan is asked to report only open ports.
func New(targets, ports []string, reporter Reporter) (*Scanner, error) {
	runner, err := masscanlib.NewScanner(
		masscanlib.WithTargets(targets...),
		masscanlib.WithPorts(ports...),
		masscanlib.WithOpenOnly(),
	)
	if err != nil {
		return nil, fmt.Errorf("creating masscan scanner: %w", err)
	}

	return &Scanner{
		runner:   runner,
		reporter: reporter,
	}, nil
}
|
||||
|
||||
// Scan discovers RTSP streams on the configured targets and ports.
func (s *Scanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
	return runScan(ctx, s.runner, s.reporter)
}
|
||||
|
||||
func runScan(ctx context.Context, runner Runner, reporter Reporter) ([]cameradar.Stream, error) {
|
||||
results, err := runner.Run(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scanning network: %w", err)
|
||||
}
|
||||
|
||||
for _, warning := range results.Warnings() {
|
||||
reporter.Debug(cameradar.StepScan, "masscan warning: "+warning)
|
||||
}
|
||||
|
||||
var streams []cameradar.Stream
|
||||
for _, host := range results.Hosts {
|
||||
address := strings.TrimSpace(host.Address)
|
||||
if address == "" {
|
||||
reporter.Progress(cameradar.StepScan, "Skipping host with empty address")
|
||||
continue
|
||||
}
|
||||
|
||||
addr, err := netip.ParseAddr(address)
|
||||
if err != nil {
|
||||
reporter.Progress(cameradar.StepScan, fmt.Sprintf("Skipping invalid address %q: %v", host.Address, err))
|
||||
continue
|
||||
}
|
||||
|
||||
for _, port := range host.Ports {
|
||||
if port.Status != "open" {
|
||||
continue
|
||||
}
|
||||
|
||||
if port.Number <= 0 || port.Number > 65535 {
|
||||
reporter.Progress(cameradar.StepScan, fmt.Sprintf("Skipping invalid port %d on %s", port.Number, host.Address))
|
||||
continue
|
||||
}
|
||||
|
||||
scheme := ports.InferTunnelScheme(uint16(port.Number), "")
|
||||
|
||||
streams = append(streams, cameradar.Stream{
|
||||
Address: addr,
|
||||
Port: uint16(port.Number),
|
||||
Scheme: scheme,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
reporter.Progress(cameradar.StepScan, fmt.Sprintf("Found %d RTSP streams", len(streams)))
|
||||
updateSummary(reporter, streams)
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
type summaryUpdater interface {
|
||||
UpdateSummary(streams []cameradar.Stream)
|
||||
}
|
||||
|
||||
func updateSummary(reporter Reporter, streams []cameradar.Stream) {
|
||||
updater, ok := reporter.(summaryUpdater)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
updater.UpdateSummary(streams)
|
||||
}
|
||||
@@ -0,0 +1,158 @@
|
||||
package masscan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
masscanlib "github.com/Ullaakut/masscan"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestRunScan table-tests runScan's filtering, scheme inference, and
// error propagation against a fake masscan runner.
func TestRunScan(t *testing.T) {
	tests := []struct {
		name            string
		result          *masscanlib.Run
		err             error
		wantStreams     []cameradar.Stream
		wantDebug       []string
		wantProgress    []string
		wantErrContains string
	}{
		{
			name: "filters invalid addresses, closed and invalid ports",
			result: &masscanlib.Run{
				Hosts: []masscanlib.Host{
					{
						Address: "192.0.2.10",
						Ports: []masscanlib.Port{
							{Number: 554, Status: "open"},
							{Number: 8554, Status: "closed"},
							{Number: 0, Status: "open"},
						},
					},
					{Address: "not-an-ip", Ports: []masscanlib.Port{{Number: 8554, Status: "open"}}},
					{Address: "", Ports: []masscanlib.Port{{Number: 8554, Status: "open"}}},
				},
			},
			wantStreams: []cameradar.Stream{
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 554},
			},
			wantProgress: []string{
				"Skipping invalid port 0 on 192.0.2.10",
				"Skipping invalid address \"not-an-ip\": ParseAddr(\"not-an-ip\"): unable to parse IP",
				"Skipping host with empty address",
				"Found 1 RTSP streams",
			},
		},
		{
			name: "collects streams from multiple hosts",
			result: &masscanlib.Run{
				Hosts: []masscanlib.Host{
					{Address: "192.0.2.10", Ports: []masscanlib.Port{{Number: 8554, Status: "open"}}},
					{Address: "198.51.100.9", Ports: []masscanlib.Port{{Number: 554, Status: "open"}}},
				},
			},
			wantStreams: []cameradar.Stream{
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 8554},
				{Address: netip.MustParseAddr("198.51.100.9"), Port: 554},
			},
			wantProgress: []string{"Found 2 RTSP streams"},
		},
		{
			name: "sets scheme for common HTTP ports",
			result: &masscanlib.Run{
				Hosts: []masscanlib.Host{
					{
						Address: "192.0.2.10",
						Ports: []masscanlib.Port{
							{Number: 554, Status: "open"},
							{Number: 80, Status: "open"},
							{Number: 443, Status: "open"},
							{Number: 8080, Status: "open"},
							{Number: 8443, Status: "open"},
						},
					},
				},
			},
			wantStreams: []cameradar.Stream{
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 554},
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 80, Scheme: "http"},
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 443, Scheme: "https"},
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 8080, Scheme: "http"},
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 8443, Scheme: "https"},
			},
			wantProgress: []string{"Found 5 RTSP streams"},
		},
		{
			name:            "returns error when scan fails",
			err:             errors.New("scan failed"),
			wantErrContains: "scanning network",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			reporter := &recordingReporter{}

			streams, err := runScan(t.Context(), fakeRunner{result: test.result, err: test.err}, reporter)

			if test.wantErrContains != "" {
				require.Error(t, err)
				assert.ErrorContains(t, err, test.wantErrContains)
				assert.Empty(t, streams)
				assert.Empty(t, reporter.progress)
				assert.Equal(t, test.wantDebug, reporter.debug)
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.wantStreams, streams)
			assert.Equal(t, test.wantDebug, reporter.debug)
			for _, progress := range test.wantProgress {
				assert.Contains(t, reporter.progress, progress)
			}
		})
	}
}
|
||||
|
||||
// fakeRunner is a Runner stub that returns a canned result or error.
type fakeRunner struct {
	result *masscanlib.Run
	err    error
}

// Run returns the configured result and error, ignoring the context.
func (f fakeRunner) Run(context.Context) (*masscanlib.Run, error) {
	return f.result, f.err
}
|
||||
|
||||
// recordingReporter captures debug and progress messages for assertions;
// the mutex keeps it safe if the scanner ever reports concurrently.
type recordingReporter struct {
	mu       sync.Mutex
	debug    []string
	progress []string
}

func (r *recordingReporter) Start(cameradar.Step, string) {}

func (r *recordingReporter) Done(cameradar.Step, string) {}

// Progress records the message.
func (r *recordingReporter) Progress(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.progress = append(r.progress, message)
}

// Debug records the message.
func (r *recordingReporter) Debug(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.debug = append(r.debug, message)
}

func (r *recordingReporter) Error(cameradar.Step, error) {}

func (r *recordingReporter) Summary([]cameradar.Stream, error) {}

func (r *recordingReporter) Close() {}
||||
@@ -0,0 +1,124 @@
|
||||
package nmap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/pkg/ports"
|
||||
nmaplib "github.com/Ullaakut/nmap/v4"
|
||||
)
|
||||
|
||||
// Reporter reports scan progress and debug information.
type Reporter interface {
	Debug(step cameradar.Step, message string)
	Progress(step cameradar.Step, message string)
}

// Runner is something that can run an nmap scan.
type Runner interface {
	Run(ctx context.Context) (*nmaplib.Run, error)
}

// Scanner scans targets and ports for RTSP streams.
type Scanner struct {
	runner   Runner   // executes the underlying nmap run
	reporter Reporter // receives progress and warning messages
}
|
||||
|
||||
// New returns a Scanner configured with the provided scan speed,
// targets, and ports. Service detection is enabled so device names can
// be reported, and scanSpeed selects the nmap timing template.
func New(scanSpeed int16, targets, ports []string, reporter Reporter) (*Scanner, error) {
	runner, err := nmaplib.NewScanner(
		nmaplib.WithTargets(targets...),
		nmaplib.WithPorts(ports...),
		nmaplib.WithServiceInfo(),
		nmaplib.WithTimingTemplate(nmaplib.Timing(scanSpeed)),
	)
	if err != nil {
		return nil, fmt.Errorf("creating nmap scanner: %w", err)
	}

	return &Scanner{
		runner:   runner,
		reporter: reporter,
	}, nil
}
|
||||
|
||||
// Scan discovers RTSP streams on the configured targets and ports.
func (s *Scanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
	return runScan(ctx, s.runner, s.reporter)
}
|
||||
|
||||
func runScan(ctx context.Context, nmap Runner, reporter Reporter) ([]cameradar.Stream, error) {
|
||||
results, err := nmap.Run(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scanning network: %w", err)
|
||||
}
|
||||
|
||||
for _, warning := range results.Warnings() {
|
||||
reporter.Debug(cameradar.StepScan, "nmap warning: "+warning)
|
||||
}
|
||||
|
||||
var streams []cameradar.Stream
|
||||
for _, host := range results.Hosts {
|
||||
for _, port := range host.Ports {
|
||||
if port.Status() != "open" {
|
||||
continue
|
||||
}
|
||||
|
||||
isCandidate := streamCandidate(port.Service.Name, port.ID)
|
||||
if !isCandidate {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, address := range host.Addresses {
|
||||
addr, err := netip.ParseAddr(address.Addr)
|
||||
if err != nil {
|
||||
reporter.Progress(cameradar.StepScan, fmt.Sprintf("Skipping invalid address %q: %v", address.Addr, err))
|
||||
continue
|
||||
}
|
||||
|
||||
scheme := ports.InferTunnelScheme(port.ID, port.Service.Name)
|
||||
streams = append(streams, cameradar.Stream{
|
||||
Device: port.Service.Product,
|
||||
Address: addr,
|
||||
Port: port.ID,
|
||||
Scheme: scheme,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reporter.Progress(cameradar.StepScan, fmt.Sprintf("Found %d RTSP streams", len(streams)))
|
||||
updateSummary(reporter, streams)
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
type summaryUpdater interface {
|
||||
UpdateSummary(streams []cameradar.Stream)
|
||||
}
|
||||
|
||||
func updateSummary(reporter Reporter, streams []cameradar.Stream) {
|
||||
updater, ok := reporter.(summaryUpdater)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
updater.UpdateSummary(streams)
|
||||
}
|
||||
|
||||
// Extracting the classifying logic to an external function to avoid nesting if loops.
|
||||
func streamCandidate(serviceName string, port uint16) bool {
|
||||
serviceName = strings.ToLower(strings.TrimSpace(serviceName))
|
||||
if strings.Contains(serviceName, "rtsp") {
|
||||
return true
|
||||
}
|
||||
|
||||
if ports.InferTunnelScheme(port, serviceName) != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -0,0 +1,235 @@
|
||||
package nmap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
nmaplib "github.com/Ullaakut/nmap/v4"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestScanner_Scan table-tests the nmap scanner's filtering, scheme
// inference, and error propagation using a fake runner injected into
// a real Scanner.
func TestScanner_Scan(t *testing.T) {
	ctx := context.WithValue(t.Context(), contextKey("trace"), "scan")

	tests := []struct {
		name            string
		result          *nmaplib.Run
		err             error
		wantStreams     []cameradar.Stream
		wantDebug       []string
		wantProgress    string
		wantErrContains string
	}{
		{
			name: "filters non-rtsp and closed ports",
			result: buildRun(nmaplib.Host{
				Addresses: []nmaplib.Address{
					{Addr: "127.0.0.1"},
					{Addr: "not-an-ip"},
				},
				Ports: []nmaplib.Port{
					openPort(8554, "rtsp", "ACME"),
					closedPort(554, "rtsp", "ACME"),
					openPort(80, "http", "ACME"),
				},
			}),
			wantStreams: []cameradar.Stream{
				{
					Device:  "ACME",
					Address: netip.MustParseAddr("127.0.0.1"),
					Port:    8554,
				},
				{
					Device:  "ACME",
					Address: netip.MustParseAddr("127.0.0.1"),
					Port:    80,
					Scheme:  "http",
				},
			},
			wantProgress: "Found 2 RTSP streams",
		},
		{
			name: "keeps rtsp and http candidates while filtering closed ports",
			result: buildRun(nmaplib.Host{
				Addresses: []nmaplib.Address{
					{Addr: "127.0.0.1"},
					{Addr: "not-an-ip"},
				},
				Ports: []nmaplib.Port{
					openPort(8554, "rtsp", "ACME"),
					closedPort(554, "rtsp", "ACME"),
					openPort(80, "http", "ACME"),
					openPort(9443, "https", "ACME"),
					openPort(8443, "", "ACME"),
				},
			}),
			wantStreams: []cameradar.Stream{
				{
					Device:  "ACME",
					Address: netip.MustParseAddr("127.0.0.1"),
					Port:    8554,
				},
				{
					Device:  "ACME",
					Address: netip.MustParseAddr("127.0.0.1"),
					Port:    80,
					Scheme:  "http",
				},
				{
					Device:  "ACME",
					Address: netip.MustParseAddr("127.0.0.1"),
					Port:    9443,
					Scheme:  "https",
				},
				{
					Device:  "ACME",
					Address: netip.MustParseAddr("127.0.0.1"),
					Port:    8443,
					Scheme:  "https",
				},
			},
			wantProgress: "Found 4 RTSP streams",
		},
		{
			name: "collects multiple hosts",
			result: buildRun(
				nmaplib.Host{
					Addresses: []nmaplib.Address{{Addr: "192.0.2.10"}, {Addr: "192.0.2.11"}},
					Ports: []nmaplib.Port{
						openPort(8554, "rtsp-alt", "Model A"),
					},
				},
				nmaplib.Host{
					Addresses: []nmaplib.Address{{Addr: "198.51.100.9"}},
					Ports: []nmaplib.Port{
						openPort(554, "rtsp", "Model B"),
					},
				},
			),
			wantStreams: []cameradar.Stream{
				{
					Device:  "Model A",
					Address: netip.MustParseAddr("192.0.2.10"),
					Port:    8554,
				},
				{
					Device:  "Model A",
					Address: netip.MustParseAddr("192.0.2.11"),
					Port:    8554,
				},
				{
					Device:  "Model B",
					Address: netip.MustParseAddr("198.51.100.9"),
					Port:    554,
				},
			},
			wantProgress: "Found 3 RTSP streams",
		},
		{
			name:            "returns error when scan fails",
			err:             errors.New("scan failed"),
			wantErrContains: "scanning network",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			reporter := &recordingReporter{}

			scanner, err := New(4, []string{"192.0.2.1"}, []string{"554", "8554"}, reporter)
			require.NoError(t, err)

			// Swap in the fake runner so no real nmap process runs.
			scanner.runner = fakeRunner{result: test.result, err: test.err}

			streams, err := scanner.Scan(ctx)

			if test.wantErrContains != "" {
				require.Error(t, err)
				assert.ErrorContains(t, err, test.wantErrContains)
				assert.Empty(t, streams)
				assert.Empty(t, reporter.progress)
				assert.Equal(t, test.wantDebug, reporter.debug)
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.wantStreams, streams)
			assert.Equal(t, test.wantDebug, reporter.debug)
			assert.Contains(t, reporter.progress, test.wantProgress)
		})
	}
}
|
||||
|
||||
// contextKey is a private key type for context values, preventing collisions
// with context keys defined by other packages.
// NOTE(review): no use of contextKey is visible in this chunk — confirm it is
// still needed.
type contextKey string
|
||||
|
||||
// fakeRunner is a test double for the scanner's nmap runner: Run returns the
// canned result and error instead of scanning anything.
type fakeRunner struct {
	result *nmaplib.Run
	err error
}

// Run implements the runner interface by returning the canned values.
func (f fakeRunner) Run(context.Context) (*nmaplib.Run, error) {
	return f.result, f.err
}
|
||||
|
||||
// recordingReporter captures Progress and Debug messages for test assertions.
// The recording methods are safe for concurrent use; all other Reporter
// methods are no-ops.
type recordingReporter struct {
	mu sync.Mutex
	// debug collects every message passed to Debug, in call order.
	debug []string
	// progress collects every message passed to Progress, in call order.
	progress []string
}

// Start is a no-op.
func (r *recordingReporter) Start(cameradar.Step, string) {}

// Done is a no-op.
func (r *recordingReporter) Done(cameradar.Step, string) {}

// Progress records the message under the mutex.
func (r *recordingReporter) Progress(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.progress = append(r.progress, message)
}

// Debug records the message under the mutex.
func (r *recordingReporter) Debug(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.debug = append(r.debug, message)
}

// Error is a no-op.
func (r *recordingReporter) Error(cameradar.Step, error) {}

// Summary is a no-op.
func (r *recordingReporter) Summary([]cameradar.Stream, error) {}

// Close is a no-op.
func (r *recordingReporter) Close() {}
|
||||
|
||||
// buildRun wraps the given hosts in an nmap Run fixture for tests.
func buildRun(hosts ...nmaplib.Host) *nmaplib.Run {
	return &nmaplib.Run{Hosts: hosts}
}
|
||||
|
||||
func openPort(id uint16, serviceName, product string) nmaplib.Port {
|
||||
return nmaplib.Port{
|
||||
ID: id,
|
||||
State: nmaplib.State{
|
||||
State: string(nmaplib.Open),
|
||||
},
|
||||
Service: nmaplib.Service{
|
||||
Name: serviceName,
|
||||
Product: product,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func closedPort(id uint16, serviceName, product string) nmaplib.Port {
|
||||
return nmaplib.Port{
|
||||
ID: id,
|
||||
State: nmaplib.State{
|
||||
State: string(nmaplib.Closed),
|
||||
},
|
||||
Service: nmaplib.Service{
|
||||
Name: serviceName,
|
||||
Product: product,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,338 @@
|
||||
package skip
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// Scanner is a stream scanner that skips discovery and treats every target/port as a stream.
type Scanner struct {
	// targets are raw target specifications (IP, CIDR, octet range, hostname).
	targets []string
	// ports are raw port specifications (number, range, comma list, service name).
	ports []string
}
|
||||
|
||||
// New builds a scanner that skips discovery and treats every target/port as a stream.
|
||||
func New(targets, ports []string) *Scanner {
|
||||
return &Scanner{
|
||||
targets: targets,
|
||||
ports: ports,
|
||||
}
|
||||
}
|
||||
|
||||
// Scan returns the precomputed list of streams.
//
// No network probing happens here: every configured target/port combination
// is reported as a stream. Target/port expansion and validation are done by
// buildStreamsFromTargets.
func (s *Scanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
	return buildStreamsFromTargets(ctx, s.targets, s.ports)
}
|
||||
|
||||
func buildStreamsFromTargets(ctx context.Context, targets, ports []string) ([]cameradar.Stream, error) {
|
||||
resolvedPorts, err := parsePorts(ctx, ports)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resolvedPorts) == 0 {
|
||||
return nil, errors.New("no valid ports provided")
|
||||
}
|
||||
|
||||
resolvedTargets, err := expandTargets(ctx, targets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resolvedTargets) == 0 {
|
||||
return nil, errors.New("no valid target addresses resolved")
|
||||
}
|
||||
|
||||
streams := make([]cameradar.Stream, 0, len(resolvedTargets)*len(resolvedPorts))
|
||||
for _, addr := range resolvedTargets {
|
||||
for _, port := range resolvedPorts {
|
||||
streams = append(streams, cameradar.Stream{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func parsePorts(ctx context.Context, ports []string) ([]uint16, error) {
|
||||
seen := make(map[uint16]struct{})
|
||||
resolved := make([]uint16, 0, len(ports))
|
||||
|
||||
for _, entry := range ports {
|
||||
for raw := range strings.SplitSeq(entry, ",") {
|
||||
value := strings.TrimSpace(raw)
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
values, err := parsePortValue(ctx, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, port := range values {
|
||||
if _, exists := seen[port]; exists {
|
||||
continue
|
||||
}
|
||||
seen[port] = struct{}{}
|
||||
resolved = append(resolved, port)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return resolved, nil
|
||||
}
|
||||
|
||||
func parsePortValue(ctx context.Context, value string) ([]uint16, error) {
|
||||
if strings.Contains(value, "-") {
|
||||
parts := strings.SplitN(value, "-", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid port range %q", value)
|
||||
}
|
||||
|
||||
start, err := parsePortNumber(strings.TrimSpace(parts[0]))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid port range %q: %w", value, err)
|
||||
}
|
||||
end, err := parsePortNumber(strings.TrimSpace(parts[1]))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid port range %q: %w", value, err)
|
||||
}
|
||||
if start > end {
|
||||
return nil, fmt.Errorf("invalid port range %q", value)
|
||||
}
|
||||
|
||||
ports := make([]uint16, 0, end-start+1)
|
||||
for port := start; port <= end; port++ {
|
||||
ports = append(ports, port)
|
||||
}
|
||||
return ports, nil
|
||||
}
|
||||
|
||||
port, err := parsePortNumber(value)
|
||||
if err == nil {
|
||||
return []uint16{port}, nil
|
||||
}
|
||||
|
||||
servicePort, lookupErr := net.DefaultResolver.LookupPort(ctx, "tcp", value)
|
||||
if lookupErr != nil {
|
||||
return nil, fmt.Errorf("invalid port %q", value)
|
||||
}
|
||||
if servicePort < 1 || servicePort > 65535 {
|
||||
return nil, fmt.Errorf("port %d out of range", servicePort)
|
||||
}
|
||||
return []uint16{uint16(servicePort)}, nil
|
||||
}
|
||||
|
||||
// parsePortNumber parses value as a decimal TCP/UDP port in [1, 65535].
func parsePortNumber(value string) (uint16, error) {
	number, err := strconv.Atoi(value)
	switch {
	case err != nil:
		return 0, err
	case number < 1 || number > 65535:
		return 0, fmt.Errorf("port %d out of range", number)
	}
	return uint16(number), nil
}
|
||||
|
||||
func expandTargets(ctx context.Context, targets []string) ([]netip.Addr, error) {
|
||||
seen := make(map[netip.Addr]struct{})
|
||||
resolved := make([]netip.Addr, 0, len(targets))
|
||||
|
||||
for _, target := range targets {
|
||||
value := strings.TrimSpace(target)
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
addrs, err := parseTargetAddrs(ctx, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
if !addr.IsValid() {
|
||||
continue
|
||||
}
|
||||
if _, exists := seen[addr]; exists {
|
||||
continue
|
||||
}
|
||||
seen[addr] = struct{}{}
|
||||
resolved = append(resolved, addr)
|
||||
}
|
||||
}
|
||||
|
||||
return resolved, nil
|
||||
}
|
||||
|
||||
// parseTargetAddrs resolves one target string into IP addresses, trying in
// order: CIDR prefix, IPv4 octet range ("a.b.c.10-20"), literal IP address,
// and finally a DNS hostname lookup. The attempt order matters: a CIDR or
// octet range would also fail literal-address parsing.
func parseTargetAddrs(ctx context.Context, target string) ([]netip.Addr, error) {
	prefix, err := netip.ParsePrefix(target)
	if err == nil { // Return early.
		return expandPrefix(prefix), nil
	}

	if strings.Contains(target, "-") {
		// ok=false means "not an octet-range form at all": fall through to
		// the remaining parsers instead of failing.
		addrs, ok, err := parseIPv4Range(target)
		if ok {
			return addrs, err
		}
	}

	addr, err := netip.ParseAddr(target)
	if err == nil { // Return early.
		return []netip.Addr{addr}, nil
	}

	ips, err := net.DefaultResolver.LookupIPAddr(ctx, target)
	if err != nil {
		return nil, fmt.Errorf("resolving hostname %q: %w", target, err)
	}

	addrs := make([]netip.Addr, 0, len(ips))
	for _, ip := range ips {
		addr, ok := netip.AddrFromSlice(ip.IP)
		if !ok {
			continue
		}
		// Unmap normalizes IPv4-mapped IPv6 (::ffff:a.b.c.d) to plain IPv4
		// so upstream deduplication treats both forms as the same address.
		addrs = append(addrs, addr.Unmap())
	}

	if len(addrs) == 0 {
		return nil, fmt.Errorf("no ip addresses found for hostname %q", target)
	}

	return addrs, nil
}
|
||||
|
||||
func expandPrefix(prefix netip.Prefix) []netip.Addr {
|
||||
if !prefix.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
prefix = prefix.Masked()
|
||||
addr := prefix.Addr()
|
||||
addrs := make([]netip.Addr, 0, 16)
|
||||
|
||||
for current := addr; prefix.Contains(current); {
|
||||
addrs = append(addrs, current)
|
||||
next := current.Next()
|
||||
if !next.IsValid() {
|
||||
break
|
||||
}
|
||||
current = next
|
||||
}
|
||||
|
||||
return addrs
|
||||
}
|
||||
|
||||
// octetRange is an inclusive range of values for one IPv4 octet; a single
// octet is represented with start == end.
type octetRange struct {
	start int
	end int
}
|
||||
|
||||
func parseIPv4Range(target string) ([]netip.Addr, bool, error) {
|
||||
parts := strings.Split(target, ".")
|
||||
if len(parts) != 4 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
ranges := make([]octetRange, 4)
|
||||
for i, part := range parts {
|
||||
parsed, ok, err := parseOctetRange(part)
|
||||
if err != nil {
|
||||
return nil, true, err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return nil, false, nil
|
||||
}
|
||||
ranges[i] = parsed
|
||||
}
|
||||
|
||||
addrs := make([]netip.Addr, 0, 16)
|
||||
for first := ranges[0].start; first <= ranges[0].end; first++ {
|
||||
for second := ranges[1].start; second <= ranges[1].end; second++ {
|
||||
for third := ranges[2].start; third <= ranges[2].end; third++ {
|
||||
for fourth := ranges[3].start; fourth <= ranges[3].end; fourth++ {
|
||||
addrs = append(addrs, netip.AddrFrom4([4]byte{
|
||||
byte(first),
|
||||
byte(second),
|
||||
byte(third),
|
||||
byte(fourth),
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return addrs, true, nil
|
||||
}
|
||||
|
||||
func parseOctetRange(value string) (octetRange, bool, error) {
|
||||
value = strings.TrimSpace(value)
|
||||
if value == "" {
|
||||
return octetRange{}, false, nil
|
||||
}
|
||||
|
||||
if strings.Contains(value, "-") {
|
||||
parts := strings.SplitN(value, "-", 2)
|
||||
if len(parts) != 2 {
|
||||
return octetRange{}, true, fmt.Errorf("invalid range %q", value)
|
||||
}
|
||||
|
||||
start, err := parseOctetValue(strings.TrimSpace(parts[0]))
|
||||
if err != nil {
|
||||
return octetRange{}, true, err
|
||||
}
|
||||
end, err := parseOctetValue(strings.TrimSpace(parts[1]))
|
||||
if err != nil {
|
||||
return octetRange{}, true, err
|
||||
}
|
||||
if start > end {
|
||||
return octetRange{}, true, fmt.Errorf("invalid range %q", value)
|
||||
}
|
||||
|
||||
return octetRange{start: start, end: end}, true, nil
|
||||
}
|
||||
|
||||
if !isDigits(value) {
|
||||
return octetRange{}, false, nil
|
||||
}
|
||||
|
||||
octet, err := parseOctetValue(value)
|
||||
if err != nil {
|
||||
return octetRange{}, true, err
|
||||
}
|
||||
|
||||
return octetRange{start: octet, end: octet}, true, nil
|
||||
}
|
||||
|
||||
func parseOctetValue(value string) (int, error) {
|
||||
if !isDigits(value) {
|
||||
return 0, fmt.Errorf("invalid octet %q", value)
|
||||
}
|
||||
parsed, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid octet %q", value)
|
||||
}
|
||||
if parsed < 0 || parsed > 255 {
|
||||
return 0, fmt.Errorf("octet %d out of range", parsed)
|
||||
}
|
||||
return parsed, nil
|
||||
}
|
||||
|
||||
// isDigits reports whether value is non-empty and made entirely of ASCII
// decimal digits.
func isDigits(value string) bool {
	if value == "" {
		return false
	}
	for i := 0; i < len(value); i++ {
		if value[i] < '0' || value[i] > '9' {
			return false
		}
	}
	return true
}
|
||||
@@ -0,0 +1,107 @@
|
||||
package skip_test
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/skip"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNew_ExpandsTargetsAndPorts checks that Scan expands CIDR prefixes,
// single addresses, octet ranges, and port ranges into the full cross
// product of (address, port) streams.
// NOTE(review): these TestNew_* functions actually exercise Scan, not New —
// consider renaming to TestScan_* for accuracy.
func TestNew_ExpandsTargetsAndPorts(t *testing.T) {
	targets := []string{
		"192.0.2.0/30",
		"192.0.2.15",
		"192.0.2.10-11",
	}
	ports := []string{"554", "8554-8555"}

	scanner := skip.New(targets, ports)

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)

	addrs := []netip.Addr{
		netip.MustParseAddr("192.0.2.0"),
		netip.MustParseAddr("192.0.2.1"),
		netip.MustParseAddr("192.0.2.2"),
		netip.MustParseAddr("192.0.2.3"),
		netip.MustParseAddr("192.0.2.10"),
		netip.MustParseAddr("192.0.2.11"),
		netip.MustParseAddr("192.0.2.15"),
	}
	portsExpected := []uint16{554, 8554, 8555}

	// Build the expected "addr:port" set from the cross product.
	var want []string
	for _, addr := range addrs {
		for _, port := range portsExpected {
			want = append(want, addr.String()+":"+strconv.Itoa(int(port)))
		}
	}

	var got []string
	for _, stream := range streams {
		got = append(got, stream.Address.String()+":"+strconv.Itoa(int(stream.Port)))
	}

	assert.ElementsMatch(t, want, got)
}
|
||||
|
||||
// TestNew_ReturnsErrorOnInvalidPortRange checks that an inverted port range
// (start > end) is rejected by Scan.
func TestNew_ReturnsErrorOnInvalidPortRange(t *testing.T) {
	scanner := skip.New([]string{"192.0.2.1"}, []string{"8555-8554"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "invalid port range")
}
|
||||
|
||||
// TestNew_ReturnsErrorOnEmptyTargets checks that an empty target list fails
// with a descriptive error rather than returning zero streams.
func TestNew_ReturnsErrorOnEmptyTargets(t *testing.T) {
	scanner := skip.New([]string{}, []string{"554"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "no valid target addresses resolved")
}
|
||||
|
||||
// TestNew_ResolvesServicePorts checks that a service name ("http") resolves
// to its well-known TCP port (80).
func TestNew_ResolvesServicePorts(t *testing.T) {
	scanner := skip.New([]string{"127.0.0.1"}, []string{"http"})

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)
	require.Len(t, streams, 1)

	assert.Equal(t, netip.MustParseAddr("127.0.0.1"), streams[0].Address)
	assert.Equal(t, uint16(80), streams[0].Port)
}
|
||||
|
||||
// TestNew_ReturnsErrorOnUnknownServicePort checks that an unresolvable
// service name is rejected as an invalid port.
func TestNew_ReturnsErrorOnUnknownServicePort(t *testing.T) {
	scanner := skip.New([]string{"127.0.0.1"}, []string{"not-a-service"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "invalid port")
}
|
||||
|
||||
// TestNew_ResolvesHostnames checks that hostnames are resolved through DNS.
// "localhost" may resolve to either loopback family depending on the host's
// resolver configuration, so both are accepted.
func TestNew_ResolvesHostnames(t *testing.T) {
	scanner := skip.New([]string{"localhost"}, []string{"554"})

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)
	require.NotEmpty(t, streams)
	addr := streams[0].Address
	assert.True(t,
		addr == netip.MustParseAddr("127.0.0.1") || addr == netip.MustParseAddr("::1"),
		"expected localhost to resolve to 127.0.0.1 or ::1, got %s",
		addr.String(),
	)
}
|
||||
|
||||
// TestNew_ReturnsErrorOnHostnameLookupFailure checks that DNS failures are
// surfaced with context. The ".invalid" TLD is reserved (RFC 2606) and is
// guaranteed never to resolve.
func TestNew_ReturnsErrorOnHostnameLookupFailure(t *testing.T) {
	scanner := skip.New([]string{"does-not-exist.invalid"}, []string{"554"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "resolving hostname")
}
|
||||
@@ -0,0 +1,139 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func expandTargetsForScan(targets []string) ([]string, error) {
|
||||
expanded := make([]string, 0, len(targets))
|
||||
for _, target := range targets {
|
||||
value := strings.TrimSpace(target)
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
addrs, ok, err := parseIPv4RangePair(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
expanded = append(expanded, addrs...)
|
||||
continue
|
||||
}
|
||||
|
||||
expanded = append(expanded, value)
|
||||
}
|
||||
|
||||
return expanded, nil
|
||||
}
|
||||
|
||||
// parseIPv4RangePair parses masscan-style "start-end" IPv4 ranges, e.g.
// "192.0.2.10-192.0.2.20". The bool result reports whether the target was in
// that format; other dash-containing inputs (hostnames, nmap-style
// "a.b.c.10-20" ranges) report false so the caller passes them through.
func parseIPv4RangePair(target string) ([]string, bool, error) {
	parts := strings.SplitN(target, "-", 2)
	if len(parts) != 2 {
		return nil, false, nil
	}

	startValue := strings.TrimSpace(parts[0])
	endValue := strings.TrimSpace(parts[1])
	if startValue == "" || endValue == "" {
		return nil, false, nil
	}

	// Fall through if this is in nmap range format (the part after the dash
	// is a bare octet, e.g. "192.168.1.140-255").
	if endIsOctet(endValue) {
		return nil, false, nil
	}

	startAddr, startOK := parseIPv4Addr(startValue)
	endAddr, endOK := parseIPv4Addr(endValue)
	if !startOK && !endOK { // Allows the case where the target is just a hostname with a dash.
		return nil, false, nil
	}
	if !startOK || !endOK { // Prevents the case where one is an address and the other part is not.
		return nil, false, fmt.Errorf("invalid range %q", target)
	}

	// Normalize IPv4-mapped IPv6 forms and require both ends to be IPv4.
	startAddr = startAddr.Unmap()
	endAddr = endAddr.Unmap()
	if !startAddr.Is4() || !endAddr.Is4() {
		return nil, true, fmt.Errorf("invalid range %q", target)
	}

	start := ipv4ToUint32(startAddr)
	end := ipv4ToUint32(endAddr)
	if start > end {
		return nil, true, fmt.Errorf("invalid range %q", target)
	}

	return expandIPv4RangeToTargets(start, end), true, nil
}
|
||||
|
||||
// parseIPv4Addr parses value as an IP address, reporting success via the
// bool result.
// NOTE(review): despite the name, netip.ParseAddr also accepts IPv6; the
// caller enforces Is4 separately — confirm the name is intentional.
func parseIPv4Addr(value string) (netip.Addr, bool) {
	if addr, err := netip.ParseAddr(value); err == nil {
		return addr, true
	}
	return netip.Addr{}, false
}
|
||||
|
||||
// endIsOctet reports whether value (after trimming spaces) is a decimal
// number in the octet range [0, 255], i.e. the tail of an nmap-style range.
func endIsOctet(value string) bool {
	parsed, err := strconv.Atoi(strings.TrimSpace(value))
	return err == nil && parsed >= 0 && parsed <= 255
}
|
||||
|
||||
// expandIPv4RangeToTargets converts the inclusive IPv4 range [start, end]
// into a minimal list of nmap targets via greedy CIDR aggregation: at each
// step it emits the largest power-of-two block that is both aligned at the
// current address and fits in the remaining span.
func expandIPv4RangeToTargets(start, end uint32) []string {
	if start > end {
		return nil
	}

	const maxUint32 = uint64(^uint32(0))
	// Size math is done in uint64 so a full-space range (2^32 addresses)
	// cannot overflow.
	remaining := uint64(end) - uint64(start) + 1
	results := make([]string, 0, 16)

	for current := uint64(start); remaining > 0; {
		// Defensive: cannot normally trigger since remaining tracks the span,
		// but guards against walking past the address space.
		if current > maxUint32 {
			return results
		}

		current32 := uint32(current)
		// Largest block the current address's alignment allows;
		// TrailingZeros32(0) == 32, so 0.0.0.0 allows the whole space.
		maxSize := uint64(1) << bits.TrailingZeros32(current32)
		// Shrink until the block fits in what is left of the range.
		for maxSize > remaining {
			maxSize >>= 1
		}

		// maxSize is a power of two, so Len64(maxSize)-1 == log2(maxSize).
		prefixLen := 32 - (bits.Len64(maxSize) - 1)
		addr := uint32ToIPv4(current32)
		if maxSize == 1 {
			// Single address: emit it bare instead of as a /32.
			results = append(results, addr.String())
		} else {
			results = append(results, fmt.Sprintf("%s/%d", addr.String(), prefixLen))
		}

		current += maxSize
		remaining -= maxSize
	}

	return results
}
|
||||
|
||||
func ipv4ToUint32(addr netip.Addr) uint32 {
|
||||
value := addr.As4()
|
||||
return uint32(value[0])<<24 | uint32(value[1])<<16 | uint32(value[2])<<8 | uint32(value[3])
|
||||
}
|
||||
|
||||
// uint32ToIPv4 unpacks a big-endian uint32 into an IPv4 address.
func uint32ToIPv4(value uint32) netip.Addr {
	var octets [4]byte
	for i := 3; i >= 0; i-- {
		octets[i] = byte(value)
		value >>= 8
	}
	return netip.AddrFrom4(octets)
}
|
||||
@@ -0,0 +1,73 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestExpandTargetsForScan_ExpandsFullIPv4Range checks that masscan-style
// full-address ranges are rewritten into CIDR blocks / single addresses,
// while nmap-style ranges, CIDRs, and hostnames pass through and blanks are
// dropped.
func TestExpandTargetsForScan_ExpandsFullIPv4Range(t *testing.T) {
	targets := []string{
		"192.0.2.10-192.0.2.12",
		"192.168.1.140-255",
		"192.0.2.0/30",
		"localhost",
		"",
	}

	got, err := expandTargetsForScan(targets)
	require.NoError(t, err)

	// .10-.12 aggregates into .10/31 plus the single .12.
	assert.ElementsMatch(t, []string{
		"192.0.2.10/31",
		"192.0.2.12",
		"192.168.1.140-255",
		"192.0.2.0/30",
		"localhost",
	}, got)
}
|
||||
|
||||
// TestExpandTargetsForScan_ReturnsErrorOnInvalidRange covers rejection of
// malformed ranges and pass-through of dash-containing non-range inputs.
func TestExpandTargetsForScan_ReturnsErrorOnInvalidRange(t *testing.T) {
	t.Run("inverted range", func(t *testing.T) {
		_, err := expandTargetsForScan([]string{"192.0.2.12-192.0.2.10"})
		require.Error(t, err)
		assert.ErrorContains(t, err, "invalid range")
	})

	t.Run("invalid range", func(t *testing.T) {
		_, err := expandTargetsForScan([]string{"192.0.2.12-foo"})
		require.Error(t, err)
		assert.ErrorContains(t, err, "invalid range")
	})

	t.Run("hostname with dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"my-host.com"})
		require.NoError(t, err)
		assert.Equal(t, []string{"my-host.com"}, tgts)
	})

	t.Run("ends with dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"a-"})
		require.NoError(t, err)
		assert.Equal(t, []string{"a-"}, tgts)
	})

	t.Run("starts with dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"-a"})
		require.NoError(t, err)
		assert.Equal(t, []string{"-a"}, tgts)
	})

	t.Run("only a dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"-"})
		require.NoError(t, err)
		assert.Equal(t, []string{"-"}, tgts)
	})

	t.Run("nmap format", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"192.168.1.10-255"})
		require.NoError(t, err)
		assert.Equal(t, []string{"192.168.1.10-255"}, tgts)
	})
}
|
||||
@@ -0,0 +1,48 @@
|
||||
package ui
|
||||
|
||||
import "strings"
|
||||
|
||||
// BuildInfo represents build metadata injected at link time.
type BuildInfo struct {
	// Version is the release version, with or without a leading "v".
	Version string
	// Commit is the VCS commit hash the binary was built from.
	Commit string
	// Date is the build timestamp.
	// NOTE(review): Date is not read by any method in this file — confirm
	// where (if anywhere) it is consumed.
	Date string
}
|
||||
|
||||
// DisplayVersion returns the version prefixed with "v" when needed.
|
||||
func (b BuildInfo) DisplayVersion() string {
|
||||
version := strings.TrimSpace(b.Version)
|
||||
if version == "" {
|
||||
version = "dev"
|
||||
}
|
||||
if strings.HasPrefix(version, "v") {
|
||||
return version
|
||||
}
|
||||
return "v" + version
|
||||
}
|
||||
|
||||
// LogVersion returns the version without a leading "v".
|
||||
func (b BuildInfo) LogVersion() string {
|
||||
version := strings.TrimSpace(b.Version)
|
||||
if version == "" {
|
||||
return "dev"
|
||||
}
|
||||
return strings.TrimPrefix(version, "v")
|
||||
}
|
||||
|
||||
// ShortCommit returns a shortened commit hash suitable for display.
|
||||
func (b BuildInfo) ShortCommit() string {
|
||||
commit := strings.TrimSpace(b.Commit)
|
||||
if commit == "" || commit == "none" || commit == "unknown" {
|
||||
return "unknown"
|
||||
}
|
||||
if len(commit) > 7 {
|
||||
return commit[:7]
|
||||
}
|
||||
return commit
|
||||
}
|
||||
|
||||
// TUIHeader returns the header used by the TUI, combining DisplayVersion and
// ShortCommit, e.g. "Cameradar — v1.2.3 (abcdefg)".
func (b BuildInfo) TUIHeader() string {
	return "Cameradar — " + b.DisplayVersion() + " (" + b.ShortCommit() + ")"
}
|
||||
@@ -0,0 +1,175 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestBuildInfo_DisplayVersion covers default, prefixing, and trimming
// behavior of DisplayVersion.
func TestBuildInfo_DisplayVersion(t *testing.T) {
	tests := []struct {
		name string
		version string
		want string
	}{
		{
			name: "empty defaults to dev with prefix",
			version: "",
			want: "vdev",
		},
		{
			name: "dev without prefix",
			version: "dev",
			want: "vdev",
		},
		{
			name: "already prefixed",
			version: "v1.2.3",
			want: "v1.2.3",
		},
		{
			name: "adds prefix",
			version: "1.2.3",
			want: "v1.2.3",
		},
		{
			name: "trims spaces with prefix",
			version: " v2.0 ",
			want: "v2.0",
		},
		{
			name: "trims spaces without prefix",
			version: " 2.0 ",
			want: "v2.0",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Version: test.version}
			assert.Equal(t, test.want, info.DisplayVersion())
		})
	}
}
|
||||
|
||||
// TestBuildInfo_LogVersion covers default, prefix-stripping, and trimming
// behavior of LogVersion.
func TestBuildInfo_LogVersion(t *testing.T) {
	tests := []struct {
		name string
		version string
		want string
	}{
		{
			name: "empty defaults to dev",
			version: "",
			want: "dev",
		},
		{
			name: "removes leading v",
			version: "v1.2.3",
			want: "1.2.3",
		},
		{
			name: "keeps version without prefix",
			version: "1.2.3",
			want: "1.2.3",
		},
		{
			name: "trims spaces and removes prefix",
			version: " v2.0 ",
			want: "2.0",
		},
		{
			name: "removes only first prefix",
			version: "vv1",
			want: "v1",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Version: test.version}
			assert.Equal(t, test.want, info.LogVersion())
		})
	}
}
|
||||
|
||||
// TestBuildInfo_ShortCommit covers placeholder handling, trimming, and the
// 7-character truncation of ShortCommit.
func TestBuildInfo_ShortCommit(t *testing.T) {
	tests := []struct {
		name string
		commit string
		want string
	}{
		{
			name: "empty defaults to unknown",
			commit: "",
			want: "unknown",
		},
		{
			name: "none defaults to unknown",
			commit: "none",
			want: "unknown",
		},
		{
			name: "unknown defaults to unknown",
			commit: "unknown",
			want: "unknown",
		},
		{
			name: "short commit preserved",
			commit: "abcdef",
			want: "abcdef",
		},
		{
			name: "seven chars preserved",
			commit: "abcdefg",
			want: "abcdefg",
		},
		{
			name: "long commit shortened",
			commit: "abcdefghi",
			want: "abcdefg",
		},
		{
			name: "trims spaces before shortening",
			commit: " 1234567890 ",
			want: "1234567",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Commit: test.commit}
			assert.Equal(t, test.want, info.ShortCommit())
		})
	}
}
|
||||
|
||||
// TestBuildInfo_TUIHeader checks that the TUI header composes DisplayVersion
// and ShortCommit, including their defaults for empty input.
func TestBuildInfo_TUIHeader(t *testing.T) {
	tests := []struct {
		name string
		version string
		commit string
		want string
	}{
		{
			name: "uses display version and short commit",
			version: "1.2.3",
			commit: "abcdefghi",
			want: "Cameradar — v1.2.3 (abcdefg)",
		},
		{
			name: "uses defaults for empty values",
			version: "",
			commit: "",
			want: "Cameradar — vdev (unknown)",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Version: test.version, Commit: test.commit}
			assert.Equal(t, test.want, info.TUIHeader())
		})
	}
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// NopReporter discards all UI events.
//
// It is useful as a default Reporter when no user-facing output is wanted;
// every method is a no-op.
type NopReporter struct{}

// Start implements Reporter.
func (NopReporter) Start(cameradar.Step, string) {}

// Done implements Reporter.
func (NopReporter) Done(cameradar.Step, string) {}

// Progress implements Reporter.
func (NopReporter) Progress(cameradar.Step, string) {}

// Debug implements Reporter.
func (NopReporter) Debug(cameradar.Step, string) {}

// Error implements Reporter.
func (NopReporter) Error(cameradar.Step, error) {}

// Summary implements Reporter.
func (NopReporter) Summary([]cameradar.Stream, error) {}

// Close implements Reporter.
func (NopReporter) Close() {}
|
||||
@@ -0,0 +1,104 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// PlainReporter renders a line-oriented UI for non-interactive terminals.
|
||||
type PlainReporter struct {
|
||||
out io.Writer
|
||||
debug bool
|
||||
}
|
||||
|
||||
// NewPlainReporter creates a line-oriented reporter.
|
||||
func NewPlainReporter(out io.Writer, debug bool) *PlainReporter {
|
||||
return &PlainReporter{
|
||||
out: out,
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
// PrintStartup prints build metadata and configuration options.
|
||||
func (r *PlainReporter) PrintStartup(buildInfo BuildInfo, options []string) {
|
||||
step := cameradar.Step("Startup")
|
||||
message := fmt.Sprintf("Running cameradar version %s, commit %s", buildInfo.LogVersion(), buildInfo.ShortCommit())
|
||||
r.print(step, "INFO", message)
|
||||
if len(options) == 0 {
|
||||
return
|
||||
}
|
||||
for _, option := range options {
|
||||
r.print(step, "INFO", option)
|
||||
}
|
||||
}
|
||||
|
||||
// Start prints the beginning of a step.
|
||||
func (r *PlainReporter) Start(step cameradar.Step, message string) {
|
||||
r.print(step, "STEP", message)
|
||||
}
|
||||
|
||||
// Done prints the completion of a step.
|
||||
func (r *PlainReporter) Done(step cameradar.Step, message string) {
|
||||
r.print(step, "DONE", message)
|
||||
}
|
||||
|
||||
// Progress prints a progress message.
|
||||
func (r *PlainReporter) Progress(step cameradar.Step, message string) {
|
||||
if _, _, ok := cameradar.ParseProgressMessage(message); ok {
|
||||
return
|
||||
}
|
||||
r.print(step, "INFO", message)
|
||||
}
|
||||
|
||||
// Debug prints a debug message when debug mode is enabled.
|
||||
func (r *PlainReporter) Debug(step cameradar.Step, message string) {
|
||||
if !r.debug {
|
||||
return
|
||||
}
|
||||
r.print(step, "DBUG", message)
|
||||
}
|
||||
|
||||
// Error prints an error message.
|
||||
func (r *PlainReporter) Error(step cameradar.Step, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
r.print(step, "EROR", err.Error())
|
||||
}
|
||||
|
||||
// Summary prints the final summary.
|
||||
func (r *PlainReporter) Summary(streams []cameradar.Stream, err error) {
|
||||
_, _ = fmt.Fprintln(r.out, "Summary")
|
||||
_, _ = fmt.Fprintln(r.out, "-------")
|
||||
_, _ = fmt.Fprintln(r.out, FormatSummary(streams, err))
|
||||
}
|
||||
|
||||
// Close is a no-op for the plain reporter.
|
||||
func (r *PlainReporter) Close() {}
|
||||
|
||||
func (r *PlainReporter) print(step cameradar.Step, level, message string) {
|
||||
if message == "" {
|
||||
return
|
||||
}
|
||||
|
||||
level = normalizeLevel(level)
|
||||
_, _ = fmt.Fprintf(r.out, "%s [%s] %s: %s\n", time.Now().Format(time.RFC3339), level, cameradar.StepLabel(step), message)
|
||||
}
|
||||
|
||||
// normalizeLevel maps a level name onto the fixed 4-character tags used in
// log lines: known aliases are rewritten, longer names are truncated, and
// shorter ones are right-padded with spaces.
func normalizeLevel(level string) string {
	switch level {
	case "DEBUG":
		return "DBUG"
	case "ERROR":
		return "EROR"
	case "START", "STEP":
		return "STEP"
	}
	if len(level) < 4 {
		return fmt.Sprintf("%-4s", level)
	}
	return level[:4]
}
|
||||
@@ -0,0 +1,75 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestPlainReporter_Outputs checks the line format of each event kind and
// that the debug flag / empty inputs suppress output entirely.
func TestPlainReporter_Outputs(t *testing.T) {
	t.Run("prints events", func(t *testing.T) {
		out := &bytes.Buffer{}
		reporter := ui.NewPlainReporter(out, true)

		reporter.Start(cameradar.StepScan, "starting")
		reporter.Progress(cameradar.StepScan, "working")
		reporter.Debug(cameradar.StepScan, "details")
		reporter.Done(cameradar.StepScan, "finished")
		reporter.Error(cameradar.StepScan, errors.New("boom"))
		reporter.Summary([]cameradar.Stream{}, nil)

		content := out.String()
		assert.Contains(t, content, " [STEP] Scan targets: starting")
		assert.Contains(t, content, " [INFO] Scan targets: working")
		assert.Contains(t, content, " [DBUG] Scan targets: details")
		assert.Contains(t, content, " [DONE] Scan targets: finished")
		assert.Contains(t, content, " [EROR] Scan targets: boom")
		assert.Contains(t, content, "Summary\n-------\nAccessible streams: 0")
	})

	t.Run("respects debug flag and empty input", func(t *testing.T) {
		out := &bytes.Buffer{}
		reporter := ui.NewPlainReporter(out, false)

		// None of these should produce any output: debug disabled, empty
		// message, nil error.
		reporter.Debug(cameradar.StepScan, "hidden")
		reporter.Progress(cameradar.StepScan, "")
		reporter.Error(cameradar.StepScan, nil)

		content := out.String()
		assert.NotContains(t, content, "DBUG")
		assert.Equal(t, "", strings.TrimSpace(content))
	})
}
|
||||
|
||||
// TestPlainReporter_PrintStartup verifies the startup banner: the version is
// printed without its "v" prefix, the commit truncated to seven characters,
// and missing build info falls back to "dev"/"unknown".
func TestPlainReporter_PrintStartup(t *testing.T) {
	t.Run("prints build info and options", func(t *testing.T) {
		out := &bytes.Buffer{}
		reporter := ui.NewPlainReporter(out, false)

		reporter.PrintStartup(ui.BuildInfo{Version: "v1.2.3", Commit: "abcdefghi"}, []string{
			"targets: 127.0.0.1",
			"ports: 554",
		})

		content := out.String()
		// "v1.2.3" -> "1.2.3", "abcdefghi" -> "abcdefg".
		assert.Contains(t, content, " [INFO] Startup: Running cameradar version 1.2.3, commit abcdefg")
		assert.Contains(t, content, " [INFO] Startup: targets: 127.0.0.1")
		assert.Contains(t, content, " [INFO] Startup: ports: 554")
	})

	t.Run("prints only build info when options empty", func(t *testing.T) {
		out := &bytes.Buffer{}
		reporter := ui.NewPlainReporter(out, false)

		reporter.PrintStartup(ui.BuildInfo{Version: "", Commit: "none"}, nil)

		content := out.String()
		assert.Contains(t, content, " [INFO] Startup: Running cameradar version dev, commit unknown")
		// Exactly one startup line: nil options add nothing.
		assert.Equal(t, 1, strings.Count(content, " Startup: "))
	})
}
|
||||
@@ -0,0 +1,45 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// Reporter defines the interface for cameradar UIs.
type Reporter interface {
	// Start signals that a step has begun.
	Start(step cameradar.Step, message string)
	// Done signals that a step completed successfully.
	Done(step cameradar.Step, message string)
	// Progress reports intermediate activity within a step.
	Progress(step cameradar.Step, message string)
	// Debug reports verbose diagnostics; implementations may drop these
	// when debug output is disabled.
	Debug(step cameradar.Step, message string)
	// Error reports a step failure. A nil error may be ignored.
	Error(step cameradar.Step, err error)
	// Summary renders the final result set of discovered streams.
	Summary(streams []cameradar.Stream, err error)
	// Close releases the reporter and flushes any pending output.
	Close()
}
|
||||
|
||||
// NewReporter creates a Reporter based on the requested mode.
//
// Debug output always uses the plain reporter; TUI mode requires an
// interactive terminal, and auto mode picks TUI when interactive and plain
// otherwise. An unsupported mode yields an error.
func NewReporter(mode cameradar.Mode, debug bool, out io.Writer, interactive bool, buildInfo BuildInfo, cancel context.CancelFunc) (Reporter, error) {
	// NOTE(review): debug short-circuits to plain even when the user asked
	// for TUI mode explicitly, and skips mode validation — presumably
	// intentional so debug logs stay readable; confirm with callers.
	if debug {
		return NewPlainReporter(out, debug), nil
	}

	switch mode {
	case cameradar.ModePlain:
		return NewPlainReporter(out, debug), nil
	case cameradar.ModeTUI:
		if !interactive {
			return nil, errors.New("tui mode requires an interactive terminal")
		}
		return NewTUIReporter(debug, out, buildInfo, cancel)
	case cameradar.ModeAuto:
		if interactive {
			return NewTUIReporter(debug, out, buildInfo, cancel)
		}
		return NewPlainReporter(out, debug), nil
	default:
		return nil, fmt.Errorf("unsupported ui mode %q", mode)
	}
}
|
||||
@@ -0,0 +1,94 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNewReporter verifies the mode-to-reporter mapping of ui.NewReporter:
// plain and non-interactive auto yield *PlainReporter, interactive auto
// yields *TUIReporter, non-interactive tui and unknown modes fail.
func TestNewReporter(t *testing.T) {
	tests := []struct {
		name            string
		mode            cameradar.Mode
		interactive     bool
		wantType        string // "plain" or "tui"; empty when an error is expected
		wantErrContains string
	}{
		{
			name:        "plain",
			mode:        cameradar.ModePlain,
			interactive: false,
			wantType:    "plain",
		},
		{
			name:        "auto non-interactive",
			mode:        cameradar.ModeAuto,
			interactive: false,
			wantType:    "plain",
		},
		{
			name:            "tui non-interactive",
			mode:            cameradar.ModeTUI,
			interactive:     false,
			wantErrContains: "interactive terminal",
		},
		{
			name:            "unsupported",
			mode:            cameradar.Mode("unknown"),
			interactive:     false,
			wantErrContains: "unsupported ui mode",
		},
		{
			name:        "auto interactive",
			mode:        cameradar.ModeAuto,
			interactive: true,
			wantType:    "tui",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			out := &bytes.Buffer{}

			reporter, err := ui.NewReporter(test.mode, false, out, test.interactive, ui.BuildInfo{Version: "dev", Commit: "none"}, func() {})

			if test.wantErrContains != "" {
				require.Error(t, err)
				assert.ErrorContains(t, err, test.wantErrContains)
				assert.Nil(t, reporter)
				return
			}

			require.NoError(t, err)
			require.NotNil(t, reporter)

			// Assert the concrete type behind the Reporter interface.
			switch test.wantType {
			case "plain":
				_, ok := reporter.(*ui.PlainReporter)
				assert.True(t, ok)
			case "tui":
				_, ok := reporter.(*ui.TUIReporter)
				assert.True(t, ok)
			}

			reporter.Close()
		})
	}
}
|
||||
|
||||
// TestNopReporter_DoesNotPanic exercises every Reporter method on the no-op
// implementation to guarantee they are all safe to call.
func TestNopReporter_DoesNotPanic(t *testing.T) {
	reporter := ui.NopReporter{}
	assert.NotPanics(t, func() {
		reporter.Start(cameradar.StepScan, "start")
		reporter.Done(cameradar.StepScan, "done")
		reporter.Progress(cameradar.StepScan, "progress")
		reporter.Debug(cameradar.StepScan, "debug")
		reporter.Error(cameradar.StepScan, assert.AnError)
		reporter.Summary(nil, nil)
		reporter.Close()
	})
}
|
||||
@@ -0,0 +1,283 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/charmbracelet/bubbles/progress"
|
||||
"github.com/charmbracelet/bubbles/spinner"
|
||||
"github.com/charmbracelet/bubbles/table"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
// modelState is the Bubble Tea model backing the TUI: it tracks per-step
// status, the log backlog, the latest summary, and progress-bar animation
// state. It is mutated only inside Update (the Bubble Tea event loop).
type modelState struct {
	steps          []cameradar.Step           // ordered list of steps to display
	status         map[cameradar.Step]state   // current state per step
	logs           []logMsg                   // accumulated log entries (never truncated)
	summaryStreams []cameradar.Stream         // streams shown in the summary section
	summaryFinal   bool                       // true once the final summary arrived
	buildInfo      BuildInfo                  // rendered in the header
	cancel         context.CancelFunc         // invoked on Ctrl+C to stop the run
	debug          bool                       // whether debug log entries are emitted
	spinner        spinner.Model              // spinner shown next to the active step
	progress       progress.Model             // progress bar widget
	width          int                        // last known terminal width
	height         int                        // last known terminal height
	quitting       bool                       // set when shutdown was requested
	progressTotals map[cameradar.Step]int     // expected work units per step
	progressCounts map[cameradar.Step]int     // completed work units per step
	progressTarget float64                    // percentage the bar should reach
	progressVisible float64                   // percentage currently drawn (animated toward target)
}
|
||||
|
||||
// Init starts the spinner ticker; it is the only initial command the model
// needs.
func (m *modelState) Init() tea.Cmd {
	return m.spinner.Tick
}
|
||||
|
||||
// Update dispatches incoming Bubble Tea messages to the per-kind handlers
// and collects any follow-up commands.
func (m *modelState) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd

	switch typed := msg.(type) {
	case stepMsg:
		m.handleStepMsg(typed)
	case logMsg:
		m.handleLogMsg(typed)
	case summaryMsg:
		m.handleSummaryMsg(typed)
	case progressMsg:
		m.handleProgressMsg(typed)
	case closeMsg:
		// Defer the actual tea.Quit to the spinner tick so the progress
		// bar can finish animating (see handleSpinnerMsg).
		m.quitting = true
	case tea.KeyMsg:
		if typed.Type == tea.KeyCtrlC {
			// Cancel the scan run, then quit immediately.
			if m.cancel != nil {
				m.cancel()
			}
			m.quitting = true
			return m, tea.Quit
		}
	case spinner.TickMsg:
		cmds = m.handleSpinnerMsg(typed)
	case tea.WindowSizeMsg:
		m.handleWindowSizeMsg(typed)
	case progress.FrameMsg:
		// Ignored: the bar is rendered via ViewAs with a manually
		// animated value, not via the widget's own animation frames.
	}

	if len(cmds) == 0 {
		return m, nil
	}
	return m, tea.Batch(cmds...)
}
|
||||
|
||||
// handleStepMsg records a step state change, mirrors its message into the
// log (as an error entry when the step failed), and advances overall
// progress when the step finished.
func (m *modelState) handleStepMsg(msg stepMsg) {
	m.status[msg.step] = msg.state
	if msg.message != "" {
		level := logInfo
		if msg.state == stateError {
			level = logError
		}
		m.logs = append(m.logs, logMsg{level: level, step: msg.step, message: msg.message})
	}
	// Both success and failure count as "complete" for the progress bar.
	if msg.state == stateDone || msg.state == stateError {
		markStepComplete(m, msg.step)
		queueProgressUpdate(m)
	}
}
|
||||
|
||||
// handleLogMsg appends a log entry to the (unbounded) backlog.
func (m *modelState) handleLogMsg(msg logMsg) {
	m.logs = append(m.logs, msg)
}
|
||||
|
||||
// handleSummaryMsg stores the latest (partial or final) summary. A final
// summary completes the summary step and requests shutdown; the actual
// tea.Quit fires from the spinner tick once the bar finishes animating.
func (m *modelState) handleSummaryMsg(msg summaryMsg) {
	m.summaryStreams = msg.streams
	m.summaryFinal = msg.final
	if msg.final {
		m.status[cameradar.StepSummary] = stateDone
		markStepComplete(m, cameradar.StepSummary)
		queueProgressUpdate(m)
		m.quitting = true
	}
}
|
||||
|
||||
// handleProgressMsg updates a step's work-unit total and/or completed count,
// clamping the count so it never exceeds the total, then recomputes the
// progress-bar target.
func (m *modelState) handleProgressMsg(msg progressMsg) {
	if msg.total > 0 {
		m.progressTotals[msg.step] = msg.total
		// A shrinking total may leave the count past the new bound.
		if m.progressCounts[msg.step] > msg.total {
			m.progressCounts[msg.step] = msg.total
		}
	}

	if msg.increment > 0 {
		m.progressCounts[msg.step] += msg.increment
		total := m.progressTotals[msg.step]
		if total > 0 && m.progressCounts[msg.step] > total {
			m.progressCounts[msg.step] = total
		}
	}

	queueProgressUpdate(m)
}
|
||||
|
||||
// handleSpinnerMsg advances the spinner and the animated progress value on
// every tick, and emits tea.Quit once shutdown was requested and the bar
// has caught up with its target.
func (m *modelState) handleSpinnerMsg(msg spinner.TickMsg) []tea.Cmd {
	var cmds []tea.Cmd
	var cmd tea.Cmd
	m.spinner, cmd = m.spinner.Update(msg)
	cmds = append(cmds, cmd)
	advanceProgress(m)
	if m.quitting && progressComplete(*m) {
		cmds = append(cmds, tea.Quit)
	}
	return cmds
}
|
||||
|
||||
// handleWindowSizeMsg records the new terminal dimensions and resizes the
// progress bar to match.
func (m *modelState) handleWindowSizeMsg(msg tea.WindowSizeMsg) {
	m.width = msg.Width
	m.height = msg.Height
	m.progress.Width = progressWidth(msg.Width)
}
|
||||
|
||||
// View renders the live TUI frame: header, steps with progress bar, a
// height-capped log window, and the summary tables.
func (m *modelState) View() string {
	var builder strings.Builder
	header := sectionStyle.Render(m.buildInfo.TUIHeader())
	headerLines := splitLines(header)
	builder.WriteString(strings.Join(headerLines, "\n"))
	builder.WriteString("\n\n")

	stepsLines := m.renderSteps()
	builder.WriteString(strings.Join(stepsLines, "\n"))
	builder.WriteString("\n\n")

	// Split the remaining vertical space between logs and summary.
	summaryHeight, logsHeight := m.layoutHeights(len(headerLines), len(stepsLines))
	logsLines := m.renderLogs(logsHeight)
	builder.WriteString(sectionStyle.Render("Logs"))
	builder.WriteString("\n")
	builder.WriteString(strings.Join(logsLines, "\n"))
	builder.WriteString("\n\n")

	// Reserve two lines of the summary area for the table chrome.
	rowsToShow := max(1, summaryHeight-2)
	summaryTitle := renderSummaryTitle(m.summaryStreams)
	summaryTables := buildSummaryTables(m.summaryStreams, m.width, m.status, rowsToShow)
	builder.WriteString(sectionStyle.Render(summaryTitle))
	builder.WriteString("\n")
	for i, summary := range summaryTables {
		builder.WriteString(summaryTableStyle.Render(summary.table.View()))
		if i < len(summaryTables)-1 {
			builder.WriteString("\n")
		}
	}

	return builder.String()
}
|
||||
|
||||
// FinalView renders the non-interactive snapshot printed after the program
// exits: unlike View it shows the full log backlog and a plain (unscrolled)
// summary table.
func (m *modelState) FinalView() string {
	var builder strings.Builder
	header := sectionStyle.Render(m.buildInfo.TUIHeader())
	headerLines := splitLines(header)
	builder.WriteString(strings.Join(headerLines, "\n"))
	builder.WriteString("\n\n")

	stepsLines := m.renderSteps()
	builder.WriteString(strings.Join(stepsLines, "\n"))
	builder.WriteString("\n\n")

	builder.WriteString(sectionStyle.Render("Logs"))
	builder.WriteString("\n")
	logLines := m.renderLogsAll()
	if len(logLines) == 0 {
		builder.WriteString(dimStyle.Render("No events yet."))
	} else {
		builder.WriteString(strings.Join(logLines, "\n"))
	}
	builder.WriteString("\n\n")

	summaryTitle := renderSummaryTitle(m.summaryStreams)
	// All steps are treated as done in the final render.
	visibility := summaryVisibility(summaryStatusAllDone())
	accessible, others := partitionStreams(m.summaryStreams)
	rows := append(buildSummaryRows(accessible, visibility), buildSummaryRows(others, visibility)...)
	if len(rows) == 0 {
		rows = []table.Row{emptySummaryRow()}
	}
	columns := summaryColumns(m.width, rows)
	builder.WriteString(sectionStyle.Render(summaryTitle))
	builder.WriteString("\n")
	builder.WriteString(renderSummaryTablePlain(columns, rows))
	return builder.String()
}
|
||||
|
||||
// renderSteps returns the "Steps" section lines: a title, the progress bar,
// then one status line per step.
func (m *modelState) renderSteps() []string {
	lines := []string{sectionStyle.Render("Steps"), renderProgress(m)}
	spinnerView := m.spinner.View()
	for _, step := range m.steps {
		lines = append(lines, renderStep(step, m.status[step], spinnerView))
	}
	return lines
}
|
||||
|
||||
// renderLogs returns the newest log entries that fit in height lines,
// padded with blanks so the section height stays constant between frames.
func (m *modelState) renderLogs(height int) []string {
	if height <= 0 {
		return nil
	}
	if len(m.logs) == 0 {
		lines := []string{dimStyle.Render("No events yet.")}
		return padLines(lines, height)
	}

	// Show only the tail of the backlog.
	start := 0
	if len(m.logs) > height {
		start = len(m.logs) - height
	}
	lines := make([]string, 0, min(height, len(m.logs)))
	for _, entry := range m.logs[start:] {
		lines = append(lines, renderLog(entry))
	}
	return padLines(lines, height)
}
|
||||
|
||||
// renderLogsAll renders the entire log backlog (used by FinalView);
// it returns nil when there are no entries.
func (m *modelState) renderLogsAll() []string {
	if len(m.logs) == 0 {
		return nil
	}
	lines := make([]string, 0, len(m.logs))
	for _, entry := range m.logs {
		lines = append(lines, renderLog(entry))
	}
	return lines
}
|
||||
|
||||
// layoutHeights splits the terminal height left over after the header and
// steps sections between the summary (clamped to [3, summaryMaxHeight])
// and the logs window. With an unknown height (<= 0) the summary gets its
// minimum and the logs show everything.
func (m *modelState) layoutHeights(headerLines, stepsLines int) (summaryHeight, logsHeight int) {
	if m.height <= 0 {
		return summaryMinHeight, len(m.logs)
	}

	// One blank line after header and steps, plus the "Logs" title and
	// the summary title.
	reserved := headerLines + 1 + stepsLines + 1 + 1 + 1
	remaining := m.height - reserved
	remaining = max(0, remaining)

	switch {
	case remaining < summaryMinHeight:
		// NOTE(review): max(3, remaining) can exceed remaining on very
		// short terminals, overflowing the frame — presumably accepted;
		// confirm intended behavior.
		summaryHeight = max(3, remaining)
	case remaining > summaryMaxHeight:
		summaryHeight = summaryMaxHeight
	default:
		summaryHeight = remaining
	}

	logsHeight = max(0, remaining-summaryHeight)

	return summaryHeight, logsHeight
}
|
||||
|
||||
// padLines pads lines with empty strings until it is at least height lines
// long. Inputs that already meet the height, or a non-positive height, are
// returned unchanged.
func padLines(lines []string, height int) []string {
	if height <= 0 {
		return lines
	}
	for i := len(lines); i < height; i++ {
		lines = append(lines, "")
	}
	return lines
}
|
||||
|
||||
// splitLines breaks value into its newline-separated lines. An empty input
// yields a single empty line, matching strings.Split semantics.
func splitLines(value string) []string {
	var lines []string
	rest := value
	for {
		line, remainder, found := strings.Cut(rest, "\n")
		lines = append(lines, line)
		if !found {
			return lines
		}
		rest = remainder
	}
}
|
||||
@@ -0,0 +1,14 @@
|
||||
package ui
|
||||
|
||||
import "github.com/charmbracelet/lipgloss"
|
||||
|
||||
// Lipgloss styles shared by the TUI renderers. Colors are 256-color
// palette indices.
var (
	sectionStyle      = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("63"))  // section titles (purple, bold)
	infoStyle         = lipgloss.NewStyle().Foreground(lipgloss.Color("252"))            // info log lines (light grey)
	debugStyle        = lipgloss.NewStyle().Foreground(lipgloss.Color("244"))            // debug log lines (mid grey)
	successStyle      = lipgloss.NewStyle().Foreground(lipgloss.Color("42"))             // completed steps (green)
	activeStyle       = lipgloss.NewStyle().Foreground(lipgloss.Color("39"))             // active step (blue)
	errorStyle        = lipgloss.NewStyle().Foreground(lipgloss.Color("203"))            // errors (red)
	dimStyle          = lipgloss.NewStyle().Foreground(lipgloss.Color("241"))            // de-emphasized text (dark grey)
	summaryTableStyle = lipgloss.NewStyle().BorderStyle(lipgloss.NormalBorder()).BorderForeground(lipgloss.Color("240")) // summary table border
)
|
||||
@@ -0,0 +1,153 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// FormatSummary builds a human-readable summary of discovered streams.
// Accessible streams come first, followed by an optional section for the
// remaining discovered streams. The error argument is intentionally
// ignored; errors are reported through the Reporter's Error path instead.
func FormatSummary(streams []cameradar.Stream, _ error) string {
	accessible, others := partitionStreams(streams)

	var builder strings.Builder
	builder.WriteString(fmt.Sprintf("Accessible streams: %d\n", len(accessible)))
	if len(accessible) == 0 {
		builder.WriteString("• None\n")
	} else {
		for _, stream := range accessible {
			builder.WriteString(formatStream(stream))
		}
	}

	// The "other" section is omitted entirely when empty.
	if len(others) > 0 {
		builder.WriteString("\n")
		builder.WriteString(fmt.Sprintf("Other discovered streams: %d\n", len(others)))
		for _, stream := range others {
			builder.WriteString(formatStream(stream))
		}
	}

	return builder.String()
}
|
||||
|
||||
func partitionStreams(streams []cameradar.Stream) ([]cameradar.Stream, []cameradar.Stream) {
|
||||
var accessible []cameradar.Stream
|
||||
var others []cameradar.Stream
|
||||
for _, stream := range streams {
|
||||
if stream.Available {
|
||||
accessible = append(accessible, stream)
|
||||
} else {
|
||||
others = append(others, stream)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort streams by address and port.
|
||||
sort.Slice(accessible, func(i, j int) bool {
|
||||
if accessible[i].Address.String() == accessible[j].Address.String() {
|
||||
return accessible[i].Port < accessible[j].Port
|
||||
}
|
||||
return accessible[i].Address.String() < accessible[j].Address.String()
|
||||
})
|
||||
sort.Slice(others, func(i, j int) bool {
|
||||
if others[i].Address.String() == others[j].Address.String() {
|
||||
return others[i].Port < others[j].Port
|
||||
}
|
||||
return others[i].Address.String() < others[j].Address.String()
|
||||
})
|
||||
|
||||
return accessible, others
|
||||
}
|
||||
|
||||
// formatStream renders one stream as a bullet entry followed by indented
// attribute lines (authentication, routes, credentials, availability, and —
// when fully cracked — the ready-to-use RTSP URL and admin panel).
func formatStream(stream cameradar.Stream) string {
	var builder strings.Builder
	builder.WriteString("• ")
	builder.WriteString(stream.Address.String())
	builder.WriteString(":")
	builder.WriteString(strconv.FormatUint(uint64(stream.Port), 10))

	// The device model is optional.
	if stream.Device != "" {
		builder.WriteString(" (")
		builder.WriteString(stream.Device)
		builder.WriteString(")")
	}
	builder.WriteString("\n")

	builder.WriteString("  Authentication: ")
	builder.WriteString(authTypeLabel(stream.AuthenticationType))
	builder.WriteString("\n")

	if len(stream.Routes) > 0 {
		builder.WriteString("  Routes: ")
		builder.WriteString(strings.Join(stream.Routes, ", "))
		builder.WriteString("\n")
	} else {
		builder.WriteString("  Routes: not found\n")
	}

	// Credentials are shown when cracked, or when no auth is required.
	if stream.CredentialsFound || stream.AuthenticationType == cameradar.AuthNone {
		builder.WriteString("  Credentials: ")
		builder.WriteString(formatCredentials(stream))
		builder.WriteString("\n")
	} else {
		builder.WriteString("  Credentials: not found\n")
	}

	builder.WriteString("  Availability: ")
	if stream.Available {
		builder.WriteString("yes\n")
	} else {
		builder.WriteString("no\n")
	}

	// Only print an RTSP URL when both the route and credentials are usable.
	if stream.RouteFound && (stream.CredentialsFound || stream.AuthenticationType == cameradar.AuthNone) {
		builder.WriteString("  RTSP URL: ")
		builder.WriteString(formatRTSPURL(stream))
		builder.WriteString("\n")
	}

	builder.WriteString("  Admin panel: ")
	builder.WriteString(formatAdminPanelURL(stream))
	builder.WriteString("\n")

	return builder.String()
}
|
||||
|
||||
// formatRTSPURL builds the rtsp:// URL for a stream. The user:pass@ prefix
// is included only when at least one of username/password is non-empty, and
// the route is normalized to a single leading slash.
func formatRTSPURL(stream cameradar.Stream) string {
	path := "/" + strings.TrimLeft(strings.TrimSpace(stream.Route()), "/")

	credentials := ""
	if stream.Username != "" || stream.Password != "" {
		credentials = stream.Username + ":" + stream.Password + "@"
	}

	return fmt.Sprintf("rtsp://%s%s:%d%s", credentials, stream.Address.String(), stream.Port, path)
}
|
||||
|
||||
// formatAdminPanelURL returns the camera's likely admin panel address:
// plain HTTP on the default port at the stream's host.
func formatAdminPanelURL(stream cameradar.Stream) string {
	return fmt.Sprintf("http://%s/", stream.Address.String())
}
|
||||
|
||||
// formatCredentials renders a stream's credentials as "user:pass", or
// "none" when both parts are empty (e.g. a camera without authentication).
func formatCredentials(stream cameradar.Stream) string {
	if stream.Username == "" && stream.Password == "" {
		return "none"
	}

	return stream.Username + ":" + stream.Password
}
|
||||
|
||||
// authTypeLabel maps an RTSP authentication type to its display name;
// unrecognized values render as "unknown(<code>)".
func authTypeLabel(auth cameradar.AuthType) string {
	switch auth {
	case cameradar.AuthNone:
		return "none"
	case cameradar.AuthBasic:
		return "basic"
	case cameradar.AuthDigest:
		return "digest"
	default:
		return fmt.Sprintf("unknown(%d)", auth)
	}
}
|
||||
@@ -0,0 +1,130 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestFormatSummary is a table-driven test for ui.FormatSummary covering the
// empty case, a mix of accessible/inaccessible streams (including sort order
// and the ignored error argument), and empty-but-found credentials.
func TestFormatSummary(t *testing.T) {
	tests := []struct {
		name            string
		streams         []cameradar.Stream
		err             error
		wantContains    []string
		wantNotContains []string
		orderedPairs    [][2]string // each pair must appear in order in the output
	}{
		{
			name:    "empty",
			streams: nil,
			wantContains: []string{
				"Accessible streams: 0",
				"• None",
			},
			wantNotContains: []string{
				"Other discovered streams",
				"Error:",
			},
		},
		{
			name: "mixed streams with error",
			streams: []cameradar.Stream{
				{
					Device:             "Model B",
					Address:            netip.MustParseAddr("10.0.0.2"),
					Port:               554,
					Available:          true,
					AuthenticationType: cameradar.AuthNone,
				},
				{
					Device:             "Model A",
					Address:            netip.MustParseAddr("10.0.0.1"),
					Port:               8554,
					Available:          true,
					Routes:             []string{"stream1", "stream2"},
					RouteFound:         true,
					CredentialsFound:   true,
					Username:           "user",
					Password:           "pass",
					AuthenticationType: cameradar.AuthBasic,
				},
				{
					Address:            netip.MustParseAddr("10.0.0.3"),
					Port:               554,
					Available:          false,
					AuthenticationType: cameradar.AuthDigest,
				},
			},
			// The error must not leak into the summary text.
			err: errors.New("boom"),
			wantContains: []string{
				"Accessible streams: 2",
				"Other discovered streams: 1",
				"• 10.0.0.1:8554 (Model A)",
				"• 10.0.0.2:554 (Model B)",
				"• 10.0.0.3:554",
				"Authentication: basic",
				"Authentication: none",
				"Authentication: digest",
				"Routes: stream1, stream2",
				"Credentials: user:pass",
				"Credentials: none",
				"RTSP URL: rtsp://user:pass@10.0.0.1:8554/stream1",
				"Admin panel: http://10.0.0.1/",
				"Admin panel: http://10.0.0.2/",
			},
			wantNotContains: []string{
				"Error:",
			},
			orderedPairs: [][2]string{
				// Sorted by address: 10.0.0.1 before 10.0.0.2.
				{"• 10.0.0.1:8554", "• 10.0.0.2:554"},
			},
		},
		{
			name: "empty discovered credentials render as none",
			streams: []cameradar.Stream{
				{
					Address:            netip.MustParseAddr("10.0.0.4"),
					Port:               554,
					Available:          true,
					RouteFound:         true,
					Routes:             []string{"stream"},
					CredentialsFound:   true,
					AuthenticationType: cameradar.AuthNone,
				},
			},
			wantContains: []string{
				"Accessible streams: 1",
				"Credentials: none",
				"RTSP URL: rtsp://10.0.0.4:554/stream",
			},
			wantNotContains: []string{
				"Credentials: :",
				"rtsp://:@10.0.0.4:554/stream",
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got := ui.FormatSummary(test.streams, test.err)

			for _, expected := range test.wantContains {
				assert.Contains(t, got, expected)
			}
			for _, unexpected := range test.wantNotContains {
				assert.NotContains(t, got, unexpected)
			}
			for _, pair := range test.orderedPairs {
				first := strings.Index(got, pair[0])
				second := strings.Index(got, pair[1])
				assert.True(t, first >= 0 && second >= 0 && first < second)
			}
		})
	}
}
|
||||
@@ -0,0 +1,714 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/charmbracelet/bubbles/progress"
|
||||
"github.com/charmbracelet/bubbles/spinner"
|
||||
"github.com/charmbracelet/bubbles/table"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
)
|
||||
|
||||
// state is the lifecycle of a single pipeline step in the TUI.
type state int

const (
	statePending state = iota // not started yet
	stateActive               // currently running
	stateDone                 // finished successfully
	stateError                // finished with an error
)

// logLevel classifies log entries for styling.
type logLevel int

const (
	logInfo logLevel = iota
	logDebug
	logError
)

// stepMsg reports a step state transition to the model.
type stepMsg struct {
	step    cameradar.Step
	state   state
	message string // optional; mirrored into the log when non-empty
}

// logMsg appends one entry to the log backlog.
type logMsg struct {
	level   logLevel
	step    cameradar.Step
	message string
}

// progressMsg updates a step's work-unit accounting: total sets the
// expected number of units, increment adds completed units.
type progressMsg struct {
	step      cameradar.Step
	total     int
	increment int
}

// closeMsg asks the model to shut down gracefully.
type closeMsg struct{}

// summaryMsg carries a (possibly partial) set of discovered streams;
// final marks the end of the run.
type summaryMsg struct {
	streams []cameradar.Stream
	final   bool
}

// summaryTable wraps one rendered bubbles table of the summary section.
type summaryTable struct {
	table table.Model
}

const (
	summaryMinHeight   = 8  // minimum lines for the summary section
	summaryMaxHeight   = 10 // maximum lines for the summary section
	summaryColumnCount = 8  // number of columns in a summary table
)
|
||||
|
||||
// TUIReporter renders a Bubble Tea based UI. Events are forwarded to the
// Bubble Tea program as messages; a goroutine started by NewTUIReporter
// runs the program and closes `closed` when it exits.
type TUIReporter struct {
	program *tea.Program
	debug   bool
	once    sync.Once     // guards the single closeMsg sent by Close
	closed  chan struct{} // closed when the program goroutine finishes
	mu      sync.Mutex    // protects last
	last    []cameradar.Stream // most recent summary snapshot, kept as a fallback for the final render
}
|
||||
|
||||
// NewTUIReporter creates a new Bubble Tea reporter and starts the TUI
// program in a background goroutine. After the program exits, the final
// frame is printed to out and the reporter's closed channel is closed
// (Close waits on it).
func NewTUIReporter(debug bool, out io.Writer, buildInfo BuildInfo, cancel context.CancelFunc) (*TUIReporter, error) {
	spin := spinner.New()
	spin.Spinner = spinner.Dot
	spin.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("63"))

	prog := progress.New(
		progress.WithDefaultGradient(),
		progress.WithFillCharacters('━', '·'),
		progress.WithoutPercentage(),
		progress.WithWidth(28),
	)

	initial := &modelState{
		steps:          cameradar.Steps(),
		status:         make(map[cameradar.Step]state),
		debug:          debug,
		buildInfo:      buildInfo,
		cancel:         cancel,
		spinner:        spin,
		progress:       prog,
		progressTotals: make(map[cameradar.Step]int),
		progressCounts: make(map[cameradar.Step]int),
	}

	p := tea.NewProgram(initial, tea.WithInputTTY(), tea.WithOutput(out), tea.WithAltScreen())
	reporter := &TUIReporter{program: p, debug: debug, closed: make(chan struct{})}

	go func() {
		model, err := p.Run()
		if err != nil {
			_, _ = fmt.Fprintf(out, "Error running TUI: %v\n", err)
			close(reporter.closed)
			return
		}

		if rendered, ok := model.(*modelState); ok {
			output := rendered.FinalView()
			// If the model never received a summary (e.g. the program was
			// interrupted), fall back to the reporter's last snapshot.
			if len(rendered.summaryStreams) == 0 {
				fallback := reporter.snapshotSummary()
				if len(fallback) > 0 {
					tmp := &modelState{
						summaryStreams: fallback,
						width:          rendered.width,
						status:         summaryStatusAllDone(),
					}
					output = tmp.FinalView()
				}
			}
			_, _ = fmt.Fprintln(out, output)
		}
		close(reporter.closed)
	}()

	return reporter, nil
}
|
||||
|
||||
// Start implements Reporter. It marks the step active in the TUI.
func (r *TUIReporter) Start(step cameradar.Step, message string) {
	r.send(stepMsg{step: step, state: stateActive, message: message})
}
|
||||
|
||||
// Done implements Reporter. It marks the step completed in the TUI.
func (r *TUIReporter) Done(step cameradar.Step, message string) {
	r.send(stepMsg{step: step, state: stateDone, message: message})
}
|
||||
|
||||
// Progress implements Reporter. Structured progress messages (parsed by
// cameradar.ParseProgressMessage into "total"/"tick" updates) drive the
// progress bar; anything else is shown as a plain info log line.
func (r *TUIReporter) Progress(step cameradar.Step, message string) {
	if kind, value, ok := cameradar.ParseProgressMessage(message); ok {
		msg := progressMsg{step: step}
		if kind == "total" {
			msg.total = value
		}
		if kind == "tick" {
			msg.increment = value
		}
		r.send(msg)
		return
	}

	r.send(logMsg{level: logInfo, step: step, message: message})
}
|
||||
|
||||
// Debug implements Reporter. Entries are dropped unless the reporter was
// created with debug enabled.
func (r *TUIReporter) Debug(step cameradar.Step, message string) {
	if !r.debug {
		return
	}

	r.send(logMsg{level: logDebug, step: step, message: message})
}
|
||||
|
||||
// Error implements Reporter. A nil error is ignored; otherwise the step is
// marked failed with the error text as its message.
func (r *TUIReporter) Error(step cameradar.Step, err error) {
	if err == nil {
		return
	}

	r.send(stepMsg{step: step, state: stateError, message: err.Error()})
}
|
||||
|
||||
// Summary implements Reporter. It publishes the final stream set; the
// slice is copied so later caller mutations cannot race the UI goroutine.
func (r *TUIReporter) Summary(streams []cameradar.Stream, _ error) {
	cloned := copyStreams(streams)
	r.recordSummary(cloned)
	r.send(summaryMsg{streams: cloned, final: true})
}
|
||||
|
||||
// UpdateSummary updates the summary section with partial results. Like
// Summary it copies the slice, but does not end the run (final=false).
func (r *TUIReporter) UpdateSummary(streams []cameradar.Stream) {
	cloned := copyStreams(streams)
	r.recordSummary(cloned)
	r.send(summaryMsg{streams: cloned, final: false})
}
|
||||
|
||||
// recordSummary stores the latest summary snapshot under the mutex; it is
// read back by snapshotSummary for the final render fallback.
func (r *TUIReporter) recordSummary(streams []cameradar.Stream) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.last = streams
}
|
||||
|
||||
// snapshotSummary returns a copy of the last recorded summary, safe for
// use outside the mutex.
func (r *TUIReporter) snapshotSummary() []cameradar.Stream {
	r.mu.Lock()
	defer r.mu.Unlock()
	return copyStreams(r.last)
}
|
||||
|
||||
// Close implements Reporter. It asks the TUI to shut down (at most once)
// and waits for the program goroutine to finish, bounded by a timeout.
func (r *TUIReporter) Close() {
	r.once.Do(func() {
		r.send(closeMsg{})
	})

	// Timeout after 2 seconds to avoid hanging forever.
	select {
	case <-r.closed:
	case <-time.After(2 * time.Second):
	}
}
|
||||
|
||||
// send forwards a message to the Bubble Tea program; it is a no-op when no
// program is attached.
func (r *TUIReporter) send(msg tea.Msg) {
	if r.program == nil {
		return
	}

	r.program.Send(msg)
}
|
||||
|
||||
// renderStep renders one step line: a state symbol (dot, spinner, check, or
// cross) followed by the step label, in the matching style.
func renderStep(step cameradar.Step, state state, spinnerView string) string {
	label := cameradar.StepLabel(step)
	symbol := "·" // pending default
	style := dimStyle
	switch state {
	case stateActive:
		symbol = spinnerView
		style = activeStyle
	case stateDone:
		symbol = "✓"
		style = successStyle
	case stateError:
		symbol = "✗"
		style = errorStyle
	}
	return style.Render(fmt.Sprintf("%s %s", symbol, label))
}
|
||||
|
||||
// renderLog renders a log entry as "[LEVEL] <step label>: <message>" with
// level-specific styling (info is the default).
func renderLog(entry logMsg) string {
	prefix := "INFO"
	style := infoStyle
	if entry.level == logDebug {
		prefix = "DEBUG"
		style = debugStyle
	}
	if entry.level == logError {
		prefix = "ERROR"
		style = errorStyle
	}
	return style.Render(fmt.Sprintf("[%s] %s: %s", prefix, cameradar.StepLabel(entry.step), entry.message))
}
|
||||
|
||||
// renderProgress renders the overall progress bar plus a "NN% x/y complete"
// counter. The bar shows the animated value (progressVisible) while the
// percentage label shows the exact weighted value.
func renderProgress(m *modelState) string {
	completed, total := progressCounts(m.steps, m.status)
	percent := progressPercent(m.steps, m.status, m.progressTotals, m.progressCounts)
	countLabel := dimStyle.Render(fmt.Sprintf("%3.0f%% %d/%d complete", percent*100, completed, total))
	return fmt.Sprintf("%s %s", m.progress.ViewAs(m.progressVisible), countLabel)
}
|
||||
|
||||
// progressCounts returns how many steps have finished (done or errored)
// and the total number of steps.
func progressCounts(steps []cameradar.Step, status map[cameradar.Step]state) (int, int) {
	if len(steps) == 0 {
		return 0, 0
	}

	completed := 0
	for _, step := range steps {
		switch status[step] {
		case stateDone, stateError:
			completed++
		}
	}

	return completed, len(steps)
}
|
||||
|
||||
// progressPercent computes overall progress in [0, 1] as the weighted sum
// of per-step progress (weights from stepWeights), clamped at 1.
func progressPercent(steps []cameradar.Step, status map[cameradar.Step]state, totals, counts map[cameradar.Step]int) float64 {
	weights := stepWeights()
	percent := 0.0
	for _, step := range steps {
		weight := weights[step]
		// Zero-weight steps (e.g. the summary) don't contribute.
		if weight <= 0 {
			continue
		}
		percent += weight * stepProgress(step, status, totals, counts)
	}
	if percent > 1 {
		return 1
	}
	return percent
}
|
||||
|
||||
// stepWeights returns each step's share of the overall progress bar.
// The weights sum to 1.0; the summary step contributes nothing.
func stepWeights() map[cameradar.Step]float64 {
	return map[cameradar.Step]float64{
		cameradar.StepScan:              0.15,
		cameradar.StepAttackRoutes:      0.25,
		cameradar.StepDetectAuth:        0.05,
		cameradar.StepAttackCredentials: 0.35,
		cameradar.StepValidateStreams:   0.2,
		cameradar.StepSummary:           0.0,
	}
}
|
||||
|
||||
func stepProgress(step cameradar.Step, status map[cameradar.Step]state, totals, counts map[cameradar.Step]int) float64 {
|
||||
if total := totals[step]; total > 0 {
|
||||
count := counts[step]
|
||||
if count >= total {
|
||||
return 1
|
||||
}
|
||||
return float64(count) / float64(total)
|
||||
}
|
||||
|
||||
switch status[step] {
|
||||
case stateDone, stateError:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func queueProgressUpdate(m *modelState) {
|
||||
desired := progressPercent(m.steps, m.status, m.progressTotals, m.progressCounts)
|
||||
if desired <= m.progressTarget {
|
||||
return
|
||||
}
|
||||
m.progressTarget = desired
|
||||
}
|
||||
|
||||
func advanceProgress(m *modelState) {
|
||||
if m.progressVisible >= m.progressTarget {
|
||||
return
|
||||
}
|
||||
remaining := m.progressTarget - m.progressVisible
|
||||
step := remaining * 0.2
|
||||
if step < 0.02 {
|
||||
step = 0.02
|
||||
}
|
||||
if m.quitting && step < 0.08 {
|
||||
step = 0.08
|
||||
}
|
||||
if remaining < step {
|
||||
m.progressVisible = m.progressTarget
|
||||
return
|
||||
}
|
||||
m.progressVisible += step
|
||||
}
|
||||
|
||||
func progressComplete(m modelState) bool {
|
||||
return m.progressVisible >= m.progressTarget
|
||||
}
|
||||
|
||||
func markStepComplete(m *modelState, step cameradar.Step) {
|
||||
if m.progressTotals[step] == 0 {
|
||||
m.progressTotals[step] = 1
|
||||
}
|
||||
if m.progressCounts[step] < m.progressTotals[step] {
|
||||
m.progressCounts[step] = m.progressTotals[step]
|
||||
}
|
||||
}
|
||||
|
||||
// progressWidth picks a progress-bar width for the given terminal width:
// 28 when the width is unknown (non-positive), then 20, 28, or 36 for
// narrow (<60), medium (<100), and wide terminals respectively.
func progressWidth(width int) int {
	switch {
	case width <= 0:
		return 28
	case width < 60:
		return 20
	case width < 100:
		return 28
	default:
		return 36
	}
}
|
||||
|
||||
func buildSummaryTables(streams []cameradar.Stream, width int, status map[cameradar.Step]state, maxRows int) []summaryTable {
|
||||
visibility := summaryVisibility(status)
|
||||
accessible, others := partitionStreams(streams)
|
||||
rows := append(buildSummaryRows(accessible, visibility), buildSummaryRows(others, visibility)...)
|
||||
if len(rows) == 0 {
|
||||
rows = []table.Row{emptySummaryRow()}
|
||||
}
|
||||
|
||||
if maxRows > 0 {
|
||||
switch {
|
||||
case len(rows) > maxRows:
|
||||
if maxRows == 1 {
|
||||
rows = []table.Row{summaryOverflowRow(len(rows))}
|
||||
} else {
|
||||
visibleRows := maxRows - 1
|
||||
hidden := len(rows) - visibleRows
|
||||
rows = append(rows[:visibleRows], summaryOverflowRow(hidden))
|
||||
}
|
||||
case len(rows) < maxRows:
|
||||
rows = padSummaryRows(rows, maxRows)
|
||||
}
|
||||
}
|
||||
|
||||
columns := summaryColumns(width, rows)
|
||||
model := table.New(
|
||||
table.WithColumns(columns),
|
||||
table.WithRows(rows),
|
||||
table.WithFocused(false),
|
||||
table.WithHeight(len(rows)),
|
||||
)
|
||||
model.SetStyles(summaryTableStyles())
|
||||
|
||||
return []summaryTable{{table: model}}
|
||||
}
|
||||
|
||||
func renderSummaryTitle(streams []cameradar.Stream) string {
|
||||
accessible, _ := partitionStreams(streams)
|
||||
return fmt.Sprintf("Summary - Streams (%d accessible / %d total)", len(accessible), len(streams))
|
||||
}
|
||||
|
||||
func summaryStatusAllDone() map[cameradar.Step]state {
|
||||
status := make(map[cameradar.Step]state)
|
||||
for _, step := range cameradar.Steps() {
|
||||
status[step] = stateDone
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
// emptyEntry is the placeholder rendered in summary cells that have no value yet.
const emptyEntry = "—"
|
||||
|
||||
func emptySummaryRow() table.Row {
|
||||
row := make(table.Row, summaryColumnCount)
|
||||
for i := range row {
|
||||
row[i] = emptyEntry
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
func padSummaryRows(rows []table.Row, maxRows int) []table.Row {
|
||||
for len(rows) < maxRows {
|
||||
rows = append(rows, emptySummaryRow())
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func summaryOverflowRow(hidden int) table.Row {
|
||||
row := emptySummaryRow()
|
||||
if hidden <= 0 {
|
||||
return row
|
||||
}
|
||||
label := "\u2026 1 more stream"
|
||||
if hidden > 1 {
|
||||
label = fmt.Sprintf("\u2026 %d more streams", hidden)
|
||||
}
|
||||
row[0] = label
|
||||
return row
|
||||
}
|
||||
|
||||
func renderSummaryTablePlain(columns []table.Column, rows []table.Row) string {
|
||||
colWidths := make([]int, len(columns))
|
||||
for i, col := range columns {
|
||||
colWidths[i] = max(col.Width, len([]rune(col.Title)))
|
||||
}
|
||||
|
||||
var builder strings.Builder
|
||||
builder.WriteString(renderSummaryBorder("┌", "┬", "┐", colWidths))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryRow(columnTitles(columns), colWidths))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryBorder("├", "┼", "┤", colWidths))
|
||||
for _, row := range rows {
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryRow(row, colWidths))
|
||||
}
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryBorder("└", "┴", "┘", colWidths))
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// renderSummaryBorder draws one horizontal border line: a run of width+2
// dashes per column (accounting for cell padding), joined by the given
// junction character and wrapped in the left/right corner characters.
func renderSummaryBorder(left, middle, right string, widths []int) string {
	var b strings.Builder
	b.WriteString(left)
	for i, width := range widths {
		if i > 0 {
			b.WriteString(middle)
		}
		b.WriteString(strings.Repeat("─", width+2))
	}
	b.WriteString(right)
	return b.String()
}
|
||||
|
||||
func renderSummaryRow(cells []string, widths []int) string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("│")
|
||||
for i, width := range widths {
|
||||
value := ""
|
||||
if i < len(cells) {
|
||||
value = cells[i]
|
||||
}
|
||||
builder.WriteString(" ")
|
||||
builder.WriteString(padAndTrim(value, width))
|
||||
builder.WriteString(" │")
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// padAndTrim fits value into exactly width runes: longer values are truncated,
// shorter ones right-padded with spaces. A non-positive width yields "".
func padAndTrim(value string, width int) string {
	if width <= 0 {
		return ""
	}
	runes := []rune(value)
	switch {
	case len(runes) > width:
		return string(runes[:width])
	case len(runes) < width:
		return string(runes) + strings.Repeat(" ", width-len(runes))
	default:
		return value
	}
}
|
||||
|
||||
func columnTitles(columns []table.Column) []string {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
titles := make([]string, len(columns))
|
||||
for i, col := range columns {
|
||||
titles[i] = col.Title
|
||||
}
|
||||
return titles
|
||||
}
|
||||
|
||||
func buildSummaryRows(streams []cameradar.Stream, visibility summaryVisibilityState) []table.Row {
|
||||
rows := make([]table.Row, 0, len(streams))
|
||||
for _, stream := range streams {
|
||||
target := fmt.Sprintf("%s:%d", stream.Address.String(), stream.Port)
|
||||
device := emptyEntry
|
||||
if visibility.showDevice && stream.Device != "" {
|
||||
device = stream.Device
|
||||
}
|
||||
|
||||
routes := emptyEntry
|
||||
if visibility.showRoutes && len(stream.Routes) > 0 {
|
||||
routes = strings.Join(stream.Routes, ", ")
|
||||
}
|
||||
|
||||
credentials := emptyEntry
|
||||
if visibility.showCredentials && stream.CredentialsFound {
|
||||
credentials = formatCredentials(stream)
|
||||
}
|
||||
|
||||
available := emptyEntry
|
||||
if visibility.showAvailable {
|
||||
available = "no"
|
||||
if stream.Available {
|
||||
available = "yes"
|
||||
}
|
||||
}
|
||||
|
||||
rtspURL := emptyEntry
|
||||
if visibility.showCredentials && stream.RouteFound && stream.CredentialsFound {
|
||||
rtspURL = formatRTSPURL(stream)
|
||||
}
|
||||
|
||||
authType := emptyEntry
|
||||
if visibility.showAuth {
|
||||
authType = authTypeLabel(stream.AuthenticationType)
|
||||
}
|
||||
|
||||
rows = append(rows, table.Row{
|
||||
target,
|
||||
device,
|
||||
authType,
|
||||
routes,
|
||||
credentials,
|
||||
available,
|
||||
rtspURL,
|
||||
adminPanelLabel(stream, visibility),
|
||||
})
|
||||
}
|
||||
|
||||
return rows
|
||||
}
|
||||
|
||||
func summaryColumns(width int, rows []table.Row) []table.Column {
|
||||
columns := []table.Column{
|
||||
{Title: "Target", Width: 18},
|
||||
{Title: "Device", Width: 14},
|
||||
{Title: "Auth", Width: 8},
|
||||
{Title: "Routes", Width: 18},
|
||||
{Title: "Credentials", Width: 16},
|
||||
{Title: "Available", Width: 9},
|
||||
{Title: "RTSP URL", Width: 30},
|
||||
{Title: "Admin", Width: 24},
|
||||
}
|
||||
columns[6].Width = maxColumnWidth(columns[6].Title, rows, 6, columns[6].Width)
|
||||
columns[7].Width = maxColumnWidth(columns[7].Title, rows, 7, columns[7].Width)
|
||||
|
||||
if width <= 0 {
|
||||
return columns
|
||||
}
|
||||
|
||||
columns = clampColumns(columns, max(width-2, 60))
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
func clampColumns(columns []table.Column, maxWidth int) []table.Column {
|
||||
padding := 2 * len(columns)
|
||||
contentWidth := 0
|
||||
for _, col := range columns {
|
||||
contentWidth += col.Width
|
||||
}
|
||||
contentWidth += padding
|
||||
if contentWidth <= maxWidth {
|
||||
return columns
|
||||
}
|
||||
|
||||
over := contentWidth - maxWidth
|
||||
shrinkOrder := []int{7, 3, 4, 1}
|
||||
minWidths := map[int]int{
|
||||
7: 10,
|
||||
3: 10,
|
||||
4: 10,
|
||||
1: 10,
|
||||
}
|
||||
for over > 0 {
|
||||
changed := false
|
||||
for _, idx := range shrinkOrder {
|
||||
minWidth := minWidths[idx]
|
||||
if columns[idx].Width > minWidth {
|
||||
columns[idx].Width--
|
||||
over--
|
||||
changed = true
|
||||
if over == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !changed {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
func summaryTableStyles() table.Styles {
|
||||
styles := table.DefaultStyles()
|
||||
styles.Header = styles.Header.
|
||||
BorderStyle(lipgloss.NormalBorder()).
|
||||
BorderForeground(lipgloss.Color("240")).
|
||||
BorderBottom(true).
|
||||
Bold(true)
|
||||
styles.Selected = lipgloss.NewStyle()
|
||||
styles.Cell = styles.Cell.Padding(0, 1)
|
||||
return styles
|
||||
}
|
||||
|
||||
func maxColumnWidth(title string, rows []table.Row, idx, minWidth int) int {
|
||||
width := max(len(title), minWidth)
|
||||
for _, row := range rows {
|
||||
if idx >= len(row) {
|
||||
continue
|
||||
}
|
||||
if len(row[idx]) > width {
|
||||
width = len(row[idx])
|
||||
}
|
||||
}
|
||||
return width
|
||||
}
|
||||
|
||||
func adminPanelLabel(stream cameradar.Stream, visibility summaryVisibilityState) string {
|
||||
if !visibility.showCredentials || !stream.CredentialsFound {
|
||||
return emptyEntry
|
||||
}
|
||||
return formatAdminPanelURL(stream)
|
||||
}
|
||||
|
||||
// summaryVisibilityState controls which summary columns show real values;
// per summaryVisibility, each flag is set once its producing step completes.
type summaryVisibilityState struct {
	showDevice bool // device column (set by the scan step)
	showRoutes bool // routes column (set by the attack-routes step)
	showAuth bool // auth-type column (set by the detect-auth step)
	showCredentials bool // credentials, RTSP URL, and admin columns (set by the attack-credentials step)
	showAvailable bool // availability column (set by the validate-streams step)
}
|
||||
|
||||
func summaryVisibility(status map[cameradar.Step]state) summaryVisibilityState {
|
||||
return summaryVisibilityState{
|
||||
showDevice: stepComplete(status, cameradar.StepScan),
|
||||
showRoutes: stepComplete(status, cameradar.StepAttackRoutes),
|
||||
showAuth: stepComplete(status, cameradar.StepDetectAuth),
|
||||
showCredentials: stepComplete(status, cameradar.StepAttackCredentials),
|
||||
showAvailable: stepComplete(status, cameradar.StepValidateStreams),
|
||||
}
|
||||
}
|
||||
|
||||
func stepComplete(status map[cameradar.Step]state, step cameradar.Step) bool {
|
||||
if status == nil {
|
||||
return false
|
||||
}
|
||||
switch status[step] {
|
||||
case stateDone, stateError:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func copyStreams(streams []cameradar.Stream) []cameradar.Stream {
|
||||
if len(streams) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]cameradar.Stream, len(streams))
|
||||
copy(cloned, streams)
|
||||
return cloned
|
||||
}
|
||||
@@ -0,0 +1,25 @@
|
||||
package ports
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// InferTunnelScheme returns the likely scheme for a given port and optional service name.
// A recognized service name ("rtsps", "https", or "http", case-insensitively,
// ignoring surrounding whitespace) takes precedence; otherwise well-known
// ports are consulted. Unknown combinations yield "".
func InferTunnelScheme(port uint16, serviceName string) string {
	switch name := strings.ToLower(strings.TrimSpace(serviceName)); name {
	case "rtsps", "https", "http":
		return name
	}

	switch port {
	case 443, 8443:
		return "https"
	case 80, 8080:
		return "http"
	default:
		return ""
	}
}
|
||||
+40
@@ -0,0 +1,40 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// progressMessagePrefix marks control messages that carry progress updates;
// the leading NUL byte keeps them distinguishable from ordinary message text.
const progressMessagePrefix = "\x00progress:"
|
||||
|
||||
// ProgressTotalMessage returns a progress control message that sets the total units for a step.
|
||||
func ProgressTotalMessage(total int) string {
|
||||
return progressMessagePrefix + "total=" + strconv.Itoa(total)
|
||||
}
|
||||
|
||||
// ProgressTickMessage returns a progress control message that increments a step's progress by one unit.
|
||||
func ProgressTickMessage() string {
|
||||
return progressMessagePrefix + "tick"
|
||||
}
|
||||
|
||||
// ParseProgressMessage parses a progress control message.
|
||||
// It returns a kind of "total" or "tick" and an optional value.
|
||||
func ParseProgressMessage(message string) (string, int, bool) {
|
||||
if !strings.HasPrefix(message, progressMessagePrefix) {
|
||||
return "", 0, false
|
||||
}
|
||||
|
||||
payload := strings.TrimPrefix(message, progressMessagePrefix)
|
||||
if payload == "tick" {
|
||||
return "tick", 1, true
|
||||
}
|
||||
if valuePart, ok := strings.CutPrefix(payload, "total="); ok {
|
||||
value, err := strconv.Atoi(valuePart)
|
||||
if err != nil {
|
||||
return "", 0, false
|
||||
}
|
||||
return "total", value, true
|
||||
}
|
||||
|
||||
return "", 0, false
|
||||
}
|
||||
@@ -0,0 +1,91 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
)
|
||||
|
||||
// AuthType represents the RTSP authentication method.
type AuthType int

// Supported authentication methods. AuthUnknown is the zero value, used
// before detection has run.
const (
	AuthUnknown AuthType = iota // method not yet determined
	AuthNone // no authentication required
	AuthBasic // Basic authentication
	AuthDigest // Digest authentication
)
|
||||
|
||||
// Stream represents a camera's stream, typically accessed over RTSP/RTSPS.
type Stream struct {
	// Device is the camera device name, if known.
	Device string `json:"device"`
	// Username and Password are the stream's credentials, if recovered.
	Username string `json:"username"`
	Password string `json:"password"`
	// Routes holds the discovered stream paths; Route() returns the first.
	Routes []string `json:"route"`
	Address netip.Addr `json:"address" validate:"required"`
	Port uint16 `json:"port" validate:"required"`

	CredentialsFound bool `json:"credentials_found"`
	RouteFound bool `json:"route_found"`
	Available bool `json:"available"`
	// Scheme overrides the URL scheme; when empty, "rtsp" is used.
	Scheme string `json:"scheme"`

	AuthenticationType AuthType `json:"authentication_type"`
}
|
||||
|
||||
func (s Stream) resolvedScheme() string {
|
||||
scheme := s.Scheme
|
||||
if scheme == "" {
|
||||
return "rtsp"
|
||||
}
|
||||
return scheme
|
||||
}
|
||||
|
||||
// parseScheme maps web schemes onto their RTSP equivalents: "http" becomes
// "rtsp" and "https" becomes "rtsps"; any other scheme passes through unchanged.
func parseScheme(scheme string) string {
	replacements := map[string]string{
		"http":  "rtsp",
		"https": "rtsps",
	}
	if replaced, ok := replacements[scheme]; ok {
		return replaced
	}
	return scheme
}
|
||||
|
||||
// Route returns this stream's route if there is one.
|
||||
func (s Stream) Route() string {
|
||||
if len(s.Routes) > 0 {
|
||||
return s.Routes[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// String builds the stream URL using the configured scheme, defaulting to rtsp.
|
||||
func (s Stream) String() string {
|
||||
scheme := s.resolvedScheme()
|
||||
|
||||
host := net.JoinHostPort(s.Address.String(), strconv.Itoa(int(s.Port)))
|
||||
path := "/" + strings.TrimLeft(strings.TrimSpace(s.Route()), "/")
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: scheme,
|
||||
Host: host,
|
||||
Path: path,
|
||||
}
|
||||
if s.Username != "" || s.Password != "" {
|
||||
u.User = url.UserPassword(s.Username, s.Password)
|
||||
}
|
||||
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// URL parses the stream URL into a *base.URL, normalizing http/https to rtsp/rtsps.
func (s Stream) URL() (*base.URL, error) {
	// s is a value receiver, so overwriting Scheme here only affects the URL
	// string built below; the caller's Stream is unchanged.
	s.Scheme = parseScheme(s.resolvedScheme())
	return base.ParseURL(s.String())
}
|
||||
@@ -0,0 +1,74 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Mode defines which UI renderer to use.
type Mode string

// Supported rendering modes. ModeAuto defers the renderer choice to the
// application, while ModeTUI and ModePlain request the interactive and
// plain-text renderers explicitly. ParseMode validates user input into
// one of these values.
const (
	ModeAuto Mode = "auto"
	ModeTUI Mode = "tui"
	ModePlain Mode = "plain"
)
|
||||
|
||||
// Step identifies a stage in the workflow.
type Step string

// Supported steps. Their execution order is given by Steps, and their
// human-readable names by StepLabel.
const (
	StepScan Step = "scan"
	StepAttackRoutes Step = "attack-routes"
	StepDetectAuth Step = "detect-auth"
	StepAttackCredentials Step = "attack-credentials"
	StepValidateStreams Step = "validate-streams"
	StepSummary Step = "summary"
)
|
||||
|
||||
// StepLabel returns the human-readable label for a step.
|
||||
func StepLabel(step Step) string {
|
||||
switch step {
|
||||
case StepScan:
|
||||
return "Scan targets"
|
||||
case StepAttackRoutes:
|
||||
return "Attack routes"
|
||||
case StepDetectAuth:
|
||||
return "Detect authentication"
|
||||
case StepAttackCredentials:
|
||||
return "Attack credentials"
|
||||
case StepValidateStreams:
|
||||
return "Validate streams"
|
||||
case StepSummary:
|
||||
return "Summary"
|
||||
default:
|
||||
return string(step)
|
||||
}
|
||||
}
|
||||
|
||||
// Steps returns the ordered list of steps.
|
||||
func Steps() []Step {
|
||||
return []Step{
|
||||
StepScan,
|
||||
StepAttackRoutes,
|
||||
StepDetectAuth,
|
||||
StepAttackCredentials,
|
||||
StepValidateStreams,
|
||||
StepSummary,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseMode parses a user-provided UI mode.
|
||||
func ParseMode(value string) (Mode, error) {
|
||||
mode := Mode(strings.ToLower(strings.TrimSpace(value)))
|
||||
switch mode {
|
||||
case ModeAuto, ModeTUI, ModePlain:
|
||||
return mode, nil
|
||||
case "":
|
||||
return ModeAuto, nil
|
||||
default:
|
||||
return ModeAuto, fmt.Errorf("invalid ui mode %q", value)
|
||||
}
|
||||
}
|
||||
+94
@@ -0,0 +1,94 @@
|
||||
package cameradar_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseMode(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want cameradar.Mode
|
||||
wantErr require.ErrorAssertionFunc
|
||||
wantErrMessage string
|
||||
}{
|
||||
{
|
||||
name: "auto",
|
||||
input: "auto",
|
||||
want: cameradar.ModeAuto,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "tui",
|
||||
input: "TUI",
|
||||
want: cameradar.ModeTUI,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "plain",
|
||||
input: "plain",
|
||||
want: cameradar.ModePlain,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
input: " ",
|
||||
want: cameradar.ModeAuto,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "invalid",
|
||||
input: "nope",
|
||||
want: cameradar.ModeAuto,
|
||||
wantErr: require.Error,
|
||||
wantErrMessage: "invalid ui mode",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
got, err := cameradar.ParseMode(test.input)
|
||||
test.wantErr(t, err)
|
||||
if test.wantErrMessage != "" {
|
||||
assert.ErrorContains(t, err, test.wantErrMessage)
|
||||
}
|
||||
assert.Equal(t, test.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepLabel(t *testing.T) {
|
||||
tests := []struct {
|
||||
step cameradar.Step
|
||||
want string
|
||||
}{
|
||||
{step: cameradar.StepScan, want: "Scan targets"},
|
||||
{step: cameradar.StepAttackRoutes, want: "Attack routes"},
|
||||
{step: cameradar.StepDetectAuth, want: "Detect authentication"},
|
||||
{step: cameradar.StepAttackCredentials, want: "Attack credentials"},
|
||||
{step: cameradar.StepValidateStreams, want: "Validate streams"},
|
||||
{step: cameradar.StepSummary, want: "Summary"},
|
||||
{step: cameradar.Step("custom"), want: "custom"},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.want, func(t *testing.T) {
|
||||
assert.Equal(t, test.want, cameradar.StepLabel(test.step))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSteps(t *testing.T) {
|
||||
assert.Equal(t, []cameradar.Step{
|
||||
cameradar.StepScan,
|
||||
cameradar.StepAttackRoutes,
|
||||
cameradar.StepDetectAuth,
|
||||
cameradar.StepAttackCredentials,
|
||||
cameradar.StepValidateStreams,
|
||||
cameradar.StepSummary,
|
||||
}, cameradar.Steps())
|
||||
}
|
||||
Reference in New Issue
Block a user