Compare commits

..

60 Commits

Author SHA1 Message Date
Brendan LE GLAUNEC d98d78cd73 v1.1.3 : Travis functional testing & minor changes 2016-11-23 08:49:19 +01:00
Brendan LE GLAUNEC e9ffb44b45 v1.1.2 : Travis build test integration & changes to docker deployment 2016-11-23 08:31:35 +01:00
Brendan LE GLAUNEC 9f85415d89 Added docker pulls badge to README.md 2016-11-14 13:40:54 +01:00
Brendan LE GLAUNEC c0d890acad Merge pull request #20 from EtixLabs/bugfix/version-1.1.1
v1.1.1 : Fixed functional tests & Multiple bugfixes
2016-11-12 13:13:14 +01:00
Brendan LE GLAUNEC 553524ae43 v1.1.1 : Fixed functional tests & Multiple bugfixes 2016-11-12 12:55:32 +01:00
Brendan LE GLAUNEC f80af6bd58 Merge pull request #18 from EtixLabs/bugfix/remove-unnecessary-null-pointer-checks
v1.1.0 : Removed unnecessary null pointer checks
2016-11-03 16:36:46 +01:00
Brendan LE GLAUNEC 6c127e4cbe v1.1.0 : Removed unnecessary null pointer checks 2016-11-03 16:34:54 +01:00
Brendan LE GLAUNEC c16c8c0aaa v1.1.0 : Docker Hub Readme changes 2016-11-03 08:45:20 +01:00
Brendan LE GLAUNEC be74c3c814 v1.1.0 : Deployment updated & Docker Hub 2016-11-02 14:42:33 +01:00
Brendan LE GLAUNEC 2f93ddd7e5 v1.1.0 : Updated deployment 2016-11-02 14:42:22 +01:00
Brendan LE GLAUNEC 108f869a43 v1.1.0 : Updated package 2016-11-02 08:42:45 +01:00
Brendan LE GLAUNEC 6685f74a90 v1.1.0 : Fixed multithreading & added timeout to ffmpeg 2016-11-02 07:56:03 +01:00
Brendan LE GLAUNEC 37176292d0 Merge branch 'master' of github.com:EtixLabs/cameradar 2016-10-31 10:13:52 +01:00
Brendan LE GLAUNEC a49b8ef481 v1.1.0 : Updated package 2016-10-31 10:02:53 +01:00
Brendan LE GLAUNEC 8e26751247 v1.1.0 : Added GST RTSP standard in cmd line 2016-10-31 10:01:13 +01:00
Brendan LE GLAUNEC 2564943ae7 Merge pull request #15 from EtixLabs/feature/improve-cameradar-ux
Feature/improve cameradar ux last details
2016-10-29 08:59:48 +02:00
Brendan LE GLAUNEC 74b4590758 v1.1.0 : Added package generation to README 2016-10-29 08:57:51 +02:00
Brendan LE GLAUNEC 9e9c1ba5b6 v1.1.0 : Updated package name & added package generation script 2016-10-29 08:55:14 +02:00
Brendan LE GLAUNEC 9d78e84dc0 Merge pull request #14 from EtixLabs/feature/improve-cameradar-ux
v1.1.0 : Multithreading & UX update
2016-10-28 11:15:36 +02:00
Brendan LE GLAUNEC ecd318d0c2 v1.1.0 : Update Readme & Removed debug logs 2016-10-28 10:53:41 +02:00
Brendan LE GLAUNEC 58b101ed60 v1.1.0 : Multithreading & UX update 2016-10-28 09:50:37 +02:00
Brendan LE GLAUNEC 7e6c501582 Merge branch 'master' of github.com:EtixLabs/cameradar 2016-10-24 14:02:59 +02:00
Brendan LE GLAUNEC d9a221f9c6 Added standard Comelit RTSP URL to dictionary 2016-10-24 14:02:51 +02:00
Brendan LE GLAUNEC c10525b50e Added Cameradar logo to README.md 2016-10-13 15:51:30 +02:00
Brendan LE GLAUNEC 67b118a82e Added Cameradar logo 2016-10-13 15:43:48 +02:00
Brendan LE GLAUNEC 1f5db9baa0 Updated testing binary 2016-09-12 14:28:28 +02:00
Brendan LE GLAUNEC 4ef463d8a9 Merge branch 'master' of github.com:EtixLabs/cameradar 2016-08-31 13:34:01 +02:00
Brendan LE GLAUNEC ae3329bd25 v1.0.5 : Fixed a potential failure in MySQL CM and fixed code 2016-08-31 12:46:21 +02:00
Brendan LE GLAUNEC 46e17bb0ee Create CHANGELOG.md 2016-08-31 10:36:39 +02:00
Brendan LE GLAUNEC 27b296c9d2 v1.0.4 : Fixed nmap package detection 2016-08-31 09:39:08 +02:00
Brendan LE GLAUNEC 006c0139be Merge branch 'master' of github.com:EtixLabs/cameradar 2016-08-30 16:59:08 +02:00
Brendan LE GLAUNEC 63119d3ff3 v1.0.3 : Corrected GStreamer check 2016-08-30 16:58:46 +02:00
Brendan LE GLAUNEC 5859e9c595 Removed forgotten logs 2016-08-26 12:59:46 +02:00
Brendan LE GLAUNEC d0220ceb7f v1.0.2 - Fix issues with MySQL CM 2016-08-24 12:11:54 +02:00
Brendan LE GLAUNEC 064a6ff588 v1.0.1 : Removed useless text from the Readme 2016-07-08 15:05:17 +02:00
Brendan LE GLAUNEC 9a269bfe0e v1.0.1 : Updated to 16.04 & removed boost dependency 2016-07-07 17:47:08 +02:00
Brendan LE GLAUNEC c44b933a83 v 1.0.0 - Changed tag - Updated deployment version 2016-06-21 11:00:35 +02:00
Brendan LE GLAUNEC 1f5e9fc502 v 1.0.0 - Added functional testing - Needs Travis integration 2016-06-21 10:53:24 +02:00
Brendan LE GLAUNEC 08231074b9 Update README.md 2016-06-07 12:38:17 +02:00
Brendan LE GLAUNEC c6d801750e Cameradar now waits for MySQL before being deployed 2016-06-03 09:07:07 +02:00
Brendan LE GLAUNEC 2cf49a8db4 Update CMakeLists.txt 2016-06-03 08:49:55 +02:00
Brendan LE GLAUNEC 4fba8a8594 Update README.md 2016-06-02 10:13:48 +02:00
Brendan LE GLAUNEC 76365e3a07 v0.2.2 : Cameradar now supports badly configured cameras 2016-05-27 14:36:52 +02:00
Brendan LE GLAUNEC e6a38af241 Update README.md 2016-05-26 10:52:29 +02:00
Brendan LE GLAUNEC a4ad49c1a7 Quick MySQL docker deployment & code cleaning 2016-05-24 08:56:11 +02:00
Brendan LE GLAUNEC 0ac1046138 MySQL Cache Manager & code cleanup 2016-05-23 21:22:12 +02:00
Brendan LE GLAUNEC eef9c6f562 Deployment / CPack / Docker / Boost / Versioning 2016-05-23 21:20:56 +02:00
Brendan LE GLAUNEC cf18d869e0 JsonCPP should now be downloaded and included properly 2016-05-23 21:19:40 +02:00
Brendan LE GLAUNEC 4017429835 Initial commit 2016-05-23 21:14:59 +02:00
Brendan LE GLAUNEC 615f14d614 Update README.md 2016-05-23 17:14:41 +02:00
Brendan LE GLAUNEC d09b7abea9 Cloning method updated to HTTPS / TODO updated 2016-05-21 00:58:22 +02:00
Brendan LE GLAUNEC fdb146f019 Merge pull request #7 from EtixLabs/feature/docker-deployment
Deployment / CPack / Docker / Boost / Versioning
2016-05-21 00:43:11 +02:00
Brendan LE GLAUNEC 77446189dd Deployment / CPack / Docker / Boost / Versioning 2016-05-21 00:39:14 +02:00
Brendan LE GLAUNEC e4ba477b06 Merge pull request #6 from EtixLabs/feature/docker-deployment
Updated README to add future improvement
2016-05-20 21:37:58 +02:00
Brendan LE GLAUNEC 6ae2608f8e Updated README to add future improvement 2016-05-20 21:36:48 +02:00
Brendan LE GLAUNEC 6908c7bcac Merge pull request #5 from EtixLabs/develop
JsonCPP should now be downloaded and included properly
2016-05-20 21:33:33 +02:00
Brendan LE GLAUNEC adbbe244b0 JsonCPP should now be downloaded and included properly 2016-05-20 21:30:36 +02:00
Brendan LE GLAUNEC 8d6de630a5 Merge pull request #4 from EtixLabs/develop
Dependencies added & README updated
2016-05-20 17:03:43 +02:00
Brendan LE GLAUNEC 9aa86a5c2d Dependencies added & README updated 2016-05-20 17:02:33 +02:00
Brendan LE GLAUNEC 201d7e31c6 Initial commit 2016-05-20 16:13:22 +02:00
205 changed files with 20944 additions and 9450 deletions
+61
View File
@@ -0,0 +1,61 @@
---
Language: Cpp
# BasedOnStyle: Mozilla
AccessModifierOffset: -4
ConstructorInitializerIndentWidth: 0
AlignEscapedNewlinesLeft: false
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortIfStatementsOnASingleLine: true
AllowShortLoopsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AlwaysBreakAfterDefinitionReturnType: true
AlwaysBreakTemplateDeclarations: true
AlwaysBreakBeforeMultilineStrings: true
BreakBeforeBinaryOperators: None
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
BinPackParameters: false
BinPackArguments: false
ColumnLimit: 100
ConstructorInitializerAllOnOneLineOrOnePerLine: true
DerivePointerAlignment: false
ExperimentalAutoDetectBinPacking: false
IndentCaseLabels: false
IndentWrappedFunctionNames: false
IndentFunctionDeclarationAfterType: false
MaxEmptyLinesToKeep: 1
KeepEmptyLinesAtTheStartOfBlocks: false
NamespaceIndentation: None
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakString: 1000
PenaltyBreakFirstLessLess: 120
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Left
SpacesBeforeTrailingComments: 1
Cpp11BracedListStyle: false
Standard: Cpp11
IndentWidth: 4
TabWidth: 8
UseTab: Never
BreakBeforeBraces: Attach
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpacesInAngles: false
SpaceInEmptyParentheses: false
SpacesInCStyleCastParentheses: false
SpaceAfterCStyleCast: false
SpacesInContainerLiterals: true
SpaceBeforeAssignmentOperators: true
ContinuationIndentWidth: 4
CommentPragmas: '^ IWYU pragma:'
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
SpaceBeforeParens: ControlStatements
DisableFormat: false
...
-2
View File
@@ -1,2 +0,0 @@
*.go @Ullaakut @whiteboxsolutions @nblair2
*.md @Ullaakut @whiteboxsolutions @nblair2
-103
View File
@@ -1,103 +0,0 @@
name: Bug report
description: Create a report to help Cameradar improve
labels:
- needs-triage
body:
- type: markdown
attributes:
value: |
Please make sure your problem is not already addressed in another issue.
- type: textarea
id: description
attributes:
label: Description
description: Please give a clear and concise description of the bug.
validations:
required: true
- type: textarea
id: version
attributes:
label: Cameradar version
description: Output of `cameradar version`
render: bash
placeholder: |
Version: v6.0.2-SNAPSHOT-c11e321
Commit: c11e3217ea0b1ea9e45d0da4c072e07775bde68c
Build date: 2026-02-03T10:02:30Z
Nmap: 7.94SVN
validations:
required: true
- type: dropdown
id: env
attributes:
label: Environment
description: How do you run cameradar?
options:
- "`ullaakut/cameradar` docker image"
- Precompiled binary from GitHub releases
- Custom docker image
- Custom binary build
default: 0
validations:
required: true
- type: textarea
id: os
attributes:
label: Operating system
description: Operating system where you run cameradar.
render: bash
placeholder: |
- OS: <Windows | macOS | Linux | Other>
- OS version: <version>
- Architecture: <arch>
validations:
required: false
- type: textarea
id: cmd
attributes:
label: Command
description: The command that you ran and all of its arguments. Make sure to redact any sensitive information. Make sure to run your command in debug mode.
placeholder: |
E.g. `docker run --net=host -it ullaakut/cameradar -t localhost --debug`
validations:
required: true
- type: textarea
id: output
attributes:
label: Output logs
description: Output of the command you ran, including any error messages. Make sure to redact any sensitive information.
placeholder: |
2026-02-03T09:33:24Z [INFO] Startup: Running cameradar version 6.0.2-SNAPSHOT-75bf524, commit 75bf524
2026-02-03T09:33:24Z [INFO] Startup: targets: localhost
2026-02-03T09:33:24Z [INFO] Startup: ports: 554, 5554, 8554, http
...
Accessible streams: 1
• 127.0.0.1:8554 (GStreamer rtspd)
Authentication: digest
Routes: live.sdp
Credentials: admin:12345
Availability: yes
RTSP URL: rtsp://admin:12345@127.0.0.1:8554/live.sdp
Admin panel: http://127.0.0.1/
- type: textarea
id: expected
attributes:
label: Expected behavior
description: What is the expected behavior?
placeholder: |
E.g. "Cameradar should have been able to find the camera's RTSP stream using the provided credentials."
- type: textarea
id: additional
attributes:
label: Additional Info
description: Additional info you want to provide such as system info, target info, network conditions etc.
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/Ullaakut/cameradar/blob/master/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true
-5
View File
@@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Cameradar Community discussion board
url: https://github.com/Ullaakut/cameradar/discussions
about: Please ask and answer questions here.
@@ -1,31 +0,0 @@
name: Feature request
description: Propose a feature or enhancement to help Cameradar improve
labels:
- needs-triage
body:
- type: markdown
attributes:
value: |
Please make sure your request is not already proposed in another issue.
- type: textarea
id: description
attributes:
label: Description
description: Please give a clear and concise description of the feature request.
validations:
required: true
- type: textarea
id: additional
attributes:
label: Additional Info
description: Additional info you want to provide.
validations:
required: false
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/Ullaakut/cameradar/blob/master/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true
-9
View File
@@ -1,9 +0,0 @@
## Goal of this PR
<!-- A brief description of the change being made with this pull request. -->
Fixes #
## How did I test it?
<!-- A brief description of the steps taken to test this pull request. -->
-20
View File
@@ -1,20 +0,0 @@
version: 2
updates:
- package-ecosystem: gomod
directory: "/"
schedule:
interval: weekly
groups:
all:
patterns:
- "*"
open-pull-requests-limit: 10
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: weekly
groups:
all:
patterns:
- "*"
open-pull-requests-limit: 10
@@ -1,637 +0,0 @@
---
applyTo: '.github/workflows/*.yml'
description: 'Comprehensive guide for building robust, secure, and efficient CI/CD pipelines using GitHub Actions. Covers workflow structure, jobs, steps, environment variables, secret management, caching, matrix strategies, testing, and deployment strategies.'
---
# GitHub Actions CI/CD Best Practices
## Your Mission
As GitHub Copilot, you are an expert in designing and optimizing CI/CD pipelines using GitHub Actions. Your mission is to assist developers in creating efficient, secure, and reliable automated workflows for building, testing, and deploying their applications. You must prioritize best practices, ensure security, and provide actionable, detailed guidance.
## Core Concepts and Structure
### **1. Workflow Structure (`.github/workflows/*.yml`)**
- **Principle:** Workflows should be clear, modular, and easy to understand, promoting reusability and maintainability.
- **Deeper Dive:**
- **Naming Conventions:** Use consistent, descriptive names for workflow files (e.g., `build-and-test.yml`, `deploy-prod.yml`).
- **Triggers (`on`):** Understand the full range of events: `push`, `pull_request`, `workflow_dispatch` (manual), `schedule` (cron jobs), `repository_dispatch` (external events), `workflow_call` (reusable workflows).
- **Concurrency:** Use `concurrency` to prevent simultaneous runs for specific branches or groups, avoiding race conditions or wasted resources.
- **Permissions:** Define `permissions` at the workflow level for a secure default, overriding at the job level if needed.
- **Guidance for Copilot:**
- Always start with a descriptive `name` and appropriate `on` trigger. Suggest granular triggers for specific use cases (e.g., `on: push: branches: [main]` vs. `on: pull_request`).
- Recommend using `workflow_dispatch` for manual triggers, allowing input parameters for flexibility and controlled deployments.
- Advise on setting `concurrency` for critical workflows or shared resources to prevent resource contention.
- Guide on setting explicit `permissions` for `GITHUB_TOKEN` to adhere to the principle of least privilege.
- **Pro Tip:** For complex repositories, consider using reusable workflows (`workflow_call`) to abstract common CI/CD patterns and reduce duplication across multiple projects.
### **2. Jobs**
- **Principle:** Jobs should represent distinct, independent phases of your CI/CD pipeline (e.g., build, test, deploy, lint, security scan).
- **Deeper Dive:**
- **`runs-on`:** Choose appropriate runners. `ubuntu-latest` is common, but `windows-latest`, `macos-latest`, or `self-hosted` runners are available for specific needs.
- **`needs`:** Clearly define dependencies. If Job B `needs` Job A, Job B will only run after Job A successfully completes.
- **`outputs`:** Pass data between jobs using `outputs`. This is crucial for separating concerns (e.g., build job outputs artifact path, deploy job consumes it).
- **`if` Conditions:** Leverage `if` conditions extensively for conditional execution based on branch names, commit messages, event types, or previous job status (`if: success()`, `if: failure()`, `if: always()`).
- **Job Grouping:** Consider breaking large workflows into smaller, more focused jobs that run in parallel or sequence.
- **Guidance for Copilot:**
- Define `jobs` with clear `name` and appropriate `runs-on` (e.g., `ubuntu-latest`, `windows-latest`, `self-hosted`).
- Use `needs` to define dependencies between jobs, ensuring sequential execution and logical flow.
- Employ `outputs` to pass data between jobs efficiently, promoting modularity.
- Utilize `if` conditions for conditional job execution (e.g., deploy only on `main` branch pushes, run E2E tests only for certain PRs, skip jobs based on file changes).
- **Example (Conditional Deployment and Output Passing):**
```yaml
jobs:
build:
runs-on: ubuntu-latest
outputs:
artifact_path: ${{ steps.package_app.outputs.path }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v3
with:
node-version: 18
- name: Install dependencies and build
run: |
npm ci
npm run build
- name: Package application
id: package_app
run: | # Assume this creates a 'dist.zip' file
zip -r dist.zip dist
echo "path=dist.zip" >> "$GITHUB_OUTPUT"
- name: Upload build artifact
uses: actions/upload-artifact@v3
with:
name: my-app-build
path: dist.zip
deploy-staging:
runs-on: ubuntu-latest
needs: build
if: github.ref == 'refs/heads/develop' || github.ref == 'refs/heads/main'
environment: staging
steps:
- name: Download build artifact
uses: actions/download-artifact@v3
with:
name: my-app-build
- name: Deploy to Staging
run: |
unzip dist.zip
echo "Deploying ${{ needs.build.outputs.artifact_path }} to staging..."
# Add actual deployment commands here
```
### **3. Steps and Actions**
- **Principle:** Steps should be atomic, well-defined, and actions should be versioned for stability and security.
- **Deeper Dive:**
- **`uses`:** Referencing marketplace actions (e.g., `actions/checkout@v4`, `actions/setup-node@v3`) or custom actions. Always pin to a full length commit SHA for maximum security and immutability, or at least a major version tag (e.g., `@v4`). Avoid pinning to `main` or `latest`.
- **`name`:** Essential for clear logging and debugging. Make step names descriptive.
- **`run`:** For executing shell commands. Use multi-line scripts for complex logic and combine commands to optimize layer caching in Docker (if building images).
- **`env`:** Define environment variables at the step or job level. Do not hardcode sensitive data here.
- **`with`:** Provide inputs to actions. Ensure all required inputs are present.
- **Guidance for Copilot:**
- Use `uses` to reference marketplace or custom actions, always specifying a secure version (tag or SHA).
- Use `name` for each step for readability in logs and easier debugging.
- Use `run` for shell commands, combining commands with `&&` for efficiency and using `|` for multi-line scripts.
- Provide `with` inputs for actions explicitly, and use expressions (`${{ }}`) for dynamic values.
- **Security Note:** Audit marketplace actions before use. Prefer actions from trusted sources (e.g., `actions/` organization) and review their source code if possible. Use `dependabot` for action version updates.
## Security Best Practices in GitHub Actions
### **1. Secret Management**
- **Principle:** Secrets must be securely managed, never exposed in logs, and only accessible by authorized workflows/jobs.
- **Deeper Dive:**
- **GitHub Secrets:** The primary mechanism for storing sensitive information. Encrypted at rest and only decrypted when passed to a runner.
- **Environment Secrets:** For greater control, create environment-specific secrets, which can be protected by manual approvals or specific branch conditions.
- **Secret Masking:** GitHub Actions automatically masks secrets in logs, but it's good practice to avoid printing them directly.
- **Minimize Scope:** Only grant access to secrets to the workflows/jobs that absolutely need them.
- **Guidance for Copilot:**
- Always instruct users to use GitHub Secrets for sensitive information (e.g., API keys, passwords, cloud credentials, tokens).
- Access secrets via `secrets.<SECRET_NAME>` in workflows.
- Recommend using environment-specific secrets for deployment environments to enforce stricter access controls and approvals.
- Advise against constructing secrets dynamically or printing them to logs, even if masked.
- **Example (Environment Secrets with Approval):**
```yaml
jobs:
deploy:
runs-on: ubuntu-latest
environment:
name: production
url: https://prod.example.com
steps:
- name: Deploy to production
env:
PROD_API_KEY: ${{ secrets.PROD_API_KEY }}
run: ./deploy-script.sh
```
### **2. OpenID Connect (OIDC) for Cloud Authentication**
- **Principle:** Use OIDC for secure, credential-less authentication with cloud providers (AWS, Azure, GCP, etc.), eliminating the need for long-lived static credentials.
- **Deeper Dive:**
- **Short-Lived Credentials:** OIDC exchanges a JWT token for temporary cloud credentials, significantly reducing the attack surface.
- **Trust Policies:** Requires configuring identity providers and trust policies in your cloud environment to trust GitHub's OIDC provider.
- **Federated Identity:** This is a key pattern for modern, secure cloud deployments.
- **Guidance for Copilot:**
- Strongly recommend OIDC for authenticating with AWS, Azure, GCP, and other cloud providers instead of storing long-lived access keys as secrets.
- Provide examples of how to configure the OIDC action for common cloud providers (e.g., `aws-actions/configure-aws-credentials@v4`).
- Explain the concept of trust policies and how they relate to OIDC setup.
- **Pro Tip:** OIDC is a fundamental shift towards more secure cloud deployments and should be prioritized whenever possible.
### **3. Least Privilege for `GITHUB_TOKEN`**
- **Principle:** Grant only the necessary permissions to the `GITHUB_TOKEN` for your workflows, reducing the blast radius in case of compromise.
- **Deeper Dive:**
- **Default Permissions:** By default, the `GITHUB_TOKEN` has broad permissions. This should be explicitly restricted.
- **Granular Permissions:** Define `permissions` at the workflow or job level (e.g., `contents: read`, `pull-requests: write`, `issues: read`).
- **Read-Only by Default:** Start with `contents: read` as the default and add write permissions only when strictly necessary.
- **Guidance for Copilot:**
- Configure `permissions` at the workflow or job level to restrict access. Always prefer `contents: read` as the default.
- Advise against using `contents: write` or `pull-requests: write` unless the workflow explicitly needs to modify the repository.
- Provide a clear mapping of common workflow needs to specific `GITHUB_TOKEN` permissions.
- **Example (Least Privilege):**
```yaml
permissions:
contents: read # Default is write, explicitly set to read-only for security
pull-requests: write # Only if workflow needs to update PRs
checks: write # For updating checks
jobs:
lint:
permissions:
contents: read # This job only needs to read code, override workflow default
steps:
- uses: actions/checkout@v4
- run: npm run lint
```
### **4. Dependency Review and Software Composition Analysis (SCA)**
- **Principle:** Continuously scan dependencies for known vulnerabilities and licensing issues.
- **Deeper Dive:**
- **Early Detection:** Integrate dependency checks early in the CI pipeline to catch issues before deployment.
- **Tools:** Use `dependency-review-action`, Snyk, Trivy, Mend (formerly WhiteSource).
- **Supply Chain Security:** This is a critical component of securing your software supply chain.
- **Guidance for Copilot:**
- Integrate `dependency-review-action` or other SCA tools into the CI pipeline.
- Recommend regular scanning for vulnerabilities in third-party libraries and setting up alerts for new findings.
- Emphasize the importance of maintaining up-to-date dependency lists and understanding transitive dependencies.
### **5. Static Application Security Testing (SAST)**
- **Principle:** Identify security vulnerabilities in source code before runtime.
- **Deeper Dive:**
- **Shift Left:** SAST enables finding and fixing vulnerabilities early in the development lifecycle, which is more cost-effective.
- **Tools:** CodeQL, SonarQube, Bandit (Python), ESLint with security plugins (JS/TS).
- **Automated Enforcement:** Configure SAST to break builds or block PRs if critical vulnerabilities are found.
- **Guidance for Copilot:**
- Integrate SAST tools (e.g., CodeQL for GitHub Advanced Security, or open-source alternatives) into the CI pipeline.
    - Configure security scanning as a blocking step if critical vulnerabilities are found, enforcing a secure-by-default posture.
- Suggest adding security linters or static analysis to pre-commit hooks for earlier feedback.
### **6. Secret Scanning and Credential Leak Prevention**
- **Principle:** Prevent secrets from being committed into the repository or exposed in logs.
- **Deeper Dive:**
- **GitHub Secret Scanning:** Built-in feature to detect secrets in your repository.
- **Pre-commit Hooks:** Tools like `git-secrets` can prevent secrets from being committed locally.
- **Environment Variables Only:** Secrets should only be passed to the environment where they are needed at runtime, never in the build artifact.
- **Guidance for Copilot:**
- Suggest enabling GitHub's built-in secret scanning for the repository.
- Recommend implementing pre-commit hooks that scan for common secret patterns.
- Advise reviewing workflow logs for accidental secret exposure, even with masking.
### **7. Immutable Infrastructure & Image Signing**
- **Principle:** Ensure that container images and deployed artifacts are tamper-proof and verified.
- **Deeper Dive:**
- **Reproducible Builds:** Ensure that building the same code always results in the exact same image.
- **Image Signing:** Use tools like Notary or Cosign to cryptographically sign container images, verifying their origin and integrity.
- **Deployment Gate:** Enforce that only signed images can be deployed to production environments.
- **Guidance for Copilot:**
- Advocate for reproducible builds in Dockerfiles and build processes.
- Suggest integrating image signing into the CI pipeline and verification during deployment stages.
## Optimization and Performance
### **1. Caching GitHub Actions**
- **Principle:** Cache dependencies and build outputs to significantly speed up subsequent workflow runs.
- **Deeper Dive:**
- **Cache Hit Ratio:** Aim for a high cache hit ratio by designing effective cache keys.
- **Cache Keys:** Use a unique key based on file hashes (e.g., `hashFiles('**/package-lock.json')`, `hashFiles('**/requirements.txt')`) to invalidate the cache only when dependencies change.
- **Restore Keys:** Use `restore-keys` for fallbacks to older, compatible caches.
- **Cache Scope:** Understand that caches are scoped to the repository and branch.
- **Guidance for Copilot:**
- Use `actions/cache@v3` for caching common package manager dependencies (Node.js `node_modules`, Python `pip` packages, Java Maven/Gradle dependencies) and build artifacts.
- Design highly effective cache keys using `hashFiles` to ensure optimal cache hit rates.
- Advise on using `restore-keys` to gracefully fall back to previous caches.
- **Example (Advanced Caching for Monorepo):**
```yaml
- name: Cache Node.js modules
uses: actions/cache@v3
with:
path: |
~/.npm
./node_modules # For monorepos, cache specific project node_modules
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}-${{ github.run_id }}
restore-keys: |
${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}-
${{ runner.os }}-node-
```
### **2. Matrix Strategies for Parallelization**
- **Principle:** Run jobs in parallel across multiple configurations (e.g., different Node.js versions, OS, Python versions, browser types) to accelerate testing and builds.
- **Deeper Dive:**
- **`strategy.matrix`:** Define a matrix of variables.
- **`include`/`exclude`:** Fine-tune combinations.
- **`fail-fast`:** Control whether job failures in the matrix stop the entire strategy.
- **Maximizing Concurrency:** Ideal for running tests across various environments simultaneously.
- **Guidance for Copilot:**
- Utilize `strategy.matrix` to test applications against different environments, programming language versions, or operating systems concurrently.
- Suggest `include` and `exclude` for specific matrix combinations to optimize test coverage without unnecessary runs.
- Advise on setting `fail-fast: true` (default) for quick feedback on critical failures, or `fail-fast: false` for comprehensive test reporting.
- **Example (Multi-version, Multi-OS Test Matrix):**
```yaml
jobs:
test:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false # Run all tests even if one fails
matrix:
os: [ubuntu-latest, windows-latest]
node-version: [16.x, 18.x, 20.x]
browser: [chromium, firefox]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
- name: Install Playwright browsers
run: npx playwright install ${{ matrix.browser }}
- name: Run tests
run: npm test
```
### **3. Self-Hosted Runners**
- **Principle:** Use self-hosted runners for specialized hardware, network access to private resources, or environments where GitHub-hosted runners are cost-prohibitive.
- **Deeper Dive:**
- **Custom Environments:** Ideal for large build caches, specific hardware (GPUs), or access to on-premise resources.
- **Cost Optimization:** Can be more cost-effective for very high usage.
- **Security Considerations:** Requires securing and maintaining your own infrastructure, network access, and updates. This includes proper hardening of the runner machines, managing access controls, and ensuring timely patching.
- **Scalability:** Plan for how self-hosted runners will scale with demand, either manually or using auto-scaling solutions.
- **Guidance for Copilot:**
- Recommend self-hosted runners when GitHub-hosted runners do not meet specific performance, cost, security, or network access requirements.
- Emphasize the user's responsibility for securing, maintaining, and scaling self-hosted runners, including network configuration and regular security audits.
- Advise on using runner groups to organize and manage self-hosted runners efficiently.
### **4. Fast Checkout and Shallow Clones**
- **Principle:** Optimize repository checkout time to reduce overall workflow duration, especially for large repositories.
- **Deeper Dive:**
    - **`fetch-depth`:** Controls how much of the Git history is fetched. A `fetch-depth` of `1` is sufficient for most CI/CD builds, as only the latest commit is usually needed. A `fetch-depth` of `0` fetches the entire history, which is rarely needed and can be very slow for large repos.
- **`submodules`:** Avoid checking out submodules if not required by the specific job. Fetching submodules adds significant overhead.
- **`lfs`:** Manage Git LFS (Large File Storage) files efficiently. If not needed, set `lfs: false`.
- **Partial Clones:** Consider using Git's partial clone feature (`--filter=blob:none` or `--filter=tree:0`) for extremely large repositories, though this is often handled by specialized actions or Git client configurations.
- **Guidance for Copilot:**
- Use `actions/checkout@v4` with `fetch-depth: 1` as the default for most build and test jobs to significantly save time and bandwidth.
- Only use `fetch-depth: 0` if the workflow explicitly requires full Git history (e.g., for release tagging, deep commit analysis, or `git blame` operations).
- Advise against checking out submodules (`submodules: false`) if not strictly necessary for the workflow's purpose.
- Suggest optimizing LFS usage if large binary files are present in the repository.
### **5. Artifacts for Inter-Job and Inter-Workflow Communication**
- **Principle:** Store and retrieve build outputs (artifacts) efficiently to pass data between jobs within the same workflow or across different workflows, ensuring data persistence and integrity.
- **Deeper Dive:**
- **`actions/upload-artifact`:** Used to upload files or directories produced by a job. Artifacts are automatically compressed and can be downloaded later.
- **`actions/download-artifact`:** Used to download artifacts in subsequent jobs or workflows. You can download all artifacts or specific ones by name.
- **`retention-days`:** Crucial for managing storage costs and compliance. Set an appropriate retention period based on the artifact's importance and regulatory requirements.
- **Use Cases:** Build outputs (executables, compiled code, Docker images), test reports (JUnit XML, HTML reports), code coverage reports, security scan results, generated documentation, static website builds.
- **Limitations:** Artifacts are immutable once uploaded. Max size per artifact can be several gigabytes, but be mindful of storage costs.
- **Guidance for Copilot:**
  - Use `actions/upload-artifact@v4` and `actions/download-artifact@v4` (the `@v3` versions are deprecated) to reliably pass large files between jobs within the same workflow or across different workflows, promoting modularity and efficiency.
- Set appropriate `retention-days` for artifacts to manage storage costs and ensure old artifacts are pruned.
- Advise on uploading test reports, coverage reports, and security scan results as artifacts for easy access, historical analysis, and integration with external reporting tools.
- Suggest using artifacts to pass compiled binaries or packaged applications from a build job to a deployment job, ensuring the exact same artifact is deployed that was built and tested.
## Comprehensive Testing in CI/CD (Expanded)
### **1. Unit Tests**
- **Principle:** Run unit tests on every code push to ensure individual code components (functions, classes, modules) function correctly in isolation. They are the fastest and most numerous tests.
- **Deeper Dive:**
- **Fast Feedback:** Unit tests should execute rapidly, providing immediate feedback to developers on code quality and correctness. Parallelization of unit tests is highly recommended.
- **Code Coverage:** Integrate code coverage tools (e.g., Istanbul for JS, Coverage.py for Python, JaCoCo for Java) and enforce minimum coverage thresholds. Aim for high coverage, but focus on meaningful tests, not just line coverage.
- **Test Reporting:** Publish test results using `actions/upload-artifact` (e.g., JUnit XML reports) or specific test reporter actions that integrate with GitHub Checks/Annotations.
- **Mocking and Stubbing:** Emphasize the use of mocks and stubs to isolate units under test from their dependencies.
- **Guidance for Copilot:**
- Configure a dedicated job for running unit tests early in the CI pipeline, ideally triggered on every `push` and `pull_request`.
- Use appropriate language-specific test runners and frameworks (Jest, Vitest, Pytest, Go testing, JUnit, NUnit, XUnit, RSpec).
- Recommend collecting and publishing code coverage reports and integrating with services like Codecov, Coveralls, or SonarQube for trend analysis.
- Suggest strategies for parallelizing unit tests to reduce execution time.
### **2. Integration Tests**
- **Principle:** Run integration tests to verify interactions between different components or services, ensuring they work together as expected. These tests typically involve real dependencies (e.g., databases, APIs).
- **Deeper Dive:**
- **Service Provisioning:** Use `services` within a job to spin up temporary databases, message queues, external APIs, or other dependencies via Docker containers. This provides a consistent and isolated testing environment.
- **Test Doubles vs. Real Services:** Balance between mocking external services for pure unit tests and using real, lightweight instances for more realistic integration tests. Prioritize real instances when testing actual integration points.
- **Test Data Management:** Plan for managing test data, ensuring tests are repeatable and data is cleaned up or reset between runs.
- **Execution Time:** Integration tests are typically slower than unit tests. Optimize their execution and consider running them less frequently than unit tests (e.g., on PR merge instead of every push).
- **Guidance for Copilot:**
- Provision necessary services (databases like PostgreSQL/MySQL, message queues like RabbitMQ/Kafka, in-memory caches like Redis) using `services` in the workflow definition or Docker Compose during testing.
- Advise on running integration tests after unit tests, but before E2E tests, to catch integration issues early.
- Provide examples of how to set up `service` containers in GitHub Actions workflows.
- Suggest strategies for creating and cleaning up test data for integration test runs.
### **3. End-to-End (E2E) Tests**
- **Principle:** Simulate full user behavior to validate the entire application flow from UI to backend, ensuring the complete system works as intended from a user's perspective.
- **Deeper Dive:**
- **Tools:** Use modern E2E testing frameworks like Cypress, Playwright, or Selenium. These provide browser automation capabilities.
- **Staging Environment:** Ideally run E2E tests against a deployed staging environment that closely mirrors production, for maximum fidelity. Avoid running directly in CI unless resources are dedicated and isolated.
- **Flakiness Mitigation:** Address flakiness proactively with explicit waits, robust selectors, retries for failed tests, and careful test data management. Flaky tests erode trust in the pipeline.
- **Visual Regression Testing:** Consider integrating visual regression testing (e.g., Applitools, Percy) to catch UI discrepancies.
- **Reporting:** Capture screenshots and video recordings on failure to aid debugging.
- **Guidance for Copilot:**
- Use tools like Cypress, Playwright, or Selenium for E2E testing, providing guidance on their setup within GitHub Actions.
- Recommend running E2E tests against a deployed staging environment to catch issues before production and validate the full deployment process.
- Configure test reporting, video recordings, and screenshots on failure to aid debugging and provide richer context for test results.
- Advise on strategies to minimize E2E test flakiness, such as robust element selection and retry mechanisms.
### **4. Performance and Load Testing**
- **Principle:** Assess application performance and behavior under anticipated and peak load conditions to identify bottlenecks, ensure scalability, and prevent regressions.
- **Deeper Dive:**
- **Tools:** JMeter, k6, Locust, Gatling, Artillery. Choose based on language, complexity, and specific needs.
- **Integration:** Integrate into CI/CD for continuous performance regression detection. Run these tests less frequently than unit/integration tests (e.g., nightly, weekly, or on significant feature merges).
- **Thresholds:** Define clear performance thresholds (e.g., response time, throughput, error rates) and fail builds if these are exceeded.
- **Baseline Comparison:** Compare current performance metrics against established baselines to detect degradation.
- **Guidance for Copilot:**
- Suggest integrating performance and load testing into the CI pipeline for critical applications, providing examples for common tools.
- Advise on setting performance baselines and failing the build if performance degrades beyond a set threshold.
- Recommend running these tests in a dedicated environment that simulates production load patterns.
- Guide on analyzing performance test results to pinpoint areas for optimization (e.g., database queries, API endpoints).
### **5. Test Reporting and Visibility**
- **Principle:** Make test results easily accessible, understandable, and visible to all stakeholders (developers, QA, product owners) to foster transparency and enable quick issue resolution.
- **Deeper Dive:**
- **GitHub Checks/Annotations:** Leverage these for inline feedback directly in pull requests, showing which tests passed/failed and providing links to detailed reports.
- **Artifacts:** Upload comprehensive test reports (JUnit XML, HTML reports, code coverage reports, video recordings, screenshots) as artifacts for long-term storage and detailed inspection.
- **Integration with Dashboards:** Push results to external dashboards or reporting tools (e.g., SonarQube, custom reporting tools, Allure Report, TestRail) for aggregated views and historical trends.
- **Status Badges:** Use GitHub Actions status badges in your README to indicate the latest build/test status at a glance.
- **Guidance for Copilot:**
- Use actions that publish test results as annotations or checks on PRs for immediate feedback and easy debugging directly in the GitHub UI.
- Upload detailed test reports (e.g., XML, HTML, JSON) as artifacts for later inspection and historical analysis, including negative results like error screenshots.
- Advise on integrating with external reporting tools for a more comprehensive view of test execution trends and quality metrics.
- Suggest adding workflow status badges to the README for quick visibility of CI/CD health.
## Advanced Deployment Strategies (Expanded)
### **1. Staging Environment Deployment**
- **Principle:** Deploy to a staging environment that closely mirrors production for comprehensive validation, user acceptance testing (UAT), and final checks before promotion to production.
- **Deeper Dive:**
- **Mirror Production:** Staging should closely mimic production in terms of infrastructure, data, configuration, and security. Any significant discrepancies can lead to issues in production.
- **Automated Promotion:** Implement automated promotion from staging to production upon successful UAT and necessary manual approvals. This reduces human error and speeds up releases.
- **Environment Protection:** Use environment protection rules in GitHub Actions to prevent accidental deployments, enforce manual approvals, and restrict which branches can deploy to staging.
- **Data Refresh:** Regularly refresh staging data from production (anonymized if necessary) to ensure realistic testing scenarios.
- **Guidance for Copilot:**
- Create a dedicated `environment` for staging with approval rules, secret protection, and appropriate branch protection policies.
- Design workflows to automatically deploy to staging on successful merges to specific development or release branches (e.g., `develop`, `release/*`).
- Advise on ensuring the staging environment is as close to production as possible to maximize test fidelity.
- Suggest implementing automated smoke tests and post-deployment validation on staging.
### **2. Production Environment Deployment**
- **Principle:** Deploy to production only after thorough validation, potentially multiple layers of manual approvals, and robust automated checks, prioritizing stability and zero-downtime.
- **Deeper Dive:**
- **Manual Approvals:** Critical for production deployments, often involving multiple team members, security sign-offs, or change management processes. GitHub Environments support this natively.
- **Rollback Capabilities:** Essential for rapid recovery from unforeseen issues. Ensure a quick and reliable way to revert to the previous stable state.
- **Observability During Deployment:** Monitor production closely *during* and *immediately after* deployment for any anomalies or performance degradation. Use dashboards, alerts, and tracing.
- **Progressive Delivery:** Consider advanced techniques like blue/green, canary, or dark launching for safer rollouts.
- **Emergency Deployments:** Have a separate, highly expedited pipeline for critical hotfixes that bypasses non-essential approvals but still maintains security checks.
- **Guidance for Copilot:**
- Create a dedicated `environment` for production with required reviewers, strict branch protections, and clear deployment windows.
- Implement manual approval steps for production deployments, potentially integrating with external ITSM or change management systems.
- Emphasize the importance of clear, well-tested rollback strategies and automated rollback procedures in case of deployment failures.
- Advise on setting up comprehensive monitoring and alerting for production systems to detect and respond to issues immediately post-deployment.
### **3. Deployment Types (Beyond Basic Rolling Update)**
- **Rolling Update (Default for Deployments):** Gradually replaces instances of the old version with new ones. Good for most cases, especially stateless applications.
- **Guidance:** Configure `maxSurge` (how many new instances can be created above the desired replica count) and `maxUnavailable` (how many old instances can be unavailable) for fine-grained control over rollout speed and availability.
- **Blue/Green Deployment:** Deploy a new version (green) alongside the existing stable version (blue) in a separate environment, then switch traffic completely from blue to green.
- **Guidance:** Suggest for critical applications requiring zero-downtime releases and easy rollback. Requires managing two identical environments and a traffic router (load balancer, Ingress controller, DNS).
- **Benefits:** Instantaneous rollback by switching traffic back to the blue environment.
- **Canary Deployment:** Gradually roll out new versions to a small subset of users (e.g., 5-10%) before a full rollout. Monitor performance and error rates for the canary group.
- **Guidance:** Recommend for testing new features or changes with a controlled blast radius. Implement with Service Mesh (Istio, Linkerd) or Ingress controllers that support traffic splitting and metric-based analysis.
- **Benefits:** Early detection of issues with minimal user impact.
- **Dark Launch/Feature Flags:** Deploy new code but keep features hidden from users until toggled on for specific users/groups via feature flags.
- **Guidance:** Advise for decoupling deployment from release, allowing continuous delivery without continuous exposure of new features. Use feature flag management systems (LaunchDarkly, Split.io, Unleash).
- **Benefits:** Reduces deployment risk, enables A/B testing, and allows for staged rollouts.
- **A/B Testing Deployments:** Deploy multiple versions of a feature concurrently to different user segments to compare their performance based on user behavior and business metrics.
- **Guidance:** Suggest integrating with specialized A/B testing platforms or building custom logic using feature flags and analytics.
### **4. Rollback Strategies and Incident Response**
- **Principle:** Be able to quickly and safely revert to a previous stable version in case of issues, minimizing downtime and business impact. This requires proactive planning.
- **Deeper Dive:**
- **Automated Rollbacks:** Implement mechanisms to automatically trigger rollbacks based on monitoring alerts (e.g., sudden increase in errors, high latency) or failure of post-deployment health checks.
- **Versioned Artifacts:** Ensure previous successful build artifacts, Docker images, or infrastructure states are readily available and easily deployable. This is crucial for fast recovery.
- **Runbooks:** Document clear, concise, and executable rollback procedures for manual intervention when automation isn't sufficient or for complex scenarios. These should be regularly reviewed and tested.
- **Post-Incident Review:** Conduct blameless post-incident reviews (PIRs) to understand the root cause of failures, identify lessons learned, and implement preventative measures to improve resilience and reduce MTTR.
- **Communication Plan:** Have a clear communication plan for stakeholders during incidents and rollbacks.
- **Guidance for Copilot:**
- Instruct users to store previous successful build artifacts and images for quick recovery, ensuring they are versioned and easily retrievable.
- Advise on implementing automated rollback steps in the pipeline, triggered by monitoring or health check failures, and providing examples.
- Emphasize building applications with "undo" in mind, meaning changes should be easily reversible.
- Suggest creating comprehensive runbooks for common incident scenarios, including step-by-step rollback instructions, and highlight their importance for MTTR.
- Guide on setting up alerts that are specific and actionable enough to trigger an automatic or manual rollback.
## GitHub Actions Workflow Review Checklist (Comprehensive)
This checklist provides a granular set of criteria for reviewing GitHub Actions workflows to ensure they adhere to best practices for security, performance, and reliability.
- [ ] **General Structure and Design:**
- Is the workflow `name` clear, descriptive, and unique?
- Are `on` triggers appropriate for the workflow's purpose (e.g., `push`, `pull_request`, `workflow_dispatch`, `schedule`)? Are path/branch filters used effectively?
- Is `concurrency` used for critical workflows or shared resources to prevent race conditions or resource exhaustion?
- Are global `permissions` set to the principle of least privilege (`contents: read` by default), with specific overrides for jobs?
- Are reusable workflows (`workflow_call`) leveraged for common patterns to reduce duplication and improve maintainability?
- Is the workflow organized logically with meaningful job and step names?
- [ ] **Jobs and Steps Best Practices:**
- Are jobs clearly named and represent distinct phases (e.g., `build`, `lint`, `test`, `deploy`)?
- Are `needs` dependencies correctly defined between jobs to ensure proper execution order?
- Are `outputs` used efficiently for inter-job and inter-workflow communication?
- Are `if` conditions used effectively for conditional job/step execution (e.g., environment-specific deployments, branch-specific actions)?
- Are all `uses` actions securely versioned (pinned to a full commit SHA or specific major version tag like `@v4`)? Avoid `main` or `latest` tags.
- Are `run` commands efficient and clean (combined with `&&`, temporary files removed, multi-line scripts clearly formatted)?
- Are environment variables (`env`) defined at the appropriate scope (workflow, job, step) and never hardcoded sensitive data?
- Is `timeout-minutes` set for long-running jobs to prevent hung workflows?
- [ ] **Security Considerations:**
  - Is all sensitive data accessed exclusively via the GitHub `secrets` context (`${{ secrets.MY_SECRET }}`)? Never hardcoded, and never printed to logs — automatic masking is best-effort, not a guarantee.
- Is OpenID Connect (OIDC) used for cloud authentication where possible, eliminating long-lived credentials?
- Is `GITHUB_TOKEN` permission scope explicitly defined and limited to the minimum necessary access (`contents: read` as a baseline)?
- Are Software Composition Analysis (SCA) tools (e.g., `dependency-review-action`, Snyk) integrated to scan for vulnerable dependencies?
- Are Static Application Security Testing (SAST) tools (e.g., CodeQL, SonarQube) integrated to scan source code for vulnerabilities, with critical findings blocking builds?
- Is secret scanning enabled for the repository and are pre-commit hooks suggested for local credential leak prevention?
- Is there a strategy for container image signing (e.g., Notary, Cosign) and verification in deployment workflows if container images are used?
- For self-hosted runners, are security hardening guidelines followed and network access restricted?
- [ ] **Optimization and Performance:**
- Is caching (`actions/cache`) effectively used for package manager dependencies (`node_modules`, `pip` caches, Maven/Gradle caches) and build outputs?
- Are cache `key` and `restore-keys` designed for optimal cache hit rates (e.g., using `hashFiles`)?
- Is `strategy.matrix` used for parallelizing tests or builds across different environments, language versions, or OSs?
- Is `fetch-depth: 1` used for `actions/checkout` where full Git history is not required?
- Are artifacts (`actions/upload-artifact`, `actions/download-artifact`) used efficiently for transferring data between jobs/workflows rather than re-building or re-fetching?
- Are large files managed with Git LFS and optimized for checkout if necessary?
- [ ] **Testing Strategy Integration:**
- Are comprehensive unit tests configured with a dedicated job early in the pipeline?
- Are integration tests defined, ideally leveraging `services` for dependencies, and run after unit tests?
- Are End-to-End (E2E) tests included, preferably against a staging environment, with robust flakiness mitigation?
- Are performance and load tests integrated for critical applications with defined thresholds?
- Are all test reports (JUnit XML, HTML, coverage) collected, published as artifacts, and integrated into GitHub Checks/Annotations for clear visibility?
- Is code coverage tracked and enforced with a minimum threshold?
- [ ] **Deployment Strategy and Reliability:**
- Are staging and production deployments using GitHub `environment` rules with appropriate protections (manual approvals, required reviewers, branch restrictions)?
- Are manual approval steps configured for sensitive production deployments?
- Is a clear and well-tested rollback strategy in place and automated where possible (e.g., `kubectl rollout undo`, reverting to previous stable image)?
- Are chosen deployment types (e.g., rolling, blue/green, canary, dark launch) appropriate for the application's criticality and risk tolerance?
- Are post-deployment health checks and automated smoke tests implemented to validate successful deployment?
- Is the workflow resilient to temporary failures (e.g., retries for flaky network operations)?
- [ ] **Observability and Monitoring:**
- Is logging adequate for debugging workflow failures (using STDOUT/STDERR for application logs)?
- Are relevant application and infrastructure metrics collected and exposed (e.g., Prometheus metrics)?
- Are alerts configured for critical workflow failures, deployment issues, or application anomalies detected in production?
- Is distributed tracing (e.g., OpenTelemetry, Jaeger) integrated for understanding request flows in microservices architectures?
- Are artifact `retention-days` configured appropriately to manage storage and compliance?
## Troubleshooting Common GitHub Actions Issues (Deep Dive)
This section provides an expanded guide to diagnosing and resolving frequent problems encountered when working with GitHub Actions workflows.
### **1. Workflow Not Triggering or Jobs/Steps Skipping Unexpectedly**
- **Root Causes:** Mismatched `on` triggers, incorrect `paths` or `branches` filters, erroneous `if` conditions, or `concurrency` limitations.
- **Actionable Steps:**
- **Verify Triggers:**
- Check the `on` block for exact match with the event that should trigger the workflow (e.g., `push`, `pull_request`, `workflow_dispatch`, `schedule`).
- Ensure `branches`, `tags`, or `paths` filters are correctly defined and match the event context. Remember that `paths-ignore` and `branches-ignore` take precedence.
- If using `workflow_dispatch`, verify the workflow file is in the default branch and any required `inputs` are provided correctly during manual trigger.
- **Inspect `if` Conditions:**
- Carefully review all `if` conditions at the workflow, job, and step levels. A single false condition can prevent execution.
- Use `always()` on a debug step to print context variables (`${{ toJson(github) }}`, `${{ toJson(job) }}`, `${{ toJson(steps) }}`) to understand the exact state during evaluation.
- Test complex `if` conditions in a simplified workflow.
- **Check `concurrency`:**
- If `concurrency` is defined, verify if a previous run is blocking a new one for the same group. Check the "Concurrency" tab in the workflow run.
- **Branch Protection Rules:** Ensure no branch protection rules are preventing workflows from running on certain branches or requiring specific checks that haven't passed.
### **2. Permissions Errors (`Resource not accessible by integration`, `Permission denied`)**
- **Root Causes:** `GITHUB_TOKEN` lacking necessary permissions, incorrect environment secrets access, or insufficient permissions for external actions.
- **Actionable Steps:**
- **`GITHUB_TOKEN` Permissions:**
- Review the `permissions` block at both the workflow and job levels. Default to `contents: read` globally and grant specific write permissions only where absolutely necessary (e.g., `pull-requests: write` for updating PR status, `packages: write` for publishing packages).
    - Understand the default permissions of `GITHUB_TOKEN`, which are often broader than necessary.
- **Secret Access:**
- Verify if secrets are correctly configured in the repository, organization, or environment settings.
- Ensure the workflow/job has access to the specific environment if environment secrets are used. Check if any manual approvals are pending for the environment.
- Confirm the secret name matches exactly (`secrets.MY_API_KEY`).
- **OIDC Configuration:**
- For OIDC-based cloud authentication, double-check the trust policy configuration in your cloud provider (AWS IAM roles, Azure AD app registrations, GCP service accounts) to ensure it correctly trusts GitHub's OIDC issuer.
- Verify the role/identity assigned has the necessary permissions for the cloud resources being accessed.
### **3. Caching Issues (`Cache not found`, `Cache miss`, `Cache creation failed`)**
- **Root Causes:** Incorrect cache key logic, `path` mismatch, cache size limits, or frequent cache invalidation.
- **Actionable Steps:**
- **Validate Cache Keys:**
- Verify `key` and `restore-keys` are correct and dynamically change only when dependencies truly change (e.g., `key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}`). A cache key that is too dynamic will always result in a miss.
- Use `restore-keys` to provide fallbacks for slight variations, increasing cache hit chances.
- **Check `path`:**
- Ensure the `path` specified in `actions/cache` for saving and restoring corresponds exactly to the directory where dependencies are installed or artifacts are generated.
- Verify the existence of the `path` before caching.
- **Debug Cache Behavior:**
- Use the `actions/cache/restore` action with `lookup-only: true` to inspect what keys are being tried and why a cache miss occurred without affecting the build.
- Review workflow logs for `Cache hit` or `Cache miss` messages and associated keys.
- **Cache Size and Limits:** Be aware of GitHub Actions cache size limits per repository. If caches are very large, they might be evicted frequently.
### **4. Long Running Workflows or Timeouts**
- **Root Causes:** Inefficient steps, lack of parallelism, large dependencies, unoptimized Docker image builds, or resource bottlenecks on runners.
- **Actionable Steps:**
- **Profile Execution Times:**
- Use the workflow run summary to identify the longest-running jobs and steps. This is your primary tool for optimization.
- **Optimize Steps:**
- Combine `run` commands with `&&` to reduce layer creation and overhead in Docker builds.
- Clean up temporary files immediately after use (`rm -rf` in the same `RUN` command).
- Install only necessary dependencies.
- **Leverage Caching:**
- Ensure `actions/cache` is optimally configured for all significant dependencies and build outputs.
- **Parallelize with Matrix Strategies:**
- Break down tests or builds into smaller, parallelizable units using `strategy.matrix` to run them concurrently.
- **Choose Appropriate Runners:**
- Review `runs-on`. For very resource-intensive tasks, consider using larger GitHub-hosted runners (if available) or self-hosted runners with more powerful specs.
- **Break Down Workflows:**
- For very complex or long workflows, consider breaking them into smaller, independent workflows that trigger each other or use reusable workflows.
### **5. Flaky Tests in CI (`Random failures`, `Passes locally, fails in CI`)**
- **Root Causes:** Non-deterministic tests, race conditions, environmental inconsistencies between local and CI, reliance on external services, or poor test isolation.
- **Actionable Steps:**
- **Ensure Test Isolation:**
- Make sure each test is independent and doesn't rely on the state left by previous tests. Clean up resources (e.g., database entries) after each test or test suite.
- **Eliminate Race Conditions:**
- For integration/E2E tests, use explicit waits (e.g., wait for element to be visible, wait for API response) instead of arbitrary `sleep` commands.
- Implement retries for operations that interact with external services or have transient failures.
- **Standardize Environments:**
- Ensure the CI environment (Node.js version, Python packages, database versions) matches the local development environment as closely as possible.
- Use Docker `services` for consistent test dependencies.
- **Robust Selectors (E2E):**
- Use stable, unique selectors in E2E tests (e.g., `data-testid` attributes) instead of brittle CSS classes or XPath.
- **Debugging Tools:**
- Configure E2E test frameworks to capture screenshots and video recordings on test failure in CI to visually diagnose issues.
- **Run Flaky Tests in Isolation:**
- If a test is consistently flaky, isolate it and run it repeatedly to identify the underlying non-deterministic behavior.
### **6. Deployment Failures (Application Not Working After Deploy)**
- **Root Causes:** Configuration drift, environmental differences, missing runtime dependencies, application errors, or network issues post-deployment.
- **Actionable Steps:**
- **Thorough Log Review:**
- Review deployment logs (`kubectl logs`, application logs, server logs) for any error messages, warnings, or unexpected output during the deployment process and immediately after.
- **Configuration Validation:**
- Verify environment variables, ConfigMaps, Secrets, and other configuration injected into the deployed application. Ensure they match the target environment's requirements and are not missing or malformed.
- Use pre-deployment checks to validate configuration.
- **Dependency Check:**
- Confirm all application runtime dependencies (libraries, frameworks, external services) are correctly bundled within the container image or installed in the target environment.
- **Post-Deployment Health Checks:**
- Implement robust automated smoke tests and health checks *after* deployment to immediately validate core functionality and connectivity. Trigger rollbacks if these fail.
- **Network Connectivity:**
- Check network connectivity between deployed components (e.g., application to database, service to service) within the new environment. Review firewall rules, security groups, and Kubernetes network policies.
- **Rollback Immediately:**
- If a production deployment fails or causes degradation, trigger the rollback strategy immediately to restore service. Diagnose the issue in a non-production environment.
## Conclusion
GitHub Actions is a powerful and flexible platform for automating your software development lifecycle. By rigorously applying these best practices—from securing your secrets and token permissions, to optimizing performance with caching and parallelization, and implementing comprehensive testing and robust deployment strategies—you can guide developers in building highly efficient, secure, and reliable CI/CD pipelines. Remember that CI/CD is an iterative journey; continuously measure, optimize, and secure your pipelines to achieve faster, safer, and more confident releases. Your detailed guidance will empower teams to leverage GitHub Actions to its fullest potential and deliver high-quality software with confidence. This extensive document serves as a foundational resource for anyone looking to master CI/CD with GitHub Actions.
---
<!-- End of GitHub Actions CI/CD Best Practices Instructions -->
-350
View File
@@ -1,350 +0,0 @@
---
description: 'Instructions for writing Go code following idiomatic Go practices and community standards'
applyTo: '**/*.go,**/go.mod,**/go.sum'
---
# Go Development Instructions
Follow idiomatic Go practices and community standards when writing Go code.
These instructions are based on:
- [Effective Go](https://go.dev/doc/effective_go)
- [Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
- [Uber's Go Style Guide](https://github.com/uber-go/guide)
- [Google's Go Style Guide](https://google.github.io/styleguide/go/)
## General Instructions
- Write simple, clear, and idiomatic Go code
- Favor clarity and simplicity over cleverness
- Follow the principle of least surprise
- Keep the happy path left-aligned (minimize indentation)
- Return early to reduce nesting
- Prefer early return over if-else chains; use `if condition { return }` pattern to avoid else blocks
- Make the zero value useful
- Write self-documenting code with clear, descriptive names
- Document exported types, functions, methods, and packages
- Use Go modules for dependency management
- Leverage the Go standard library instead of reinventing the wheel (e.g., use `strings.Builder` for string concatenation, `filepath.Join` for path construction)
- Prefer standard library solutions over custom implementations when functionality exists
- Write comments in English by default; translate only upon user request
- Avoid using emoji in code and comments
## Naming Conventions
### Packages
- Use lowercase, single-word package names
- Avoid underscores, hyphens, or mixedCaps
- Choose names that describe what the package provides, not what it contains
- Avoid generic names like `util`, `common`, or `base`
- Package names should be singular, not plural
#### Package Declaration Rules (CRITICAL):
- **NEVER duplicate `package` declarations** - each Go file must have exactly ONE `package` line
- When editing an existing `.go` file:
- **PRESERVE** the existing `package` declaration - do not add another one
- If you need to replace the entire file content, start with the existing package name
- When creating a new `.go` file:
- **BEFORE writing any code**, check what package name other `.go` files in the same directory use
- Use the SAME package name as existing files in that directory
- If it's a new directory, use the directory name as the package name
- Write **exactly one** `package <name>` line at the very top of the file
- When using file creation or replacement tools:
- **ALWAYS verify** the target file doesn't already have a `package` declaration before adding one
- If replacing file content, include only ONE `package` declaration in the new content
- **NEVER** create files with multiple `package` lines or duplicate declarations
### Variables and Functions
- Use mixedCaps or MixedCaps (camelCase) rather than underscores
- Keep names short but descriptive
- Use single-letter variables only for very short scopes (like loop indices)
- Exported names start with a capital letter
- Unexported names start with a lowercase letter
- Avoid stuttering (e.g., avoid `http.HTTPServer`, prefer `http.Server`)
### Interfaces
- Name interfaces with -er suffix when possible (e.g., `Reader`, `Writer`, `Formatter`)
- Single-method interfaces should be named after the method (e.g., `Read` → `Reader`)
- Keep interfaces small and focused
### Constants
- Use MixedCaps for exported constants
- Use mixedCaps for unexported constants
- Group related constants using `const` blocks
- Consider using typed constants for better type safety
## Code Style and Formatting
### Formatting
- Always use `gofmt` to format code
- Use `goimports` to manage imports automatically
- Keep line length reasonable (no hard limit, but consider readability)
- Add blank lines to separate logical groups of code
### Comments
- Strive for self-documenting code; prefer clear variable names, function names, and code structure over comments
- Write comments only when necessary to explain complex logic, business rules, or non-obvious behavior
- Write comments in complete sentences in English by default
- Translate comments to other languages only upon specific user request
- Start sentences with the name of the thing being described
- Package comments should start with "Package [name]"
- Use line comments (`//`) for most comments
- Use block comments (`/* */`) sparingly, mainly for package documentation
- Document why, not what, unless the what is complex
- Avoid emoji in comments and code
### Error Handling
- Check errors immediately after the function call
- Don't ignore errors using `_` unless you have a good reason (document why)
- Wrap errors with context using `fmt.Errorf` with `%w` verb
- Create custom error types when you need to check for specific errors
- Place error returns as the last return value
- Name error variables `err`
- Keep error messages lowercase and don't end with punctuation
## Architecture and Project Structure
### Package Organization
- Follow standard Go project layout conventions
- Keep `main` packages in `cmd/` directory
- Put reusable packages in `pkg/` or `internal/`
- Use `internal/` for packages that shouldn't be imported by external projects
- Group related functionality into packages
- Avoid circular dependencies
### Dependency Management
- Use Go modules (`go.mod` and `go.sum`)
- Keep dependencies minimal
- Regularly update dependencies for security patches
- Use `go mod tidy` to clean up unused dependencies
- Vendor dependencies only when necessary
## Type Safety and Language Features
### Type Definitions
- Define types to add meaning and type safety
- Use struct tags for JSON, XML, database mappings
- Prefer explicit type conversions
- Use type assertions carefully and check the second return value
- Prefer generics over unconstrained types; when an unconstrained type is truly needed, use the predeclared alias `any` instead of `interface{}`
### Pointers vs Values
- Use pointer receivers for large structs or when you need to modify the receiver
- Use value receivers for small structs and when immutability is desired
- Use pointer parameters when you need to modify the argument or for large structs
- Use value parameters for small structs and when you want to prevent modification
- Be consistent within a type's method set
- Consider the zero value when choosing pointer vs value receivers
### Interfaces and Composition
- Accept interfaces, return concrete types
- Keep interfaces small (1-3 methods is ideal)
- Use embedding for composition
- Define interfaces close to where they're used, not where they're implemented
- Don't export interfaces unless necessary
## Concurrency
### Goroutines
- Be cautious about creating goroutines in libraries; prefer letting the caller control concurrency
- If you must create goroutines in libraries, provide clear documentation and cleanup mechanisms
- Always know how a goroutine will exit
- Use `sync.WaitGroup` or channels to wait for goroutines
- Avoid goroutine leaks by ensuring cleanup
### Channels
- Use channels to communicate between goroutines
- Don't communicate by sharing memory; share memory by communicating
- Close channels from the sender side, not the receiver
- Use buffered channels when you know the capacity
- Use `select` for non-blocking operations
### Synchronization
- Use `sync.Mutex` for protecting shared state
- Keep critical sections small
- Use `sync.RWMutex` when you have many readers
- Choose between channels and mutexes based on the use case: use channels for communication, mutexes for protecting state
- Use `sync.Once` for one-time initialization
- WaitGroup usage by Go version:
- If `go >= 1.25` in `go.mod`, use the new `WaitGroup.Go` method ([documentation](https://pkg.go.dev/sync#WaitGroup)):
```go
var wg sync.WaitGroup
wg.Go(task1)
wg.Go(task2)
wg.Wait()
```
- If `go < 1.25`, use the classic `Add`/`Done` pattern
## Error Handling Patterns
### Creating Errors
- Use `errors.New` for simple static errors
- Use `fmt.Errorf` for dynamic errors
- Create custom error types for domain-specific errors
- Export error variables for sentinel errors
- Use `errors.Is` and `errors.As` for error checking
### Error Propagation
- Add context when propagating errors up the stack
- Don't log and return errors (choose one)
- Handle errors at the appropriate level
- Consider using structured errors for better debugging
## Performance Optimization
### Memory Management
- Minimize allocations in hot paths
- Reuse objects when possible (consider `sync.Pool`)
- Use value receivers for small structs
- Preallocate slices when size is known
- Avoid unnecessary string conversions
### I/O: Readers and Buffers
- Most `io.Reader` streams are consumable once; reading advances state. Do not assume a reader can be re-read without special handling
- If you must read data multiple times, buffer it once and recreate readers on demand:
- Use `io.ReadAll` (or a limited read) to obtain `[]byte`, then create fresh readers via `bytes.NewReader(buf)` or `bytes.NewBuffer(buf)` for each reuse
- For strings, use `strings.NewReader(s)`; you can `Seek(0, io.SeekStart)` on `*bytes.Reader` to rewind
- For HTTP requests, do not reuse a consumed `req.Body`. Instead:
- Keep the original payload as `[]byte` and set `req.Body = io.NopCloser(bytes.NewReader(buf))` before each send
- Prefer configuring `req.GetBody` so the transport can recreate the body for redirects/retries: `req.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(buf)), nil }`
- To duplicate a stream while reading, use `io.TeeReader` (copy to a buffer while passing through) or write to multiple sinks with `io.MultiWriter`
- Reusing buffered readers: call `(*bufio.Reader).Reset(r)` to attach to a new underlying reader; do not expect it to “rewind” unless the source supports seeking
- For large payloads, avoid unbounded buffering; consider streaming, `io.LimitReader`, or on-disk temporary storage to control memory
- Use `io.Pipe` to stream without buffering the whole payload:
- Write to `*io.PipeWriter` in a separate goroutine while the reader consumes
- Always close the writer; use `CloseWithError(err)` on failures
- `io.Pipe` is for streaming, not rewinding or making readers reusable
- **Warning:** When using `io.Pipe` (especially with multipart writers), all writes must be performed in strict, sequential order. Do not write concurrently or out of order—multipart boundaries and chunk order must be preserved. Out-of-order or parallel writes can corrupt the stream and result in errors.
- Streaming multipart/form-data with `io.Pipe`:
- `pr, pw := io.Pipe()`; `mw := multipart.NewWriter(pw)`; use `pr` as the HTTP request body
- Set `Content-Type` to `mw.FormDataContentType()`
- In a goroutine: write all parts to `mw` in the correct order; on error `pw.CloseWithError(err)`; on success `mw.Close()` then `pw.Close()`
- Do not store request/in-flight form state on a long-lived client; build per call
- Streamed bodies are not rewindable; for retries/redirects, buffer small payloads or provide `GetBody`
### Profiling
- Use built-in profiling tools (`pprof`)
- Benchmark critical code paths
- Profile before optimizing
- Focus on algorithmic improvements first
- Consider using `testing.B` for benchmarks
## Testing
### Test Organization
- Keep tests in the same package (white-box testing) when testing internals
- Use a test package (in the same directory) when testing the public API of the package
- Use `_test` package suffix for black-box testing
- Name test files with `_test.go` suffix
- Place test files next to the code they test
### Writing Tests
- Use table-driven tests for multiple test cases
- Name tests descriptively using `TestType_MethodName_scenario`
- Use subtests with `t.Run` for better organization
- Test both success and error cases
- Use `testify` or similar libraries when they add value, but don't over-complicate simple tests
- Use `testify/mock` for mocking dependencies when necessary
### Test Helpers
- Mark helper functions with `t.Helper()`
- Create test fixtures for complex setup
- Use `testing.TB` interface for functions used in tests and benchmarks
- Clean up resources using `t.Cleanup()`
## Security Best Practices
### Input Validation
- Validate all external input
- Use strong typing to prevent invalid states
- Sanitize data before using in SQL queries
- Be careful with file paths from user input
- Validate and escape data for different contexts (HTML, SQL, shell)
### Cryptography
- Use standard library crypto packages
- Don't implement your own cryptography
- Use crypto/rand for random number generation
- Store passwords using bcrypt, scrypt, or argon2 (consider golang.org/x/crypto for additional options)
- Use TLS for network communication
## Documentation
### Code Documentation
- Prioritize self-documenting code through clear naming and structure
- Document all exported symbols with clear, concise explanations
- Start documentation with the symbol name
- Write documentation in English by default
- Use examples in documentation when helpful
- Keep documentation close to code
- Update documentation when code changes
- Do not use emoji in documentation and comments
### README and Documentation Files
- Include clear setup instructions
- Document dependencies and requirements
- Provide usage examples
- Document configuration options
- Include troubleshooting section
## Tools and Development Workflow
### Essential Tools
- `make fmt`: Format code
- `make lint`: Additional linting
- `make test`: Run tests
- `go mod`: Manage dependencies
### Development Practices
- Run tests before committing (`make test`)
- Run linter before committing (`make lint`)
- Keep commits focused and atomic
- Write meaningful commit messages
- Review diffs before committing
## Common Pitfalls to Avoid
- Not checking errors
- Ignoring race conditions
- Creating goroutine leaks
- Not using defer for cleanup
- Modifying maps concurrently
- Not understanding nil interfaces vs nil pointers
- Forgetting to close resources (files, connections)
- Using global variables unnecessarily
- Over-using unconstrained types (e.g., `any`); prefer specific types or generic type parameters with constraints. If an unconstrained type is required, use `any` rather than `interface{}`
- Not considering the zero value of types
- **Creating duplicate `package` declarations** - this is a compile error; always check existing files before adding package declarations
@@ -1,534 +0,0 @@
---
description: 'Documentation and content creation standards'
applyTo: '**/*.md'
---
## Markdown Content Rules
The following markdown content rules are enforced in the validators:
1. **Headings**: Use appropriate heading levels (H2, H3, etc.) to structure your content. Do not use an H1 heading, as this will be generated based on the title.
2. **Lists**: Use bullet points or numbered lists for lists. Ensure proper indentation and spacing.
3. **Code Blocks**: Use fenced code blocks for code snippets. Specify the language for syntax highlighting.
4. **Links**: Use proper markdown syntax for links. Ensure that links are valid and accessible.
5. **Images**: Use proper markdown syntax for images. Include alt text for accessibility.
6. **Tables**: Use markdown tables for tabular data. Ensure proper formatting and alignment.
7. **Line Length**: Limit line length to 400 characters for readability.
8. **Whitespace**: Use appropriate whitespace to separate sections and improve readability.
9. **Front Matter**: Include YAML front matter at the beginning of the file with required metadata fields.
## Formatting and Structure
Follow these guidelines for formatting and structuring your markdown content:
- **Headings**: Use `##` for H2 and `###` for H3. Ensure that headings are used in a hierarchical manner. Recommend restructuring if content includes H4, and more strongly recommend for H5.
- **Lists**: Use `-` for bullet points and `1.` for numbered lists. Indent nested lists with two spaces.
- **Code Blocks**: Use triple backticks to create fenced code blocks. Specify the language after the opening backticks for syntax highlighting (e.g., `csharp`).
- **Links**: Use `[link text](URL)` for links. Ensure that the link text is descriptive and the URL is valid.
- **Images**: Use `![alt text](image URL)` for images. Include a brief description of the image in the alt text.
- **Tables**: Use `|` to create tables. Ensure that columns are properly aligned and headers are included.
- **Line Length**: Break lines at 80 characters to improve readability. Use soft line breaks for long paragraphs.
- **Whitespace**: Use blank lines to separate sections and improve readability. Avoid excessive whitespace.
## Follow our Guidelines
### Spelling
In cases where American spelling differs from Commonwealth/"British" spelling, use the American spelling.
Although non-American readers tend to be tolerant of reading American spelling in technical documentation,
they may find it difficult to have to type American spelling.
For example, if your documentation tells a reader who's used to the spelling colour to type color,
they may mistype it. So when you use filenames, URLs, and data parameters in examples,
try to avoid words that are spelled differently by different groups of English speakers.
### Write accessibly
#### Ease of reading
* Do not force line breaks (hard returns) within sentences and paragraphs.
Line breaks might not work well in resized windows or with enlarged text.
* Break up walls of text to aid in scannability.
For example, separate paragraphs, create headings, and use lists.
* Prefer short sentences.
* Define acronyms and abbreviations on first usage and if they are used infrequently.
* Place distinguishing and important information of a paragraph in the first sentence to aid in scannability.
* Use clear and direct language. Avoid the use of double negatives and exceptions in exceptions.
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
A missing path will not prevent you from continuing.
```
<ul>
<li>Double negation (missing, not)</li>
<li>Use of future tense (will)</li>
</ul>
</td><td>
```markdown
You can continue without a path.
```
</td></tr>
</tbody></table>
#### Headings and titles
Use descriptive headings and titles because they help a reader navigate their browser and the page.
It's easier to jump between pages and sections of a page if the headings and titles are unique.
* Use a heading hierarchy.
* Do not skip levels of hierarchy (`h3` can only exist under `h2`)
* Do not use empty headings
* Use a level-1 heading for the page title.
* Use sentence casing for titles and headings.
#### Links
* Use meaningful link text. Links should make sense when read out of context.
* Do not force links to open in a new tab or window, let the reader decide how to open links.
* When possible, avoid adjacent links. Instead, put at least one character in between to separate them.
* If a link downloads a file, indicate this action and the file type in the link text.
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
Use meaningful link text like described [here](https://developers.google.com/style/link-text).
Use meaningful link text. [See document.](https://developers.google.com/style/link-text)
Use meaningful link text. https://developers.google.com/style/link-text
```
</td><td>
```markdown
Use [meaningful link text](https://developers.google.com/style/link-text).
```
</td></tr>
</tbody></table>
#### Images
* When possible, use SVG images over any other format, since they are significantly lighter while having perfect information.
* For every image, provide alt text that adequately summarizes the intent of each image.
* Most of the time, do not present new information in images; always provide an equivalent text explanation with the image. There are of course exceptions for that, such as architecture diagrams, sequence diagrams etc.
* Do not repeat images.
* Avoid images of text, use text instead.
#### Tables
* Introduce tables in the text preceding the table.
* Avoid using tables to lay out pages.
* If the table contains only a single column, use a list instead.
* Do not put tables in the middle of lists or sentences.
* Sort rows in a logical order, or alphabetically if there is no logical order.
### Use the active voice
In general, use the active voice instead of the passive voice. Make it clear who is performing the action.
When using passive voice, it is easy to neglect to indicate who or what is performing the described action.
In this kind of construction, it is often hard for readers to figure out who is supposed to do something.
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
The service is queried, and an acknowledgment is sent.
The service is queried by you, and an acknowledgment is sent by the server.
```
</td><td>
```markdown
Send a query to the service. The server sends an acknowledgment.
```
</td></tr>
</tbody></table>
#### Exceptions
In certain cases, it makes more sense to use the passive voice.
* To emphasize an object over an action.
* To de-emphasize a subject or actor.
* If your readers do not need to know who is responsible for the action.
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
You created over 50 conflicts in the file.
```
</td><td>
```markdown
Over 50 conflicts were found in the file.
```
</td></tr>
<tr><td>
```markdown
The system saved your file.
```
</td><td>
```markdown
The file is saved.
```
</td></tr>
<tr><td>
```markdown
A system administrator purged the database in January.
```
</td><td>
```markdown
The database was purged in January.
```
</td></tr>
</tbody></table>
### Write for a global audience
* Provide context. Do not assume that the reader already knows what you're talking about.
* Avoid negative constructions when possible. Consider whether it's necessary to tell the reader what they can't do instead of what they can.
* Avoid directional language (for example, above or below) in procedural documentation.
This increases maintenance costs and could lead to future modifications breaking the documentation.
Here are some examples.
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
This document makes use of the following terms:
```
Can be substituted for a simpler verb.
</td><td>
```markdown
This document uses the following terms:
```
</td></tr>
<tr><td>
```markdown
A hybrid cloud-native DevSecOps pipeline
```
Too many nouns as modifiers of another noun. Can be broken into two parts.
</td><td>
```markdown
A cloud-native DevSecOps pipeline in a hybrid environment
```
</td></tr>
<tr><td>
```markdown
Only request one token.
```
Misplaced modifier, makes the sentence less clear and more ambiguous.
</td><td>
```markdown
Request only one token.
Request no more than one token.
Request a single token.
```
</td></tr>
<tr><td>
```markdown
If you use the term green beer in an ad, then make sure that it is targeted.
```
Here, "it is" becomes ambiguous. It could describe the green beer or the ad.
</td><td>
```markdown
If you use the term green beer in an ad, then make sure that the ad is targeted.
```
</td></tr>
</tbody></table>
#### Use present tense
In general, use present tense rather than future tense; in particular, try to avoid using _will_ where possible.
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
Send a query to the service. The server will send an acknowledgment.
```
</td><td>
```markdown
Send a query to the service. The server sends an acknowledgment.
```
</td></tr>
</tbody></table>
Sometimes, of course, future tense is unavoidable because you're actually talking about the future
(for example, _This document will be outdated once PR #12345 gets merged._).
Attempting to predict the future in a document is usually a bad idea, but sometimes it's necessary.
However, the fact that the reader will be writing and running code in the future isn't a good reason to use future tense.
Also avoid the hypothetical future _would_ — for example:
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
You can send an unsubscribe message. The server would then remove you from the mailing list.
```
</td><td>
```markdown
If you send an unsubscribe message, the server removes you from the mailing list.
```
</td></tr>
</tbody></table>
#### Use clear, precise, unambiguous language
* Use simple words. For example, do not use words like _commence_ when you mean _start_ or _begin_.
* Define abbreviations. Abbreviations can be confusing out of context, and they don't translate well.
Spell things out whenever possible, at least the first time that you use a given term.
#### Be consistent
If you use a particular term for a particular concept in one place, then use that exact same term elsewhere, including the same capitalization.
* Use standard English word order. Sentences follow the subject + verb + object order.
* Try to keep the main subject and verb as close to the beginning of the sentence as possible.
* Use the conditional clause first. If you want to tell the audience to do something in a particular circumstance, mention the circumstance before you provide the instruction.
* Make list items consistent. Make list items parallel in structure. Be consistent in your capitalization and punctuation.
* Use consistent typographic formats. Use bold and italics consistently. Don't switch from using italics for emphasis to underlining.
* Avoid colloquialisms, idioms, or slang. Phrases like ballpark figure, back burner, or hang in there can be confusing to non-native readers.
### Describe conditions before instructions
If you want to tell the reader to do something, try to mention the circumstance, conditions, or goal before you provide the instruction.
Mentioning the circumstance first lets the reader skip the instruction if it doesn't apply.
<table>
<thead><tr><th>Bad</th><th>Good</th></tr></thead>
<tbody>
<tr><td>
```markdown
See [link to other document] for more information.
Click Delete if you want to delete the entire document.
Using custom domains might add noticeable latency to responses if your app is located in one of the following regions:
```
</td><td>
```markdown
For more information, see [link to other document].
To delete the entire document, click Delete.
If your app is located in one of the following regions, using custom domains might add noticeable latency to responses:
```
</td></tr>
</tbody></table>
### Use lists
Introduce a list with the appropriate context. In most cases, precede a list with an introductory sentence.
* Use simple numbered lists for steps to be performed in order.
* Nested sequential lists can detail sub-steps as well.
* Use bulleted lists when there are no sequences or options.
### Use code blocks
In most cases, precede a code sample with an introductory sentence.
* Do not use tabs to indent code; use spaces only.
* Wrap lines at 80 characters if you need to, but try to use shorter lines in code blocks.
* Specify the code block language, for syntax highlighting.
* If the code block is meant to show a command being run, prefer showing the expected output if applicable.
### Markdown guidelines
#### Add spacing to headings
Prefer spacing after `#` and newlines before and after.
```markdown
...text before.
# Heading 1
Text after...
```
#### Use lazy numbering for long lists
Markdown is smart enough to let the resulting HTML render your numbered lists correctly.
For longer lists that may change, especially long nested lists, use _lazy_ numbering.
```markdown
1. Foo.
1. Bar.
1. Barbaz.
1. Barbar.
1. Baz.
```
However, if the list is small, and you don't anticipate changing it, prefer fully numbered lists,
because it is nicer to read in source.
#### Long links
Long links make source Markdown difficult to read and break the 80 character wrapping. Wherever possible, **shorten your links**.
If it is not possible, feel free to reference links at the bottom of the paragraph instead:
```markdown
This paragraph's lines would get very long and difficult to wrap if the [full link] is included inline.
[full link]:https://www.reallylong.link/rll/BFob89Cv/Owa_TbBBi3Bn9/n5cahxQtC4TOH/afoPnUDyyOS/_8Ilq4zSBjqmo8w/j6UN1uviS9zky
```
#### Prefer lists to tables
Any tables in your Markdown should be small.
Complex, large tables are difficult to read in source and most importantly, a pain to modify later.
Lists and subheadings usually suffice to present the same information in a slightly less compact,
though much more edit-friendly way.
Here is a bad example:
```markdown
Fruit | Attribute | Notes
--- | --- | ---
Apple | [Juicy](https://example.com/SomeReallyReallyReallyReallyReallyReallyReallyReallyLongQuery), Firm, Sweet | Apples keep doctors away.
Banana | [Convenient](https://example.com/SomeDifferentReallyReallyReallyReallyReallyReallyReallyReallyLongQuery), Soft, Sweet | Contrary to popular belief, most apes prefer mangoes.
```
And here is a better alternative:
```markdown
## Fruits
### Apple
* [Juicy](https://SomeReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyReallyLongURL)
* Firm
* Sweet
Apples keep doctors away.
### Banana
* [Convenient](https://example.com/SomeDifferentReallyReallyReallyReallyReallyReallyReallyReallyLongQuery)
* Soft
* Sweet
Contrary to popular belief, most apes prefer mangoes.
```
#### Strongly prefer Markdown to HTML
Please prefer standard Markdown syntax wherever possible and avoid HTML hacks.
If you can not seem to accomplish what you want, reconsider whether you really need it.
Except for big tables, Markdown meets almost all needs already.
Every bit of HTML or Javascript hacking reduces the readability and portability.
This in turn limits the usefulness of integrations with other tools, which may either present the source as plain text or render it.
#### Spacing
* Remove all trailing whitespaces at end of lines.
* Remove instances of multiple consecutive blank lines.
* Files should end with a single newline character.
## Validation Requirements
Ensure compliance with the following validation requirements:
- **Front Matter**: Include the following fields in the YAML front matter:
- `post_title`: The title of the post.
- `author1`: The primary author of the post.
- `post_slug`: The URL slug for the post.
- `microsoft_alias`: The Microsoft alias of the author.
- `featured_image`: The URL of the featured image.
- `categories`: The categories for the post. These categories must be from the list in /categories.txt.
- `tags`: The tags for the post.
- `ai_note`: Indicate if AI was used in the creation of the post.
- `summary`: A brief summary of the post. Recommend a summary based on the content when possible.
- `post_date`: The publication date of the post.
- **Content Rules**: Ensure that the content follows the markdown content rules specified above.
- **Formatting**: Ensure that the content is properly formatted and structured according to the guidelines.
- **Validation**: Run the validation tools to check for compliance with the rules and guidelines.
## Admonitions
Use GitHub-flavored markdown for admonitions: NOTE, WARNING, TIP, IMPORTANT, CAUTION.
Examples:
```markdown
> [!NOTE]
> Highlights information that users should take into account, even when skimming.
> [!TIP]
> Optional information to help a user be more successful.
> [!IMPORTANT]
> Crucial information necessary for users to succeed.
> [!WARNING]
> Critical content demanding immediate user attention due to potential risks.
> [!CAUTION]
> Negative potential consequences of an action.
```
-56
View File
@@ -1,56 +0,0 @@
name: Go Build
on:
push:
branches:
- main
pull_request:
jobs:
build:
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Install Go
id: install-go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
cache: false
- name: Cache Go mod
id: gomod
uses: actions/cache@v5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-mod-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-mod-
- name: Cache Go build
uses: actions/cache@v5
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-go-build-${{ github.ref_name }}
restore-keys: |
${{ runner.os }}-go-build-
- name: Download dependencies
run: go mod download
if: steps.gomod.outputs.cache-hit != 'true'
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v7
env:
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
with:
distribution: goreleaser
version: ${{ inputs.GORELEASER_VERSION }}
args: release --clean --snapshot --skip=docker
-47
View File
@@ -1,47 +0,0 @@
name: Release
on:
push:
tags:
- '*'
jobs:
release:
runs-on: ubuntu-latest
permissions:
contents: write
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Install Go
id: install-go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Download dependencies
run: go mod download
if: steps.install-go.outputs.cache-hit != 'true'
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v7
env:
GORELEASER_CURRENT_TAG: ${{ github.ref_name }}
DOCKER_REPOSITORY: ullaakut/cameradar
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GORELEASER_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
with:
distribution: goreleaser
version: ${{ inputs.GORELEASER_VERSION }}
args: release --clean
-70
View File
@@ -1,70 +0,0 @@
name: Test
on:
push:
branches:
- main
pull_request:
jobs:
test:
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
steps:
- name: Checkout code
uses: actions/checkout@v6
# Go Test looks at `mtime` for caching. `git clone` messes with this. Set it consistently to last commit time.
- name: Restore file modification time
run: git ls-files -z | while read -d '' path; do touch -d "$(git log -1 --format="@%ct" "$path")" "$path"; done
# We need to set a cache marker to ensure that the cache is individual for each job.
- name: Add Cache Marker
run: echo "go-test" > env.txt
- name: Install Go
id: install-go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
cache-dependency-path: |
go.sum
env.txt
# We trigger mod download separately as otherwise it will count towards
# the 1 minute default timeout of golangci-lint. Only needed if there is no cache.
- name: Download dependencies
run: go mod download
if: steps.install-go.outputs.cache-hit != 'true'
- name: Run Linter
uses: golangci/golangci-lint-action@v9
with:
version: v2.7.2
- name: Setup gotestsum
uses: gertd/action-gotestsum@v3.0.0
with:
gotestsum_version: v1.13.0
- name: Download nmap
run: sudo apt-get install -y nmap
- name: Run Tests
env:
TEST_DIR: ${{ inputs.TEST_DIR }}
run: |
GOTESTSUM_FLAGS="--junitfile tests.xml --format pkgname -- -cover -race"
if [ -z "$TEST_DIR" ]; then
gotestsum $GOTESTSUM_FLAGS ./...
else
gotestsum $GOTESTSUM_FLAGS ./$TEST_DIR/...
fi
- name: Test Summary
uses: test-summary/action@v2
with:
paths: "tests.xml"
if: always()
+46 -5
View File
@@ -1,6 +1,47 @@
# IDE config
.idea/
.vscode/
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Builds
dist/
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# Results
result.json
test-results.xml
# Build
build/
# JetBrains
.idea/
# Deps
deps/boost/
deps/jsoncpp/
mysql-connector/
# Test
test/cameradartest.conf.json
test/cameradar_*_Debug_Linux.tar.gz
-70
View File
@@ -1,70 +0,0 @@
version: "2"
run:
tests: false
linters:
default: all
disable:
- depguard
- dupl
- err113
- exhaustive
- exhaustruct
- forcetypeassert
- funcorder
- funlen
- gochecknoglobals
- gochecknoinits
- gocyclo
- godox
- gomoddirectives
- inamedparam
- ireturn
- mnd
- nilnil
- nlreturn
- nonamedreturns
- tagliatelle
- varnamelen
- wrapcheck
- wsl
- wsl_v5
settings:
cyclop:
max-complexity: 15
gosec:
excludes:
- G101
- G304
- G402
lll:
line-length: 160
tagliatelle:
case:
rules:
json: pascal
use-field-name: true
exclusions:
generated: lax
rules:
- path: (.+)\.go$
text: 'ST1000: at least one file in a package should have a package comment'
- path: (.+)\.go$
text: 'package-comments: should have a package comment'
- path: (.+)\.go$
text: 'Error return value of `.+\.Close` is not checked'
- linters:
- cyclop
path: (.+)_test\.go
paths: []
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
settings:
gofumpt:
extra-rules: true
exclusions:
generated: lax
paths: []
-100
View File
@@ -1,100 +0,0 @@
version: 2
project_name: cameradar
dist: dist/cameradar
env:
- GO111MODULE=on
before:
hooks:
- go mod download
builds:
- binary: cameradar
main: ./cmd/cameradar
env:
- CGO_ENABLED=0
goos:
- windows
- darwin
- linux
goarch:
- amd64
- 386
- arm
- arm64
goarm:
- 6
- 7
ignore:
- goos: darwin
goarch: 386
changelog:
disable: true
checksum:
name_template: "{{ .ProjectName }}_checksums.txt"
archives:
- name_template: "{{ .Binary }}_{{ .Os }}_{{ .Arch }}{{ if .Arm}}v{{ .Arm }}{{ end }}"
formats:
- tar.gz
format_overrides:
- goos: windows
format: zip
dockers:
- image_templates:
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-amd64"
- "ullaakut/{{ .ProjectName }}:latest-amd64"
dockerfile: Dockerfile
use: buildx
goos: linux
goarch: amd64
- image_templates:
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-386"
- "ullaakut/{{ .ProjectName }}:latest-386"
dockerfile: Dockerfile
use: buildx
goos: linux
goarch: 386
- image_templates:
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv6"
- "ullaakut/{{ .ProjectName }}:latest-armv6"
dockerfile: Dockerfile
use: buildx
goos: linux
goarch: arm
goarm: 6
- image_templates:
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv7"
- "ullaakut/{{ .ProjectName }}:latest-armv7"
dockerfile: Dockerfile
use: buildx
goos: linux
goarch: arm
goarm: 7
- image_templates:
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-arm64"
- "ullaakut/{{ .ProjectName }}:latest-arm64"
dockerfile: Dockerfile
use: buildx
goos: linux
goarch: arm64
docker_manifests:
- name_template: "ullaakut/{{ .ProjectName }}:v{{ .Version }}"
image_templates:
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-amd64"
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-386"
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv6"
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-armv7"
- "ullaakut/{{ .ProjectName }}:v{{ .Version }}-arm64"
- name_template: "ullaakut/{{ .ProjectName }}:latest"
image_templates:
- "ullaakut/{{ .ProjectName }}:latest-amd64"
- "ullaakut/{{ .ProjectName }}:latest-386"
- "ullaakut/{{ .ProjectName }}:latest-armv6"
- "ullaakut/{{ .ProjectName }}:latest-armv7"
- "ullaakut/{{ .ProjectName }}:latest-arm64"
+58
View File
@@ -0,0 +1,58 @@
language: generic
sudo: required
dist: trusty
before_install:
- echo "Testing Docker Hub credentials"
- docker login -u=$DOCKER_USERNAME -p=$DOCKER_PASSWORD
- echo "Docker Hub credentials are working"
- sudo apt-get update -qq
- sudo apt-get install -y software-properties-common
- sudo add-apt-repository -y ppa:mc3man/trusty-media
- sudo add-apt-repository -y ppa:george-edison55/cmake-3.x
- sudo apt-get update -qq
- sudo apt-get install -y nmap
- sudo apt-get install -y ffmpeg
- sudo apt-get install -y cmake
- sudo apt-get install -y libboost-all-dev
- sudo apt-get install -y libgstreamer1.0-dev
- sudo apt-get install -y gstreamer1.0-plugins-base
- sudo apt-get install -y gstreamer1.0-plugins-good
- sudo apt-get install -y libcurl4-openssl-dev
- sudo apt-get install -y libmysqlclient18
- sudo apt-get install -y mysql-client
install:
- export DEPS_DIR="${TRAVIS_BUILD_DIR}/deps"
- export PACKAGE_NAME="cameradar_*_Debug_Linux"
matrix:
include:
- os: linux
env: TEST_TYPE='BUILD' WORKDIR='deployment' COMPILER_NAME=gcc CXX=g++-5 CC=gcc-5 CMAKE_CXX_COMPILER=g++-5
addons:
apt:
packages:
- g++-5
sources: &sources
- ubuntu-toolchain-r-test
- os: linux
env: TEST_TYPE='TEST' WORKDIR='test' COMPILER_NAME=gcc CXX=g++-5 CC=gcc-5 CMAKE_CXX_COMPILER=g++-5
addons:
apt:
packages:
- g++-5
sources: &sources
- ubuntu-toolchain-r-test
script:
- cd ${WORKDIR}
- ./build_last_package.sh Debug
- tar xvf ${PACKAGE_NAME}.tar.gz
- find ${DEPS_DIR} -name "*.so*" -exec cp {} ${PACKAGE_NAME}/libraries \;
- tar -czvf ${PACKAGE_NAME}.tar.gz ${PACKAGE_NAME}
- if [[ "$TEST_TYPE" == "BUILD" ]]; then docker build -t cameradar . && docker run -v /tmp/thumbs:/tmp/thumbs cameradar; else ./test.sh ; fi
after_success:
- echo "Test Success - Branch($TRAVIS_BRANCH) Pull Request($TRAVIS_PULL_REQUEST) Tag($TRAVIS_TAG)"
- if [[ "$TRAVIS_BRANCH" == "master" ]]; then echo -e "Push Container to Docker Hub" && docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD && docker tag cameradar $DOCKER_REPO:latest && docker push $DOCKER_REPO; fi
+186
View File
@@ -0,0 +1,186 @@
# Cameradar Changelog
This file lists all versions of the repository and details all changes.
## v1.1.3
#### Minor changes :
* Added automatic pushes to DockerHub to the travis integration
* Made travis configuration file better
* Changed the package generation scripts to make them report errors
* Removed old etix_rtsp_server binary from the test folder
#### Bugfixes :
* Fixed an issue that made it mandatory to launch tests at least once so that they can work the second time
* Fixed an issue that made the golang testing tool not compile in the testing script
* Fixed an issue that made the golang testing tool sometimes ignore some tests
* The previous known issue has been investigated and we don't know where it came from. However after a night of testing I have been unable to reproduce it, so I will consider it closed
## v1.1.2
#### Minor changes :
* Added travis integration
* Added default environment value for Docker deployment
* Updated docker image description with new easy usage
* Updated README badges style (replaced flat with square-flat)
* Build last package can now also generate a debug package if given the `Debug` command-line argument
#### Known issues :
* There is still the issue with Camera Emulation Server, see the [previous version's patchnote](#v1.1.1) for more information.
## v1.1.1
#### Minor changes :
* Removed unnecessary null pointer checks (thanks to https://github.com/elfring)
* Updated package description
* Removed debug message in CMake build
* Added `/ch01.264` to the URL dictionary in the deployment (Comelit default RTSP URL)
* Updated tests partially (still needs work to make the code cleaner)
* Variable names are now compliant with Golang best practices
* JSON variable names are back to normal
* Functions have been moved in more appropriate source files
* Structure definitions have been moved in more appropriate source files
* Source files have been renamed to be more relevant
* JUnit output now considers each camera as a test case
* JUnit output now contains errors which makes debugging much easier
* Added header files where it was forgotten
#### Bugfixes :
* Fixed an issue where if you lose your internet connection during thumbnail generation, FFMpeg would get stuck forever and thus Cameradar would never finish
* Fixed an issue where multithreading could cause crashes
* Fixed an issue where the routes dictionary was mistaken for the credentials dictionary
* Fixed issues with the golang testing tool
* Fixed automated camera generation
* Fixed docker IP address resolution
#### Known issues :
* There is an issue with Camera Emulation Server that makes it impossible for Cameradar to generate thumbnails, which is why right now the verification of the thumbnails presence is commented and it is assumed correct. It is probably an issue with GST-RTSP-Server but requires investigation.
## v1.1.0
#### Major changes :
* There are more command line options
* Port can now be overridden in the command line
* Subnet can now be overridden in the command line
* Bruteforce is now multithreaded and will use as many threads as there are discovered cameras
* Thumbnail generation is now multithreaded and will use as many threads as there are discovered cameras
* There are now default configuration values in order to make cameradar easier to use
#### Minor changes :
* The algorithms take external input into account (so that a 3rd party can change the DB to help Cameradar in real-time) and thus check the persistent data at each iteration
* The default log level is now DEBUG instead of INFO
* The bruteforce logs are now INFO instead of DEBUG
* The thumbnail generation logs are now INFO instead of DEBUG
#### Bugs fixed
* Fixed a bug in which the MySQL cache manager would consider a camera with known ids as having a valid path even if it weren't
* Fixed a bug in which TCP RTSP streams would not generate thumbnails
## v1.0.5
* Fixed error in MySQL Cache Manager in which thumbnail generation on valid streams could not be done
* Fixed potential crash in the case the machine running cameradar has no memory left to allocate space for the dynamic cache manager
## v1.0.4
#### Bugs fixed :
* Fixed nmap package detection
## v1.0.3
#### Bugs fixed :
* Corrected GStreamer check
## v1.0.2
#### Bugs fixed :
* Fixed issues in MySQL Cache Manager
#### Minor changes :
* Added useful debug logs
## v1.0.1
### Ubuntu 16.04 Release
#### Major changes :
* The Docker deployment is now done using Ubuntu 16.04 instead of Ubuntu 15.10, so that it uses more recent packages.
#### Minor changes :
* Removed useless dependencies
## v1.0.0
### First production-ready release
#### Major changes :
* Added functional testing
## v0.2.2
After doing some testing on a weirdly configured camera network in a far away Datacenter, I discovered that some Cameras needed a few tweaks to the Cameradar bruteforcing method in order to be accessed.
#### Major changes :
* Cameradar can access Cameras that are configured to always send 400 Bad Requests responses
#### Minor changes :
* Changed iterator name from `it` to `stream` in dumb cache manager to improve code readability
#### Bugfixes :
* Cameradar no longer considers a timing out Camera as an accessible stream
## v0.2.1
This package fixes the Docker deployment package.
#### Minor changes
* Fixed the Docker deployment package
* Updated README
## v0.2.0
### MySQL Cache Manager Release
This package adds a new cache manager using a MySQL database, which can store the results between multiple uses.
#### Major changes
* Added a MySQL Cache Manager
#### Minor changes
* Removed legacy code
* Removed boost dependency
* Improved debugging logs
## v0.1.1
### Docker release
This package adds a way to deploy Cameradar using Docker.
#### Major changes
* Added a quick Docker deployment process
* Added automatic dependencies downloading through CMake for the manual installation
* Added CPack packaging for the Docker deployment
#### Minor changes
* Changed recommended cloning method to HTTPS
* Added lots of information to README.md
## v0.1.0
This package was the first OpenSource version of Cameradar. It contained only a simple cache manager and had some bugs.
+130
View File
@@ -0,0 +1,130 @@
## Copyright 2016 Etix Labs
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
cmake_minimum_required (VERSION 2.8.1)
cmake_policy(SET CMP0048 OLD)
set (PROJECT_NAME cameradar)
project (${PROJECT_NAME})
set (${PROJECT_NAME}_VERSION_MAJOR 1)
set (${PROJECT_NAME}_VERSION_MINOR 1)
set (${PROJECT_NAME}_VERSION_PATCH 3)
set (${PROJECT_NAME}_VERSION "${${PROJECT_NAME}_VERSION_MAJOR}.${${PROJECT_NAME}_VERSION_MINOR}.${${PROJECT_NAME}_VERSION_PATCH}${${PROJECT_NAME}_SUFFIX}")
find_package(Git REQUIRED)
# compiler flags
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14") #enable C++14
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wall -Wextra -Wno-unused-function") # extra warnings
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-color") #enable error coloration on gcc
# release specific flags
set (CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O2")
#debug specific flags
set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g -fprofile-arcs -ftest-coverage")
# rpath enable osx
set(CMAKE_MACOSX_RPATH 1)
# list of all cache managers
set (CAMERADAR_CACHE_MANAGERS "")
# dependencies directory
set(DEPS_DIR ${CMAKE_SOURCE_DIR}/deps)
# output path for cache managers
set (CAMERADAR_CACHE_MANAGER_OUTPUT_FOLDER cache_managers)
set (CAMERADAR_CACHE_MANAGER_OUTPUT_PATH ${CMAKE_BINARY_DIR}/${CAMERADAR_CACHE_MANAGER_OUTPUT_FOLDER})
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
# the place where the version.h file is generated, used from the main.cpp of cameradar
set (VERSION_INCLUDE_DIR ${PROJECT_BINARY_DIR})
# get the git revision
message (STATUS "retrieve current git revision SHA1 of cameradar")
execute_process(
COMMAND "git" "rev-parse" "HEAD"
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE CAMERADAR_GIT_SHA1
)
# remove last character of the git output (\n)
string(LENGTH ${CAMERADAR_GIT_SHA1} CAMERADAR_GIT_SHA1_LEN)
math(EXPR CAMERADAR_GIT_SHA1_LEN "${CAMERADAR_GIT_SHA1_LEN} - 1")
string(SUBSTRING ${CAMERADAR_GIT_SHA1} 0 ${CAMERADAR_GIT_SHA1_LEN} CAMERADAR_GIT_SHA1)
# print the SHA1
message (STATUS "current cameradar git revision SHA1 is ${CAMERADAR_GIT_SHA1}")
# generate build number from the current timestamp
string(TIMESTAMP CAMERADAR_VERSION_BUILD "%Y%m%d%H%M%S" "UTC")
# print version
message (STATUS "current cameradar build version will be ${CAMERADAR_VERSION_BUILD}")
configure_file (
"${PROJECT_SOURCE_DIR}/version.h.in"
"${PROJECT_BINARY_DIR}/version.h"
)
# add all deps libraries to the link directories path
link_directories (
# third party libraries
"deps/jsoncpp/src/deps.jsoncpp/src/lib_json"
"deps/boost/src/deps.boost/libs"
"deps/mysql-connector/lib"
)
include_directories (
"cameradar_standalone/include"
"deps/jsoncpp/src/deps.jsoncpp/include"
"deps/boost/src/deps.boost/include"
"deps/mysql-connector/include"
)
set (${CAMERADAR_BINARIES} "")
set (${CAMERADAR_LIBRARIES} "")
# Build cache managers
add_subdirectory (deps)
add_subdirectory (cameradar_standalone)
add_subdirectory (cache_managers)
list (APPEND CAMERADAR_LIBRARIES ${CAMERADAR_INSTALL_DEPENDENCIES} ${CAMERADAR_LIBRARIES})
install (PROGRAMS ${CAMERADAR_BINARIES} DESTINATION bin)
install (FILES ${CAMERADAR_CACHE_MANAGERS} DESTINATION cache_managers)
install (FILES ${CAMERADAR_LIBRARIES} DESTINATION libraries)
install (DIRECTORY ${CMAKE_SOURCE_DIR}/deps/licenses DESTINATION libraries)
# CPack configuration
include (InstallRequiredSystemLibraries)
set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "cameradar")
set (CPACK_PACKAGE_VENDOR "Etix Labs")
set (CPACK_PACKAGE_DESCRIPTION_SUMMARY "Cameradar hacks its way into RTSP CCTV cameras")
set (CPACK_PACKAGE_FILE_NAME "${PROJECT_NAME}_${${PROJECT_NAME}_VERSION}_${CMAKE_BUILD_TYPE}_${CMAKE_SYSTEM_NAME}")
set (CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
set (CPACK_PACKAGE_VERSION_MAJOR "0")
set (CPACK_PACKAGE_VERSION_MINOR "2")
set (CPACK_PACKAGE_VERSION_PATCH "2")
set (CPACK_PACKAGE_INSTALL_DIRECTORY "${PROJECT_NAME}_${${PROJECT_NAME}_VERSION}")
set (CPACK_GENERATOR "TGZ")
set (CPACK_SOURCE_GENERATOR "TGZ")
include(CPack)
-128
View File
@@ -1,128 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
`contact+cameradar@glaulabs.com`.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
-70
View File
@@ -1,70 +0,0 @@
## Contributing
Thanks for helping improve Cameradar.
Please keep changes focused and aligned with the project goals.
## Development setup
- Go 1.25 or later
- Docker (optional, for container testing)
Clone the repo and install dependencies using Go modules.
```bash
go mod download
```
## Run tests
```bash
make test
```
## Formatting and linting
Keep code idiomatic and consistent with existing style.
By default, follow the [Uber Go Style Guide](https://github.com/uber-go/guide) and the guidelines from [Effective Go](https://go.dev/doc/effective_go).
```bash
make fmt
```
### Dependency for linting
* golangci-lint
* see current version defined in `.github/workflows/test.yaml` at `jobs.tests.steps.["Run linter"]`
* configured in `.golangci.yml`
```bash
make lint
```
## Commit messages and PR titles
Use [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) for commit messages and pull request titles.
- Use the format: `type: subject`
- Write the subject in imperative mood: `add`, `update`, `remove`, `fix`, `refactor`
- Do not use gerunds in subjects: avoid `adding`, `updating`, `removing`
Examples:
- `feat: add RTSP timeout flag`
- `fix: remove duplicate progress line`
- `docs: update commit message guidelines`
## Reporting issues
Use the issue template in [.github/ISSUE_TEMPLATE.md](.github/ISSUE_TEMPLATE.md).
Include the version, environment, and repro steps.
Only scan authorized targets.
## Pull requests
1. Create a feature branch from `master`.
2. Keep PRs focused and small.
3. Update documentation when behavior changes.
4. Add or update tests when possible.
5. Ensure `make test` passes.
6. Try to bring as much test coverage as possible with your changes.
7. Use a Conventional Commit-style PR title with an imperative subject.
BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 220 KiB

-17
View File
@@ -1,17 +0,0 @@
FROM alpine
RUN apk --update add --no-cache nmap \
nmap-nselibs \
nmap-scripts \
masscan \
libpcap \
libpcap-dev
WORKDIR /app/cameradar
COPY cameradar /app/cameradar/cameradar
ENV CAMERADAR_CUSTOM_ROUTES="/app/dictionaries/routes"
ENV CAMERADAR_CUSTOM_CREDENTIALS="/app/dictionaries/credentials.json"
ENTRYPOINT ["/app/cameradar/cameradar"]
+199 -15
View File
@@ -1,17 +1,201 @@
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-28
View File
@@ -1,28 +0,0 @@
# set this e.g. via `make build GORELEASER_FLAGS="--skip=docker"` for temporary flags
GORELEASER_FLAGS=
#Format
fmt:
@echo "==> Formatting source"
@gofmt -s -w $(shell find . -type f -name '*.go')
@echo "==> Done"
.PHONY: fmt
#Test
test:
@go test -cover -race ./...
.PHONY: test
#Lint
lint:
@golangci-lint run --config=.golangci.yml ./...
.PHONY: lint
#Build
build:
@goreleaser release $(GORELEASER_FLAGS) --clean --snapshot
.PHONY: build
+237 -243
View File
@@ -1,328 +1,322 @@
## Cameradar
# Cameradar
<p align="center">
<a href="#license">
<img src="https://img.shields.io/badge/license-MIT-blue.svg?style=flat" />
</a>
<a href="https://hub.docker.com/r/ullaakut/cameradar/">
<img src="https://img.shields.io/docker/pulls/ullaakut/cameradar.svg?style=flat" />
</a>
<a href="https://github.com/Ullaakut/cameradar/actions">
<img src="https://img.shields.io/github/actions/workflow/status/Ullaakut/cameradar/build.yaml" />
</a>
<a href='https://coveralls.io/github/Ullaakut/cameradar?branch=master'>
<img src='https://coveralls.io/repos/github/Ullaakut/cameradar/badge.svg?branch=master' alt='Coverage Status' />
</a>
<a href="https://goreportcard.com/report/github.com/ullaakut/cameradar">
<img src="https://goreportcard.com/badge/github.com/ullaakut/cameradar" />
</a>
<a href="https://github.com/ullaakut/cameradar/releases/latest">
<img src="https://img.shields.io/github/release/Ullaakut/cameradar.svg?style=flat" />
</a>
<a href="https://pkg.go.dev/github.com/ullaakut/cameradar">
<img src="https://godoc.org/github.com/ullaakut/cameradar?status.svg" />
</a>
</p>
## An RTSP surveillance camera access multitool
## RTSP stream access tool
[![cameradar License](https://img.shields.io/badge/license-Apache-blue.svg?style=flat-square)](#license)
[![Docker Pulls](https://img.shields.io/docker/pulls/ullaakut/cameradar.svg?style=flat-square)](https://hub.docker.com/r/ullaakut/cameradar/)
[![Build](https://img.shields.io/travis/EtixLabs/cameradar/master.svg?style=flat-square)](https://travis-ci.org/EtixLabs/cameradar)
[![Latest release](https://img.shields.io/badge/release-1.1.3-green.svg?style=flat-square)](https://github.com/EtixLabs/cameradar/releases/latest)
Cameradar scans RTSP endpoints on authorized targets, and uses dictionary attacks to bruteforce their credentials and routes.
### What Cameradar does
#### Cameradar allows you to:
- Detects open RTSP hosts on accessible targets.
- Detects the device model that streams the RTSP feed.
- Attempts dictionary-based discovery of stream routes (for example, `/live.sdp`).
- Attempts dictionary-based discovery of camera credentials.
- Produces a report of findings.
* **Detect open RTSP hosts** on any accessible subnetwork
* Get their public info (hostname, port, camera model, etc.)
* Bruteforce your way into them to get their **stream route** (for example /live.sdp)
* Bruteforce your way into them to get the **username and password** of the cameras
* **Generate thumbnails** from them to check if the streams are valid and to have a quick preview of their content
* Try to create a Gstreamer pipeline to check if they are **properly encoded**
* Print a summary of all the information Cameradar could get
<p align="center"><img src="images/Cameradar.png" width="250"/></p>
#### And all of this in a _single command-line_.
## Table of contents
Of course, you can also call for individual tasks if you plug in a Database to Cameradar using the MySQL cache manager for example. You can create your own cache manager by following the simple example of the **dumb cache manager**.
- [Quick start with Docker](#quick-start-with-docker)
- [Install the binary](#install-the-binary)
- [Install on Android (Termux)](#install-on-android-termux)
<p align="center"><img src="https://raw.githubusercontent.com/EtixLabs/cameradar/master/Cameradar.png" width="350"/></p>
## Table of contents
- [Docker Image](#docker-image)
- [Quick install](#quick-install)
- [Dependencies](#quick-install###dependencies)
- [Five steps guide](#quick-install###five-steps-guide)
- [Manual installation](#manual-installation)
- [Dependencies](#manual-installation###dependencies)
- [Steps](#manual-installation###Steps)
- [Advanced docker deployment](#advanced-docker-deployment)
- [Dependencies](#advanced-docker-deployment###dependencies)
- [Deploy a custom version of Cameradar](#advanced-docker-deployment###deploy-a-custom-version-of-cameradar)
- [Configuration](#configuration)
- [Security and responsible use](#security-and-responsible-use)
- [Output](#output)
- [Check camera access](#check-camera-access)
- [Command-line options and environment variables](#command-line-options-and-environment-variables)
- [Input file format](#input-file-format)
- [Build and contribute](#build-and-contribute)
- [Frequently asked questions](#frequently-asked-questions)
- [Examples](#examples)
- [Command line options](#command-line-options)
- [Under the hood](#under-the-hood)
- [Contribution](#contribution)
- [Next improvements](#next-improvements)
- [Frequently Asked Questions](#frequently-asked-questions)
- [License](#license)
---
## Docker Image
<p align="center"><img src="images/example.gif"/></p>
This is the fastest and simplest way to use Cameradar. To do this you will just need `docker` on your machine.
## Quick start with Docker
Run
Install [Docker](https://docs.docker.com/engine/installation/) and run:
```bash
docker run --rm -t --net=host ullaakut/cameradar --targets <target>
```
docker run \
-v /tmp/thumbs/:/tmp/thumbs \
-e CAMERAS_SUBNETWORKS=your_subnetwork \
ullaakut/cameradar:tag
```
Example:
* `your_subnetwork` can be a subnet (e.g.: `172.16.100.0/24`) or even an IP (e.g.: `172.16.100.10`).
* `tag` allows you to specify a specific version of Cameradar. If you don't specify any tag, you will use the latest version by default (recommended).
```bash
docker run --rm -t --net=host ullaakut/cameradar --targets 192.168.100.0/24
```
Check [Cameradar's readme on the Docker Hub](https://hub.docker.com/r/ullaakut/cameradar/) for more information and more command-line options.
This scans ports 554, 5554, and 8554 on the target subnet.
It attempts to enumerate RTSP streams.
For all options, see [Configuration reference](https://github.com/Ullaakut/cameradar/wiki/Configuration-Reference).
The generated thumbnails will be in `/tmp/thumbs` on both your machine and the `cameradar` container.
- Targets can be CIDRs, IPs, IP ranges or a hostname.
- Subnet: `172.16.100.0/24`
- IP: `172.16.100.10`
- Host: `localhost`
- Range: `172.16.100.10-20`
For more complex use of the Docker image, see the `Environment variables` part of [Cameradar's readme on the Docker Hub](https://hub.docker.com/r/ullaakut/cameradar/).
- To use custom dictionaries, mount them and pass both flags:
## Quick install
```bash
docker run --rm -t --net=host \
-v /path/to/dictionaries:/tmp/dictionaries \
ullaakut/cameradar \
--custom-routes /tmp/dictionaries/my_routes \
--custom-credentials /tmp/dictionaries/my_credentials.json \
--targets 192.168.100.0/24
```
## Install the binary
Use this option if Docker is not available or if you want a local build.
The quick install uses docker to build Cameradar without polluting your machine with dependencies and makes it easy to deploy Cameradar in a few commands. **However, it may require networking knowledge, as your docker containers will need access to the cameras subnetwork.**
### Dependencies
- Go 1.25 or later
The only dependencies are `docker`, `docker-tools`, `git` and `make`.
### Five steps guide
1. `git clone https://github.com/EtixLabs/cameradar.git`
2. Go into the Cameradar repository, then to the `deployment` directory
3. Tweak the `conf/cameradar.conf.json` as you need (see [the configuration guide here](#configuration) for more information)
4. Run `docker-compose build && docker-compose up`
By default, the version of the package in the deployment should be the last stable release.
If you want to scan a different subnetwork or different ports, change the values `CAMERAS_SUBNETWORKS` and `CAMERAS_PORTS` in the `docker-compose.yml` file.
The generated thumbnails will be in the `cameradar_thumbnails` folder after cameradar has finished executing.
If you want to deploy your custom version of Cameradar using the same method, you should check the [advanced docker deployment](#advanced-docker-deployment) tutorial here.
## Manual installation
The manual installation is recommended if you want to tweak Cameradar and quickly test it using CMake and running Cameradar in command-line. If you just want to use Cameradar, it is recommended to use the [quick install](#quick-install) instead.
### Dependencies
To install Cameradar you will need these packages
* cmake (`cmake`)
* git (`git`)
* gstreamer1.x (`libgstreamer1.0-dev`)
* ffmpeg (`ffmpeg`)
* boost (`libboost-all-dev`)
* libcurl (`libcurl4-openssl-dev`)
### Steps
1. `go install github.com/Ullaakut/cameradar/v6/cmd/cameradar@latest`
The simplest way would be to follow these steps :
The `cameradar` binary is now in your `$GOPATH/bin`.
For available flags, see [Configuration reference](https://github.com/Ullaakut/cameradar/wiki/Configuration-Reference).
1. `git clone https://github.com/EtixLabs/cameradar.git`
2. `mkdir build`
3. `cd build`
4. `cmake ..`
5. `make`
6. `cd cameradar_standalone`
7. `./cameradar -s the_subnet_you_want_to_scan`
## Install on Android (Termux)
## Advanced Docker deployment
These steps summarize a working Termux setup for Android.
Use Termux 117 from F-Droid or the official Termux site, not Google Play.
In case you want to use Docker to deploy your custom version of Cameradar.
### 1) Set up Termux and Alpine
### Dependencies
Install the required packages in Termux:
The only dependencies are `docker` and `docker-compose`.
```bash
pkg update
pkg install mc wget git nmap proot-distro
### Using the package generation script
1. `git clone https://github.com/EtixLabs/cameradar.git`
2. `cd deployment`
3. `rm *.tar.gz`
4. `./build_last_package.sh`
5. `docker-compose build cameradar`
6. `docker-compose up cameradar`
### Deploy a custom version of Cameradar by hand
1. `git clone https://github.com/EtixLabs/cameradar.git`
2. `cd build`
3. `cmake .. -DCMAKE_BUILD_TYPE=Release`
4. `make package`
5. `cp cameradar_*_Release_Linux.tar.gz ../deployment`
6. `cd ../deployment`
7. `docker-compose build cameradar`
8. `docker-compose up cameradar`
### Configuration
Here is the basic content of the configuration file with simple placeholders :
```json
{
"mysql_db" : {
"host" : "MYSQL_SERVER_IP_ADDRESS",
"port" : MYSQL_SERVER_PORT,
"user": "root",
"password": "root",
"db_name": "cmrdr"
},
"subnets" : "SUBNET1,SUBNET2,SUBNET3,[...]",
"ports" : "PORT1,PORT2,[...]",
"rtsp_url_file" : "conf/url.json",
"rtsp_ids_file" : "conf/ids.json",
"thumbnail_storage_path" : "/valid/path/to/a/storage/directory",
"cache_manager_path" : "../cache_managers/dumb_cache_manager",
"cache_manager_name" : "dumb"
}
```
Install Alpine and log in:
This **configuration is needed only if you want to overwrite the default values**, which are :
```bash
proot-distro install alpine
proot-distro login alpine
```json
{
"subnets" : "localhost",
"ports" : "554,8554",
"rtsp_url_file" : "conf/url.json",
"rtsp_ids_file" : "conf/ids.json",
"thumbnail_storage_path" : "/tmp",
"cache_manager_path" : "../cache_managers/dumb_cache_manager",
"cache_manager_name" : "dumb"
}
```
### 2) Install build tools in Alpine
This means that **by default Cameradar will not use a database**, will scan localhost and the ports 554 (default RTSP port) and 8554 (default emulated RTSP port), use the default constructor dictionaries and store the thumbnails in `/tmp`. If you need to override simply the subnets or ports, you can use the [command line options](#command-line-options).
```bash
apk add wget git go gcc clang musl-dev make
The subnetworks should be passed separated by commas only, and their subnet format should be the same as used in nmap.
```json
"subnets" : "172.100.16.0/24,172.100.17.0/24,localhost,192.168.1.13"
```
### 3) Build Cameradar
The **RTSP ports for most cameras are 554**, so you should probably specify 554 as one of the ports you scan. Not giving any ports in the configuration will scan every port of every host found on the subnetworks.
Create a module path and clone the repo:
You **can use your own files for the ids and routes dictionaries** used to bruteforce the cameras, but the Cameradar repository already gives you a good base that works with most cameras.
```bash
mkdir -p go/pkg/mod/github.com/Ullaakut
cd go/pkg/mod/github.com/Ullaakut
git clone https://github.com/Ullaakut/cameradar.git
cd cameradar/cmd/cameradar
go install
```
The thumbnail storage path should be a **valid and accessible directory** in which the thumbnails will be stored.
### 4) Run Cameradar
Copy dictionaries and run the binary:
```bash
mkdir -p /tmp
cp -r ../../dictionaries /tmp/dictionaries
/go/bin/cameradar --targets=<target> --custom-credentials=/tmp/dictionaries/credentials.json --custom-routes=/tmp/dictionaries/routes --ui=plain --debug
```
Replace `<target>` with an IP, range, host or subnet you are authorized to test.
## Configuration
The default RTSP ports are `554`, `5554`, `8554`.
If you do not specify ports, Cameradar uses those.
Example of scanning custom ports:
```bash
docker run --rm -t --net=host \
ullaakut/cameradar \
--ports "18554,19000-19010" \
--targets localhost
```
You can replace the default dictionaries with your own routes and credentials files.
The repository provides baseline dictionaries in the `dictionaries` folder.
```bash
docker run --rm -t --net=host \
-v /my/folder/with/dictionaries:/tmp/dictionaries \
ullaakut/cameradar \
--custom-routes /tmp/dictionaries/my_routes \
--custom-credentials /tmp/dictionaries/my_credentials.json \
--targets 172.19.124.0/24
```
### Skip discovery with `--skip-scan`
If you already know the RTSP endpoints, you can skip discovery and treat each
target and port as a stream candidate. This mode does not run discovery and can be
useful on restricted networks or when you want to attack a known inventory.
Skipping discovery means:
- Cameradar does not run discovery and does not detect device models.
- Targets resolve to IP addresses. Hostnames resolve via DNS.
- CIDR blocks and IPv4 ranges expand to every address in the range.
- Large ranges create many targets, so use them carefully.
Example:
```bash
docker run --rm -t --net=host \
ullaakut/cameradar \
--skip-scan \
--ports "554,8554" \
--targets 192.168.1.10
```
In this example, Cameradar attempts dictionary attacks against
ports 554 and 8554 of `192.168.1.10`.
### Choose the discovery scanner with `--scanner`
Cameradar supports two discovery backends:
- `nmap` (default)
- `masscan`
Use `nmap` when you want more reliable RTSP discovery: it performs service
identification and can better distinguish RTSP from other open ports.
Use `masscan` when scanning very large networks: it is generally faster and
more efficient at scale, but it does not provide service discovery.
```bash
docker run --rm -t --net=host \
ullaakut/cameradar \
--scanner masscan \
--ports "554,8554" \
--targets 192.168.1.0/24
```
> [!WARNING]
> `--scan-speed` only applies to the `nmap` scanner.
## Security and responsible use
Cameradar is a penetration testing tool.
Only scan networks and devices you own or have explicit permission to test.
Do not use this tool to access unauthorized systems or streams.
If you are unsure, stop and get written approval before scanning.
The cache manager path and name variables are used to change the cache manager you want to load into Cameradar. If you want to, you can code your own cache manager using a database, a file, a remote server, [...]. Feel free to share it by creating a merge request on this repository if you developed a generic manager (It must not be specific to your company's infrastructure).
## Output
Cameradar presents results in a readable terminal UI.
It logs findings to the console.
The report includes discovered hosts, identified device models, and valid routes or credentials.
If you specify a path for the `--output` flag, Cameradar also writes an M3U playlist with the discovered streams.
For each camera, Cameradar will output these JSON objects :
```json
{
"address" : "173.16.100.45",
"ids_found" : true,
"password" : "123456",
"path_found" : true,
"port" : 554,
"product" : "Vivotek FD9381-HTV",
"protocol" : "tcp",
"route" : "/live.sdp",
"service_name" : "rtsp",
"state" : "open",
"thumbnail_path" : "/tmp/127.0.0.1/1463735257.jpg",
"username" : "admin"
}
```
## Check camera access
Use [VLC Media Player](http://www.videolan.org/vlc/) to connect to a stream:
If you have [VLC Media Player](http://www.videolan.org/vlc/), you should be able to use the GUI to connect to the RTSP stream using this format : `username:password@address:port/route`
`rtsp://username:password@address:port/route`
With the above result, the RTSP URL would be `admin:123456@173.16.100.45:554/live.sdp`
## Input file format
If you're still in your console however, you can go even faster by using **vlc in command-line** and just run `vlc username:password@address:port/route` with the camera's info instead of the placeholders.
The file can contain IPs, hostnames, IP ranges, and subnets.
Separate entries with newlines.
Example:
## Command line options
```text
0.0.0.0
localhost
192.17.0.0/16
192.168.1.140-255
192.168.2-3.0-255
```
* **"-c"** : Set a custom path to the configuration file (-c /path/to/conf)
* **"-s"** : Set custom subnets (overrides configuration)
* **"-p"** : Set custom ports (overrides configuration)
* **"-m"** : Set number of threads (*Default value : 1*)
* **"-l"** : Set log level
* **"-l 1"** : Log level DEBUG
* _Will print everything including debugging logs_
* **"-l 2"** : Log level INFO
* _Prints every normal information_
* **"-l 4"** : Log level WARNING
* _Only prints warning and errors_
* **"-l 5"** : Log level ERROR
* _Only prints errors_
* **"-l 6"** : Log level CRITICAL
* _Doesn't print anything since Cameradar can't have critical failures right now, however you can use this level to debug your own code easily or if you add new critical layers_
* **"-d"** : Launch the discovery tool
* **"-b"** : Launch the bruteforce tool on all discovered devices
* Needs either to be launched with the -d option or to use an advanced cache manager (DB, file, ...) with data already present
* **"-t"** : Generate thumbnails from detected cameras
* Needs either to be launched with the -d option or to use an advanced cache manager (DB, file, ...) with data already present
* **"-g"** : Check if the stream can be opened with GStreamer
* Needs either to be launched with the -d option or to use an advanced cache manager (DB, file, ...) with data already present
* **"-v"** : Display Cameradar's version
* **"-h"** : Display this help
* **"--gst-rtsp-server"** : Use this option if the bruteforce does not seem to work (only detects the username but not the path, or the opposite). This option will switch the order of the bruteforce to prioritize path over credentials, which is the way priority is handled for cameras that use GStreamer's RTSP server.
When you use `--skip-scan`, Cameradar expands each entry into explicit IP
addresses before building the target list.
## Under the hood
## Command-line options and environment variables
Cameradar uses **nmap** to map all of the subnetworks you specified in the configuration file (_cameradar.conf.json_), then parses its result to get all of the open RTSP streams that were detected.
The complete CLI and environment variable reference is maintained in [Configuration reference](https://github.com/Ullaakut/cameradar/wiki/Configuration-Reference).
After that, it uses **cURL** to send requests to the cameras and to try routes and ids for each camera until it is accessed or until all of the most used routes/ids (that you can modify in _conf/ids.json_ and _conf/url.json_) were tried
This includes all supported flags, defaults, accepted values, and env var mapping.
Then, it uses **FFMPEG** to generate a lightweight thumbnail from the stream, which you could use to get a quick preview of the camera's view.
## Build and contribute
Finally, it tries to access the stream using a simple **Gstreamer pipeline** to check for the stream's encoding.
### Docker build
The output of Cameradar will be printed on the standard output and will also be accessible in the result.json file.
Run the following command in the repository root:
Cameradar uses **nmap** to map all of the subnetworks you specified in the configuration file (_cameradar.conf.json_), then parses its result to get all of the open RTSP streams that were detected.
`docker build . -t cameradar`
After that, it uses **cURL** to send requests to the cameras and to try routes and ids for each camera until it is accessed or until all of the most used routes/ids (that you can modify in _conf/ids.json_ and _conf/url.json_) were tried
The resulting image is named `cameradar`.
Then, it uses **FFMPEG** to generate a lightweight thumbnail from the stream, which you could use to get a quick preview of the camera's view.
### Go build
Finally, it tries to access the stream using a simple **Gstreamer pipeline** to check for the stream's encoding.
1. `go install github.com/Ullaakut/cameradar/v6/cmd/cameradar@latest`
The output of Cameradar will be printed on the standard output and will also be accessible in the result.json file.
The `cameradar` binary is now in `$GOPATH/bin/cameradar`.
## Contribution
## Frequently asked questions
Well there are many things we could code in order to add features to Cameradar. Adding other protocols than RTSP would be really cool, as well as making more generic cache managers. Improving Cameradar's performance or even the deployment could also be a great help!
See [Troubleshooting & FAQ](https://github.com/Ullaakut/cameradar/wiki/Troubleshooting-%26-FAQ)
If you're not into software development or not into C++, even updating the dictionaries would be a really cool contribution! Just make sure the ids and routes you add are **default constructor credentials** and not custom credentials.
## Examples
If you have other cool ideas, feel free to share them with me at [brendan.leglaunec@etixgroup.com](mailto:brendan.leglaunec@etixgroup.com) !
> Running cameradar on your own machine to scan for default ports
## Next improvements
- [x] Add a docker deployment to avoid the current deps hell
- [x] Development of a MySQL cache manager
- [ ] Development of a JSON file cache manager
- [ ] Development of an XML file cache manager
- [ ] Make a standalone docker image
- [ ] Push to DockerHub
`docker run --rm -t --net=host ullaakut/cameradar --targets localhost`
## Frequently Asked Questions
> Running cameradar with an input file, logs enabled on port 8554
> My camera's credentials are guessed by Cameradar but the RTSP url is not!
`docker run --rm -t --net=host -v /tmp:/tmp ullaakut/cameradar --targets /tmp/test.txt --ports 8554`
Your camera probably uses GST RTSP Server internally. Try the `--gst-rtsp-server` command-line option, and if it does not work, send me the cameradar output in DEBUG mode (`-l 1`) and I will help you.
> Running cameradar on a subnetwork with custom dictionaries, on ports 554, 5554 and 8554
> Cameradar does not detect any camera!
`docker run --rm -t --net=host -v /tmp:/tmp ullaakut/cameradar --targets 192.168.0.0/24 --custom-credentials "/tmp/dictionaries/credentials.json" --custom-routes "/tmp/dictionaries/routes" --ports 554,5554,8554`
That means that either your cameras are not streaming in RTSP or that they are not on the subnetwork you are scanning. In most cases, CCTV cameras will be on a private subnetwork. Use the `-s` option to specify your camera's subnetwork.
> Running cameradar with masscan discovery
> Cameradar detects my cameras, but does not manage to access them at all!
`docker run --rm -t --net=host ullaakut/cameradar --scanner masscan --targets 192.168.0.0/24 --ports 554,8554`
Maybe your cameras have been configured and the credentials / URL have been changed. Cameradar only guesses using default constructor values. However, you can use your own dictionary in which you just have to add your passwords. To do that, see how the [configuration](#configuration) works.
> It does not compile
You probably missed the part with the dependencies! Use the quick docker deployment, it will be easier and will not pollute your machine with useless dependencies! `;)`
## License
Copyright 2026 Ullaakut
Copyright 2016 Etix Labs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
See the License for the specific language governing permissions and limitations under the License.
+24
View File
@@ -0,0 +1,24 @@
## Copyright 2016 Etix Labs
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
cmake_minimum_required (VERSION 2.8.1)
cmake_policy(SET CMP0042 NEW)
# Temporarily set the output path for all server plugins so every cache
# manager shared library lands in the same plugin directory.
set (LIBRARY_OUTPUT_PATH ${CAMERADAR_CACHE_MANAGER_OUTPUT_PATH})
add_subdirectory(dumb_cache_manager)
add_subdirectory(mysql_cache_manager)
# Propagate the accumulated list of built cache managers to the parent scope.
set (CAMERADAR_CACHE_MANAGERS ${CAMERADAR_CACHE_MANAGERS} PARENT_SCOPE)
@@ -0,0 +1,33 @@
## Copyright 2016 Etix Labs
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
cmake_minimum_required (VERSION 2.8.1)
cmake_policy(SET CMP0042 NEW)
project(dumb_cache_manager CXX)
find_package(PkgConfig)
include_directories (${PROJECT_SOURCE_DIR}/include ${CAMERADAR_INCLUDES})
# find_sources populates ${SOURCES} from the given source/header directories.
include (find_sources)
find_sources ("src" "include")
# Built as a SHARED library: cache managers are loaded as runtime plugins.
add_library (dumb_cache_manager SHARED ${SOURCES})
# Fail at link time (not dlopen time) if any symbol is unresolved.
set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--no-undefined")
target_link_libraries (dumb_cache_manager)
# Register the produced .so/.dylib path in the global cache manager list.
set (CACHE_MANAGER_NAME ${CAMERADAR_CACHE_MANAGER_OUTPUT_PATH}/${CMAKE_SHARED_LIBRARY_PREFIX}dumb_cache_manager${CMAKE_SHARED_LIBRARY_SUFFIX})
list (APPEND CAMERADAR_CACHE_MANAGERS ${CACHE_MANAGER_NAME})
set (CAMERADAR_CACHE_MANAGERS ${CAMERADAR_CACHE_MANAGERS} PARENT_SCOPE)
@@ -0,0 +1,54 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cachemanager.h>
#include <configuration.h>
#include <logger.h>
#include <stream_model.h>
#include <vector>
namespace etix {
namespace cameradar {
//! In-memory cache manager with no persistent backend.
//!
//! Streams are held in a plain std::vector guarded by a mutex; all data is
//! lost when the plugin is unloaded. Serves as the minimal reference
//! implementation of cache_manager_base.
class dumb_cache_manager : public cache_manager_base {
private:
    //! Plugin name reported by get_name()/static_get_name().
    static const std::string name;
    //! The entire cache: one model per discovered stream.
    std::vector<etix::cameradar::stream_model> streams;
    //! Configuration kept alive from configure() for the manager's lifetime.
    //! NOTE(review): std::shared_ptr and std::mutex are used but <memory>
    //! and <mutex> are not included here — presumably pulled in
    //! transitively via cachemanager.h; confirm.
    std::shared_ptr<etix::cameradar::configuration> configuration;
    //! Guards concurrent access to `streams`.
    std::mutex m;

public:
    using cache_manager_base::cache_manager_base;
    ~dumb_cache_manager();
    //! Returns the plugin name ("dumb-cache-manager").
    const std::string& get_name() const override;
    static const std::string& static_get_name();
    //! Stores the configuration pointer; always succeeds.
    bool load_dumb_conf(std::shared_ptr<etix::cameradar::configuration> configuration);
    bool configure(std::shared_ptr<etix::cameradar::configuration> configuration) override;
    //! True if a cached entry with the same address differs from the given
    //! model in its path_found/ids_found flags.
    bool has_changed(const etix::cameradar::stream_model&);
    //! Replaces the whole cache content.
    void set_streams(std::vector<etix::cameradar::stream_model> model);
    //! Updates cached entries matching address+port; does not insert.
    void update_stream(const etix::cameradar::stream_model& newmodel);
    //! Returns cached streams whose service is "rtsp" and state is "open".
    std::vector<etix::cameradar::stream_model> get_streams();
    //! Returns cached streams with both credentials and route found.
    std::vector<etix::cameradar::stream_model> get_valid_streams();
};
}
}
@@ -0,0 +1,104 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <dumb_cache_manager.h>
namespace etix {
namespace cameradar {
// Plugin name, also returned through the cache manager interface.
const std::string dumb_cache_manager::name = "dumb-cache-manager";

dumb_cache_manager::~dumb_cache_manager() {}

//! Returns the plugin name; delegates to the static accessor so both
//! entry points stay consistent.
const std::string&
dumb_cache_manager::get_name() const {
    return dumb_cache_manager::static_get_name();
}

const std::string&
dumb_cache_manager::static_get_name() {
    return dumb_cache_manager::name;
}

//! Interface entry point for configuration; forwards to load_dumb_conf.
bool
dumb_cache_manager::configure(std::shared_ptr<etix::cameradar::configuration> configuration) {
    return this->load_dumb_conf(configuration);
}

//! Keeps a reference to the configuration. No validation is performed,
//! so this always reports success.
bool
dumb_cache_manager::load_dumb_conf(std::shared_ptr<etix::cameradar::configuration> configuration) {
    this->configuration = configuration;
    return true;
}
//! Replaces all cached streams by the content of the vector given as
//! parameter.
//!
//! \param model the new full cache content; taken by value so callers can
//!        hand over temporaries, and moved into place below.
void
dumb_cache_manager::set_streams(std::vector<etix::cameradar::stream_model> model) {
    std::lock_guard<std::mutex> lock(m);
    // `model` is already our by-value copy: move it instead of copying the
    // whole vector a second time.
    this->streams = std::move(model);
}
//! Updates every cached entry whose address and port match the given
//! model. Despite the historical wording ("inserts"), this does NOT
//! insert: a stream not already present in the cache is ignored.
void
dumb_cache_manager::update_stream(const etix::cameradar::stream_model& newmodel) {
    std::lock_guard<std::mutex> lock(m);
    for (auto& stream : this->streams) {
        // address + port identifies a stream endpoint.
        if (stream.address == newmodel.address && stream.port == newmodel.port) {
            stream = newmodel;
        }
    }
}
//! Gets all cached streams
std::vector<etix::cameradar::stream_model>
dumb_cache_manager::get_streams() {
std::vector<stream_model> ret;
for (const auto& stream : this->streams) {
if (not stream.service_name.compare("rtsp") && not stream.state.compare("open"))
ret.push_back(stream);
}
return ret;
}
//! Gets all valid streams
std::vector<etix::cameradar::stream_model>
dumb_cache_manager::get_valid_streams() {
std::vector<stream_model> ret;
for (const auto& stream : this->streams) {
if (stream.ids_found && stream.path_found) ret.push_back(stream);
}
return ret;
}
// Returns true if the stream passed as a parameter has changed in the
// cache: a cached entry with the same address whose path_found or
// ids_found flag differs from `old`.
bool
dumb_cache_manager::has_changed(const etix::cameradar::stream_model& old) {
    // Bug fix: this reads `streams`, which other threads mutate through
    // set_streams()/update_stream(), so it must hold the cache lock too.
    std::lock_guard<std::mutex> lock(m);
    for (const auto& stream : this->streams) {
        if (stream.address == old.address)
            if (stream.path_found != old.path_found || stream.ids_found != old.ids_found)
                return true;
    }
    return false;
}
// C entry point used by the plugin loader (dlsym) to instantiate this
// cache manager. The caller takes ownership of the returned pointer.
extern "C" {
cache_manager_iface*
cache_manager_instance_new() {
    return new dumb_cache_manager();
}
}
}
}
@@ -0,0 +1,33 @@
## Copyright 2016 Etix Labs
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
cmake_minimum_required (VERSION 2.8.1)
cmake_policy(SET CMP0042 NEW)
project(mysql_cache_manager CXX)
find_package(PkgConfig)
include_directories (${PROJECT_SOURCE_DIR}/include ${CAMERADAR_INCLUDES})
# find_sources populates ${SOURCES} from the given source/header directories.
include (find_sources)
find_sources ("src" "include")
# Built as a SHARED library: cache managers are loaded as runtime plugins.
add_library (mysql_cache_manager SHARED ${SOURCES})
# Fail at link time (not dlopen time) if any symbol is unresolved.
set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--no-undefined")
# Depends on jsoncpp (config parsing) and mysqlcppconn (MySQL Connector/C++).
target_link_libraries (mysql_cache_manager jsoncpp mysqlcppconn pthread)
# Register the produced .so/.dylib path in the global cache manager list.
set (CACHE_MANAGER_NAME ${CAMERADAR_CACHE_MANAGER_OUTPUT_PATH}/${CMAKE_SHARED_LIBRARY_PREFIX}mysql_cache_manager${CMAKE_SHARED_LIBRARY_SUFFIX})
list (APPEND CAMERADAR_CACHE_MANAGERS ${CACHE_MANAGER_NAME})
set (CAMERADAR_CACHE_MANAGERS ${CAMERADAR_CACHE_MANAGERS} PARENT_SCOPE)
@@ -0,0 +1,85 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cppconn/resultset.h> // for ResultSet
#include <mutex> // for mutex
#include <stdbool.h> // for bool, false
#include <string> // for string
#include <utility> // for pair, make_pair
#include "query_result.h"
// Forward declarations from MySQL Connector/C++ so this header does not
// have to pull in the heavy connector headers.
namespace sql {
class Connection;
class Driver;
class ResultSet;
}
namespace etix {
namespace cameradar {
namespace mysql {
//! MySQL Database connection handling.
//! Abstracts all connection to the database: connecting, executing raw
//! commands, and running queries, with access serialized by a mutex.
class db_connection {
private:
    //! printf-style template for the CREATE DATABASE statement.
    static const std::string create_database_query;
    //! SQL driver (owned by the connector library, never deleted here).
    sql::Driver* driver = nullptr;
    //! SQL connection (deleted in the destructor).
    //! NOTE(review): copy construction/assignment are not deleted — a copy
    //! would double-delete `connection`. Confirm copies never happen, or
    //! delete the copy operations.
    sql::Connection* connection = nullptr;
    //! Serializes execute()/query() calls on the shared connection.
    std::mutex access_mtx;
    bool connected = false;
    std::string db_name;
    //! Create the database if it doesn't exist at connector launch.
    empty_result create_database(void);

public:
    db_connection(void);
    ~db_connection(void);
    //! Try to connect to the database.
    //! \return {success, error message} — message is empty on success.
    std::pair<bool, std::string> connect(const std::string& host,
                                         const std::string& user,
                                         const std::string& pass,
                                         const std::string& db_name,
                                         bool create_db_if_not_exist = true);
    //! Execute a MySQL command (no result set expected).
    empty_result execute(const std::string& request);
    //! Execute a query; on success the caller owns the returned ResultSet.
    query_result<sql::ResultSet*> query(const std::string& query);
    //! Checks connection validity, attempting a reconnect if needed.
    bool is_connected();
    //! Return db_name.
    const std::string&
    get_db_name(void) const {
        return this->db_name;
    }
};
} // mysql
} // cameradar
} // etix
@@ -0,0 +1,85 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cachemanager.h>
#include <configuration.h>
#include <db_conn.h>
#include <fmt.h>
#include <logger.h>
#include <stream_model.h>
#include <vector>
namespace etix {
namespace cameradar {
//! Connection settings for the MySQL backend, read from the "mysql_db"
//! section of the Cameradar configuration file.
struct mysql_configuration {
    unsigned int port;
    std::string host;
    std::string db_name;
    std::string user;
    std::string password;

    mysql_configuration() = default;
    //! Convenience constructor; user/password default to empty for
    //! unauthenticated local setups.
    mysql_configuration(unsigned int port,
                        const std::string& host,
                        const std::string& db_name,
                        const std::string& user = "",
                        const std::string& password = "")
    : port(port), host(host), db_name(db_name), user(user), password(password) {}
};
//! Cache manager that persists stream results in a MySQL table
//! (`results`), mirroring the interface of the in-memory manager.
class mysql_cache_manager : public cache_manager_base {
private:
    //! Plugin name reported by get_name()/static_get_name().
    static const std::string name;
    //! Local stream list (the source of truth lives in MySQL).
    std::vector<etix::cameradar::stream_model> streams;
    //! Configuration kept alive from configure().
    std::shared_ptr<etix::cameradar::configuration> configuration;
    //! Parsed "mysql_db" connection settings.
    etix::cameradar::mysql_configuration db_conf;
    //! Shared DB connection used by all queries.
    etix::cameradar::mysql::db_connection connection;
    //! Guards set_streams()/update_stream() critical sections.
    std::mutex m;

    // printf-style SQL templates, filled in with tool::fmt.
    static const std::string create_table_query;
    static const std::string insert_with_id_query;
    static const std::string exist_query;
    static const std::string get_results_query;
    static const std::string update_result_query;

public:
    using cache_manager_base::cache_manager_base;
    ~mysql_cache_manager();
    // Specific to MySQL: runs a statement and logs SQL errors.
    bool execute_query(const std::string& query);
    //! Returns the plugin name ("mysql-cache-manager").
    const std::string& get_name() const override;
    static const std::string& static_get_name();
    //! Reads the mysql_db config section, connects, and creates the table.
    bool load_mysql_conf(std::shared_ptr<etix::cameradar::configuration> configuration);
    bool configure(std::shared_ptr<etix::cameradar::configuration> configuration) override;
    //! True if the DB row with the same address differs in its
    //! ids_found/path_found flags.
    bool has_changed(const etix::cameradar::stream_model&);
    //! Inserts the open RTSP streams of the given list into the DB.
    void set_streams(std::vector<etix::cameradar::stream_model> model);
    //! Updates the DB row matching the stream's address.
    void update_stream(const etix::cameradar::stream_model& newmodel);
    //! Returns all open RTSP streams stored in the DB.
    std::vector<etix::cameradar::stream_model> get_streams();
    //! Returns all fully-cracked streams stored in the DB.
    std::vector<etix::cameradar::stream_model> get_valid_streams();
};
}
}
@@ -0,0 +1,65 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
namespace etix {
namespace cameradar {
namespace mysql {
//! Outcome of a database operation.
//! NOTE(review): this header uses std::string without including <string>;
//! presumably included transitively — confirm and add the include.
enum class execute_result { success, not_found, no_row_updated, sql_error, error };

//! Wrapper of a DB query result.
//! Templated on the data type we want to return (list<model>, bool, whatever).
template <typename DataType>
struct query_result {
    //! Payload of the query; meaning depends on DataType.
    DataType data;
    //! Status of the operation.
    execute_result state;
    //! Human-readable error description; empty on success.
    std::string error_msg;

    inline bool
    success(void) const {
        return state == execute_result::success;
    }
    //! Anything that is not a success counts as an error.
    inline bool
    error(void) const {
        return not success();
    }
};

//! Empty query result for when we just want to return the status
//! of the request with no associated data.
template <>
struct query_result<void> {
    execute_result state;
    std::string error_msg;

    inline bool
    success(void) const {
        return state == execute_result::success;
    }
    inline bool
    error(void) const {
        return not success();
    }
};

//! Shorthand for a data-less result (status + message only).
typedef query_result<void> empty_result;
} //! mysql
} //! cameradar
} //! etix
@@ -0,0 +1,138 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "db_conn.h" // for db_connection
#include "cppconn/connection.h" // for Connection
#include "query_result.h" // for queries
#include <cppconn/driver.h> // for get_driver_instance, etc
#include <cppconn/exception.h> // for SQLException
#include <cppconn/statement.h> // for Statement
#include <fmt.h> // for fmt
#include <logger.h> // for LOG_
namespace etix {
namespace cameradar {
namespace mysql {
// printf-style template; %s is replaced by the database name in
// create_database().
const std::string db_connection::create_database_query = "CREATE DATABASE IF NOT EXISTS %s";

db_connection::db_connection() : connected(false) {}

// Releases the connection. The driver comes from get_driver_instance()
// and is not deleted here.
db_connection::~db_connection() { delete this->connection; }
//! Try to connect to the database.
//!
//! Obtains the connector driver, opens the connection, optionally creates
//! the database, and selects it as the current schema.
//!
//! \param host host (may include ":port") passed to the connector.
//! \param user MySQL user name.
//! \param pass MySQL password.
//! \param db_name database to create/select.
//! \param create_db_if_not_exist when true, issue CREATE DATABASE IF NOT
//!        EXISTS and setSchema before returning.
//! \return {success, error message} — the message is empty on success.
std::pair<bool, std::string>
db_connection::connect(const std::string& host,
                       const std::string& user,
                       const std::string& pass,
                       const std::string& db_name,
                       bool create_db_if_not_exist) {
    this->db_name = db_name;
    try {
        this->driver = get_driver_instance();
        if (this->driver == nullptr) {
            return std::make_pair(false, "Cannot instantiate sql_driver");
        }
        this->connection = driver->connect(host, user, pass);
        if (this->connection == nullptr) return std::make_pair(false, "Cannot connect to mysql");
        // Mark connected before create_database(): it goes through
        // execute(), which checks is_connected().
        this->connected = true;
        if (create_db_if_not_exist) {
            auto cdb = this->create_database();
            if (cdb.state == mysql::execute_result::sql_error) { return { false, cdb.error_msg }; }
            this->connection->setSchema(db_name);
        }
    } catch (sql::SQLException& e) {
        // Connector calls throw rather than returning error codes.
        this->connected = false;
        return { false, e.what() };
    }
    return std::make_pair(true, "");
}
//! Execute a MySQL command that returns no result set.
//!
//! \param request full SQL text to execute.
//! \return success, no_row_updated when the statement affected no row, or
//!         sql_error with the exception message.
empty_result
db_connection::execute(const std::string& request) {
    std::lock_guard<std::mutex> lock(this->access_mtx);
    if (!this->is_connected()) {
        return { execute_result::sql_error, "Error, not connected to MySQL database" };
    }
    empty_result return_value = { execute_result::success, "" };
    try {
        // RAII: the statement is released on every path, including when
        // execute() throws (the original paired naked new/delete).
        std::unique_ptr<sql::Statement> stmt(this->connection->createStatement());
        stmt->execute(request);
        if (stmt->getUpdateCount() == 0) {
            return_value = { execute_result::no_row_updated, "No row updated" };
        }
    } catch (sql::SQLException& e) { return_value = { execute_result::sql_error, e.what() }; }
    return return_value;
}
//! Execute a query and hand the result set to the caller.
//!
//! \param query full SQL text of the SELECT.
//! \return on success, a heap-allocated sql::ResultSet* that the CALLER
//!         must delete; on failure, a nullptr payload and sql_error state.
query_result<sql::ResultSet*>
db_connection::query(const std::string& query) {
    std::lock_guard<std::mutex> lock(this->access_mtx);
    if (!this->is_connected()) {
        return { nullptr, execute_result::sql_error, "Error, not connected to MySQL database" };
    }
    query_result<sql::ResultSet*> return_value = { nullptr, execute_result::success, "" };
    try {
        // RAII for the statement; the ResultSet itself is intentionally
        // not wrapped, since its ownership transfers to the caller.
        std::unique_ptr<sql::Statement> stmt(this->connection->createStatement());
        return_value = { stmt->executeQuery(query), execute_result::success, "" };
    } catch (sql::SQLException& e) {
        return_value = { nullptr, execute_result::sql_error, e.what() };
    }
    return return_value;
}
bool
db_connection::is_connected() {
if (this->connection == nullptr) return false;
// check if our connection is always valid
if (this->connection->isClosed() || not this->connection->isValid()) {
LOG_INFO_("MySQL database connection is either closed or invalid, try to reconnect.",
"db_connection");
this->connection->reconnect();
if (this->connection->isClosed() || not this->connection->isValid()) {
this->connected = false;
LOG_ERR_("Unable to reconnect to MySQL.", "db_connection");
}
}
return this->connected;
}
//! Issues CREATE DATABASE IF NOT EXISTS for the configured db_name by
//! filling the printf-style template and running it through execute().
empty_result
db_connection::create_database() {
    auto query = tool::fmt(this->create_database_query, this->db_name.c_str());
    return this->execute(query);
}
} // mysql
} // cameradar
} // etix
@@ -0,0 +1,292 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <mysql_cache_manager.h>
/* DATA FORMAT
**
** Example :
**
** "address" : "173.16.100.45",
** "ids_found" : true,
** "password" : "123456",
** "path_found" : true,
** "port" : 554,
** "product" : "Vivotek FD9381-HTV",
** "protocol" : "tcp",
** "route" : "/live.sdp",
** "service_name" : "rtsp",
** "state" : "open",
** "thumbnail_path" : "/tmp/127.0.0.1/1463735257.jpg",
** "username" : "admin"
**
*/
namespace etix {
namespace cameradar {
// printf-style SQL templates, expanded with tool::fmt.
//
// NOTE(review): values are spliced into the SQL as raw '%s' with no
// escaping or prepared-statement binding. Scanned data (product banners,
// routes, credentials) ends up in these strings, so a crafted value could
// inject SQL — consider sql::PreparedStatement instead.

// Schema of the single `results` table; one row per scanned stream.
const std::string mysql_cache_manager::create_table_query =
    "CREATE TABLE IF NOT EXISTS `results` ("
    "`id` int(11) UNSIGNED NOT NULL AUTO_INCREMENT, "
    "`address` tinytext NOT NULL, "
    "`password` tinytext NOT NULL, "
    "`product` tinytext NOT NULL, "
    "`protocol` tinytext NOT NULL, "
    "`route` tinytext NOT NULL, "
    "`service_name` tinytext NOT NULL, "
    "`state` tinytext NOT NULL, "
    "`thumbnail_path` tinytext NOT NULL, "
    "`username` tinytext NOT NULL, "
    "`port` int(11) UNSIGNED NOT NULL, "
    "`ids_found` tinytext NOT NULL, "
    "`path_found` tinytext NOT NULL, "
    "PRIMARY KEY (`id`));";

// Insert of a full stream row; first %s is the database name.
const std::string mysql_cache_manager::insert_with_id_query =
    "INSERT INTO `%s`.`results`"
    " (`address`, `password`, `product`, `protocol`, `route`, `service_name`, `state`, "
    "`thumbnail_path`, `username`, `port`, `ids_found`, `path_found`)"
    " VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')";

// Full-row update keyed by address (LIKE, last %s).
const std::string mysql_cache_manager::update_result_query =
    "UPDATE `%s`.`results` SET"
    " `results`.`address` = '%s',"
    " `results`.`password` = '%s',"
    " `results`.`product` = '%s',"
    " `results`.`protocol` = '%s',"
    " `results`.`route` = '%s',"
    " `results`.`service_name` = '%s',"
    " `results`.`state` = '%s',"
    " `results`.`thumbnail_path` = '%s',"
    " `results`.`username` = '%s',"
    " `results`.`port` = '%s',"
    " `results`.`ids_found` = '%s',"
    " `results`.`path_found` = '%s'"
    " WHERE `results`.`address` LIKE '%s'";

// Existence check by exact address match.
const std::string mysql_cache_manager::exist_query =
    "SELECT * FROM `%s`.`results` WHERE `results`.`address` = '%s'";

// Fetch of every stored result row.
const std::string mysql_cache_manager::get_results_query = "SELECT * FROM `%s`.`results`";

// Plugin name reported through the cache manager interface.
const std::string mysql_cache_manager::name = "mysql-cache-manager";
mysql_cache_manager::~mysql_cache_manager() {}

//! Returns the plugin name; delegates to the static accessor so both
//! entry points stay consistent.
const std::string&
mysql_cache_manager::get_name() const {
    return mysql_cache_manager::static_get_name();
}

const std::string&
mysql_cache_manager::static_get_name() {
    return mysql_cache_manager::name;
}

//! Interface entry point for configuration; forwards to load_mysql_conf.
bool
mysql_cache_manager::configure(std::shared_ptr<etix::cameradar::configuration> configuration) {
    return this->load_mysql_conf(configuration);
}
bool
mysql_cache_manager::execute_query(const std::string& query) {
auto check_err = [](const auto& res) {
if (res.state == mysql::execute_result::sql_error) {
LOG_WARN_(res.error_msg, "mysql_cache_manager");
return false;
}
return true;
};
return check_err(this->connection.execute(query));
}
//! Reads the "mysql_db" section of the configuration, connects to the
//! database, and ensures the `results` table exists.
//!
//! \param configuration parsed Cameradar configuration (raw_conf JSON).
//! \return false if the config is malformed, the connection fails, or
//!         the table cannot be created.
bool
mysql_cache_manager::load_mysql_conf(
    std::shared_ptr<etix::cameradar::configuration> configuration) {
    this->configuration = configuration;
    try {
        this->db_conf.host = configuration->raw_conf["mysql_db"]["host"].asString();
        this->db_conf.port = configuration->raw_conf["mysql_db"]["port"].asUInt();
        this->db_conf.user = configuration->raw_conf["mysql_db"]["user"].asString();
        this->db_conf.password = configuration->raw_conf["mysql_db"]["password"].asString();
        this->db_conf.db_name = configuration->raw_conf["mysql_db"]["db_name"].asString();
    } catch (const std::exception& e) {
        // jsoncpp throws on type mismatches (e.g. non-numeric "port").
        LOG_ERR_("Configuration of the MySQL db failed : " + std::string(e.what()),
                 "mysql_cache_manager");
        return false;
    }
    if (not this->connection
               .connect(db_conf.host + ":" + std::to_string(db_conf.port),
                        db_conf.user,
                        db_conf.password,
                        db_conf.db_name)
               .first) {
        LOG_ERR_("Configuration of the MySQL DB failed", "mysql_cache_manager");
        return false;
    }
    // Tries to create the Result table in the DB and returns the success state
    return (execute_query(create_table_query));
}
//! Replaces all cached streams by the content of the vector given as
//! parameter. Only open RTSP streams are persisted, and a stream whose
//! address already exists in the table is left untouched.
//!
//! \param models full list of scanned streams to persist.
void
mysql_cache_manager::set_streams(std::vector<etix::cameradar::stream_model> models) {
    LOG_DEBUG_("Beginning stream list DB insertion", "mysql_cache_manager");
    std::lock_guard<std::mutex> lock(m);
    for (const auto& model : models) {
        if (model.service_name == "rtsp" && model.state == "open") {
            auto query = tool::fmt(
                this->exist_query, this->connection.get_db_name().c_str(), model.address.c_str());
            auto result = this->connection.query(query);
            // Bug fix: the ResultSet was leaked on every iteration, and a
            // failed query (null `data`) was dereferenced. Inspect, free,
            // then decide.
            if (result.data != nullptr) {
                const bool already_known = result.data->next();
                delete result.data;
                if (already_known) continue;
            }
            query = tool::fmt(this->insert_with_id_query,
                              this->connection.get_db_name().c_str(),
                              model.address.c_str(),
                              model.password.c_str(),
                              model.product.c_str(),
                              model.protocol.c_str(),
                              model.route.c_str(),
                              model.service_name.c_str(),
                              model.state.c_str(),
                              model.thumbnail_path.c_str(),
                              model.username.c_str(),
                              std::to_string(model.port).c_str(),
                              std::to_string(model.ids_found).c_str(),
                              std::to_string(model.path_found).c_str());
            execute_query(query);
        }
    }
}
//! Updates the DB row whose address matches the given stream with the
//! stream's current values. Despite the historical wording ("inserts"),
//! no row is created if the address is absent (plain SQL UPDATE).
void
mysql_cache_manager::update_stream(const etix::cameradar::stream_model& model) {
    // Build the query before taking the lock to keep the critical
    // section minimal.
    auto query = tool::fmt(this->update_result_query,
                           this->connection.get_db_name().c_str(),
                           model.address.c_str(),
                           model.password.c_str(),
                           model.product.c_str(),
                           model.protocol.c_str(),
                           model.route.c_str(),
                           model.service_name.c_str(),
                           model.state.c_str(),
                           model.thumbnail_path.c_str(),
                           model.username.c_str(),
                           std::to_string(model.port).c_str(),
                           std::to_string(model.ids_found).c_str(),
                           std::to_string(model.path_found).c_str(),
                           model.address.c_str());
    std::lock_guard<std::mutex> lock(m);
    execute_query(query);
}
//! Gets all cached streams: fetches every row from the `results` table
//! and returns those whose state is "open" and service is "rtsp".
//!
//! \return stream models rebuilt from the DB rows; empty on query failure.
std::vector<etix::cameradar::stream_model>
mysql_cache_manager::get_streams() {
    auto query = tool::fmt(this->get_results_query, this->connection.get_db_name().c_str());
    auto result = this->connection.query(query);
    if (not result.data) {
        delete result.data;
        return {};
    }
    std::vector<stream_model> lst;
    while (result.data->next()) {
        // If it's an open RTSP stream
        if (not result.data->getString("state").compare("open") &&
            not result.data->getString("service_name").compare("rtsp")) {
            // NOTE(review): this brace-init relies on the exact member
            // declaration order of stream_model (not visible here) —
            // confirm it matches address, port, username, password, route,
            // service_name, product, protocol, state, path_found,
            // ids_found, thumbnail_path.
            stream_model s{
                result.data->getString("address"), result.data->getUInt("port"),
                result.data->getString("username"), result.data->getString("password"),
                result.data->getString("route"), result.data->getString("service_name"),
                result.data->getString("product"), result.data->getString("protocol"),
                result.data->getString("state"), result.data->getBoolean("path_found"),
                result.data->getBoolean("ids_found"), result.data->getString("thumbnail_path")
            };
            lst.push_back(s);
        }
    }
    // The ResultSet is owned by this function once query() returns.
    delete result.data;
    return lst;
}
//! Gets all valid streams: rows whose credentials (ids_found) and route
//! (path_found) were both discovered. Flags are stored as tinytext, hence
//! the string comparison against "1".
//!
//! \return stream models rebuilt from the DB rows; empty on query failure.
std::vector<etix::cameradar::stream_model>
mysql_cache_manager::get_valid_streams() {
    auto query = tool::fmt(this->get_results_query, this->connection.get_db_name().c_str());
    auto result = this->connection.query(query);
    if (not result.data) {
        delete result.data;
        return {};
    }
    std::vector<stream_model> lst;
    while (result.data->next()) {
        // If the ID and the Path were found add this stream
        if (not result.data->getString("ids_found").compare("1") &&
            not result.data->getString("path_found").compare("1")) {
            // NOTE(review): brace-init order must match stream_model's
            // member declaration order (not visible here) — confirm.
            stream_model s{
                result.data->getString("address"), result.data->getUInt("port"),
                result.data->getString("username"), result.data->getString("password"),
                result.data->getString("route"), result.data->getString("service_name"),
                result.data->getString("product"), result.data->getString("protocol"),
                result.data->getString("state"), result.data->getBoolean("path_found"),
                result.data->getBoolean("ids_found"), result.data->getString("thumbnail_path")
            };
            lst.push_back(s);
        }
    }
    // The ResultSet is owned by this function once query() returns.
    delete result.data;
    return lst;
}
// Returns true if the stream passed as a parameter has changed in the
// cache: the row with the same address has different ids_found or
// path_found flags. Returns false when the query fails.
bool
mysql_cache_manager::has_changed(const etix::cameradar::stream_model& old) {
    auto query = tool::fmt(this->get_results_query, this->connection.get_db_name().c_str());
    auto result = this->connection.query(query);
    if (not result.data) { return false; }
    // Bug fix: the early `return true` inside the loop leaked the
    // ResultSet. Record the verdict, free the result, then return.
    bool changed = false;
    while (not changed && result.data->next()) {
        if (result.data->getString("address") == old.address)
            if (result.data->getBoolean("ids_found") != old.ids_found ||
                result.data->getBoolean("path_found") != old.path_found)
                changed = true;
    }
    delete result.data;
    return changed;
}
// C entry point used by the plugin loader (dlsym) to instantiate this
// cache manager. The caller takes ownership of the returned pointer.
extern "C" {
cache_manager_iface*
cache_manager_instance_new() {
    return new mysql_cache_manager();
}
}
}
}
-78
View File
@@ -1,78 +0,0 @@
package cameradar
import (
"context"
"errors"
"fmt"
)
// Reporter reports progress and results of the application.
type Reporter interface {
	// Start is called when a step begins.
	Start(step Step, message string)
	// Done is called when a step completes successfully.
	Done(step Step, message string)
	// Error is called when a step fails.
	Error(step Step, err error)
	// Summary reports the final streams along with the error that
	// interrupted the run, if any.
	Summary(streams []Stream, err error)
}
// App scans one or more targets and attacks all RTSP streams found to get their credentials.
type App struct {
	streamScanner StreamScanner  // discovers RTSP streams on the targets
	attacker      StreamAttacker // attacks discovered streams for routes/credentials
	reporter      Reporter       // receives progress events and the final summary

	targets []string // hosts/subnets to scan
	ports   []string // ports to probe on each target
}
// StreamScanner discovers RTSP streams for the given inputs.
type StreamScanner interface {
	// Scan returns the discovered streams, or an error if discovery fails.
	Scan(ctx context.Context) ([]Stream, error)
}
// StreamAttacker attacks streams to discover routes and credentials.
type StreamAttacker interface {
	// Attack returns the input streams enriched with any routes and
	// credentials it managed to discover.
	Attack(ctx context.Context, streams []Stream) ([]Stream, error)
}
// New creates a new App with explicit dependencies.
//
// streamScanner, attacker and reporter are all required; targets and ports
// configure the scan. The reporter was previously not validated even though
// Run calls it unconditionally, which would panic on a nil reporter — it is
// now checked here like the other dependencies.
func New(streamScanner StreamScanner, attacker StreamAttacker, targets, ports []string, reporter Reporter) (*App, error) {
	if streamScanner == nil {
		return nil, errors.New("stream scanner is required")
	}
	if attacker == nil {
		return nil, errors.New("stream attacker is required")
	}
	if reporter == nil {
		return nil, errors.New("reporter is required")
	}

	app := &App{
		streamScanner: streamScanner,
		attacker:      attacker,
		targets:       targets,
		ports:         ports,
		reporter:      reporter,
	}

	return app, nil
}
// Run runs the scan and prints the results.
//
// Both phases report progress through the Reporter; on failure the error is
// wrapped with context, handed to Summary, and returned.
func (a *App) Run(ctx context.Context) error {
	a.reporter.Start(StepScan, "Scanning targets for RTSP streams")

	streams, err := a.streamScanner.Scan(ctx)
	if err != nil {
		wrapped := fmt.Errorf("discovering devices: %w", err)
		a.reporter.Error(StepScan, wrapped)
		a.reporter.Summary(streams, wrapped)
		return wrapped
	}
	a.reporter.Done(StepScan, "Scan complete")

	// NOTE(review): unlike the scan path above, an attack failure is not
	// forwarded to reporter.Error before Summary — confirm whether that
	// asymmetry is intentional.
	streams, err = a.attacker.Attack(ctx, streams)
	if err != nil {
		wrapped := fmt.Errorf("attacking devices: %w", err)
		a.reporter.Summary(streams, wrapped)
		return wrapped
	}

	a.reporter.Summary(streams, nil)
	return nil
}
+60
View File
@@ -0,0 +1,60 @@
## Copyright 2016 Etix Labs
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
# CMP0048 OLD keeps the pre-3.0 behaviour where project() does not manage
# the VERSION variables.
cmake_minimum_required (VERSION 2.8.1)
cmake_policy(SET CMP0048 OLD)

project(cameradar CXX)

# find gstreamer 1.x libraries
include(FindPkgConfig)
pkg_search_module(GSTREAMER REQUIRED gstreamer-1.0)
find_library(LIB_GSTREAMER NAMES ${GSTREAMER_LIBRARIES} HINTS ${GSTREAMER_LIBRARY_DIRS})

# Project headers plus the gstreamer and version headers.
include_directories (
${GSTREAMER_INCLUDE_DIRS}
${PROJECT_SOURCE_DIR}/include
${VERSION_INCLUDE_DIR}
)

# jsoncpp is built as an in-tree dependency (see the deps/ directory).
link_directories (
${GSTREAMER_LIBRARY_DIRS}
"../deps/jsoncpp/src/deps.jsoncpp/src/lib_json"
)

if ("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
# search special osx gstreamer libs
pkg_search_module(GSTREAMER_APP REQUIRED gstreamer-app-1.0)
find_library(LIB_GSTREAMER NAMES ${GSTREAMER_APP_LIBRARIES} HINTS ${GSTREAMER_APP_LIBRARY_DIRS})
include_directories (${GSTREAMER_APP_INCLUDE_DIRS})
link_directories (${GSTREAMER_APP_LIBRARY_DIRS})
endif()

# find_sources is a project-provided CMake module that fills ${SOURCES}.
include (find_sources)
find_sources ("src" "include" "src/tasks")

add_executable (cameradar ${SOURCES})
target_link_libraries (cameradar pthread jsoncpp dl curl ${GSTREAMER_LIBRARIES})

# Add the conf files to the build dir
add_custom_command(TARGET cameradar PRE_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_directory
${CMAKE_SOURCE_DIR}/cameradar_standalone/conf $<TARGET_FILE_DIR:cameradar>/conf/)

# Export the binary path to the parent scope for packaging.
set (BINARIES_NAME ${PROJECT_BINARY_DIR}/cameradar)
list (APPEND CAMERADAR_BINARIES ${BINARIES_NAME})
set (CAMERADAR_BINARIES ${CAMERADAR_BINARIES} PARENT_SCOPE)
@@ -0,0 +1,16 @@
{
"mysql_db" : {
"host" : "cameradar-database",
"port" : 3306,
"user": "root",
"password": "root",
"db_name": "cmrdr"
},
"subnets" : "localhost",
"ports" : "554,8554",
"rtsp_url_file" : "/cameradar/conf/url.json",
"rtsp_ids_file" : "/cameradar/conf/ids.json",
"thumbnail_storage_path" : "/tmp/thumbs",
"cache_manager_path" : "/cameradar/cache_managers",
"cache_manager_name" : "dumb"
}
+31
View File
@@ -0,0 +1,31 @@
{
"username": [
"",
"admin",
"Admin",
"root",
"supervisor",
"ubnt"
],
"password" : [
"",
"admin",
"9999",
"123456",
"pass",
"camera",
"1234",
"12345",
"fliradmin",
"system",
"jvc",
"meinsm",
"root",
"4321",
"1111111",
"password",
"ikwd",
"supervisor",
"ubnt"
]
}
+78
View File
@@ -0,0 +1,78 @@
{
"urls" : [
"/",
"/1.AMP",
"/1/stream1",
"/CAM_ID.password.mp2",
"/GetData.cgi",
"/MediaInput/h264",
"/MediaInput/mpeg4",
"/VideoInput/1/h264/1",
"/access_code",
"/access_name_for_stream_1_to_5",
"/av0_0",
"/av2",
"/avn=2",
"/axis-media/media.amp",
"/cam",
"/cam0_0",
"/cam0_1",
"/cam1/h264",
"/cam1/h264/multicast",
"/cam1/mjpeg",
"/cam1/mpeg4",
"/camera.stm",
"/ch0",
"/ch001.sdp",
"/ch01.264",
"/ch0_unicast_firststream",
"/ch0_unicast_secondstream",
"/channel1",
"/h264",
"/h264/media.amp",
"/image.mpg",
"/img/media.sav",
"/img/video.asf",
"/img/video.sav",
"/ioImage/1",
"/ipcam.sdp",
"/ipcam_h264.sdp",
"/live.sdp",
"/live/h264",
"/live/mpeg4",
"/live_mpeg4.sdp",
"/livestream",
"/livestream/",
"/media/media.amp",
"/media/video1",
"/mjpeg/media.smp",
"/mp4",
"/mpeg4",
"/mpeg4/1/media.amp",
"/mpeg4/media.amp",
"/mpeg4/media.smp",
"/mpeg4unicast",
"/mpg4/rtsp.amp",
"/multicaststream",
"/now.mp4",
"/nph-h264.cgi",
"/nphMpeg4/g726-640x",
"/nphMpeg4/g726-640x480",
"/nphMpeg4/nil-320x240",
"/play1.sdp",
"/play2.sdp",
"/rtpvideo1.sdp",
"/rtsp_tunnel",
"/rtsph264",
"/stream1",
"/user.pin.mp2",
"/user_defined",
"/video",
"/video.3gp",
"/video.mp4",
"/video1",
"/video1+audio1",
"/vis",
"/wfov"
]
}
+185
View File
@@ -0,0 +1,185 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <configuration.h>
#include <memory>
#include <mutex>
#include <stream_model.h>
#include <vector>
namespace etix {
namespace cameradar {
// The interface a cache_manager should implement to be valid.
// Implementations are loaded from shared libraries at runtime (see
// cache_manager below) and exchanged through this ABI.
class cache_manager_iface {
public:
    virtual ~cache_manager_iface() {}

    // Launches the manager configuration
    // \return false if failed
    virtual bool configure(std::shared_ptr<etix::cameradar::configuration> configuration) = 0;

    // get the name of the cache manager
    virtual const std::string& get_name() const = 0;

    // Replaces all cached streams by the content of the vector given as
    // parameter
    virtual void set_streams(std::vector<etix::cameradar::stream_model> model) = 0;

    // Inserts a single stream to the cache
    virtual void update_stream(const etix::cameradar::stream_model& newmodel) = 0;

    // Returns true if the stream passed as a parameter has changed in the cache
    virtual bool has_changed(const etix::cameradar::stream_model&) = 0;

    // Gets all cached streams
    virtual std::vector<etix::cameradar::stream_model> get_streams() = 0;

    // Gets all valid streams which have been accessed
    virtual std::vector<etix::cameradar::stream_model> get_valid_streams() = 0;
};
// Convenience base class for cache managers; restates the pure interface
// and adds a typed accessor helper.
class cache_manager_base : public cache_manager_iface {
public:
    cache_manager_base() = default;
    virtual ~cache_manager_base() = default;

    // Launches the cache manager configuration
    // \return false if failed
    virtual bool configure(std::shared_ptr<etix::cameradar::configuration> configuration) = 0;

    // get the name of the cache manager
    virtual const std::string& get_name() const = 0;

    // Replaces all cached streams by the content of the vector given as
    // parameter
    virtual void set_streams(std::vector<etix::cameradar::stream_model> model) = 0;

    // Returns true if the stream passed as a parameter has changed in the cache
    virtual bool has_changed(const etix::cameradar::stream_model&) = 0;

    // Updates a single stream to the cache
    virtual void update_stream(const etix::cameradar::stream_model& newmodel) = 0;

    // Gets all cached streams
    virtual std::vector<etix::cameradar::stream_model> get_streams() = 0;

    // Gets all valid streams which have been accessed
    virtual std::vector<etix::cameradar::stream_model> get_valid_streams() = 0;

    // Get the manager's instance
    cache_manager_base& get_instance();

    // Typed downcast helper.
    // NOTE(review): wrapping `this` in a brand-new std::shared_ptr creates an
    // independent ownership group — that shared_ptr will delete the manager
    // even though it is owned elsewhere (double-delete risk). Also the final
    // call invokes a single-parameter get<T>() that this class does not
    // declare; confirm whether this template is ever actually instantiated.
    template <typename I, typename T>
    std::shared_ptr<T>
    get() {
        static_assert(std::is_base_of<cache_manager_base, I>::value,
                      "I must implement cache_manager_base");
        std::shared_ptr<I> cache_manager(dynamic_cast<I*>(this));
        if (not cache_manager) return nullptr;
        return cache_manager->template get<T>();
    }
};
// The representation of a cache manager
//
// This class loads a shared library, and tries to call an extern "C"
// function which should instantiate a new instance of the plugin.
class cache_manager {
private:
    // Platform-dependent shared-library extension (e.g. ".so"/".dylib").
    static const std::string PLUGIN_EXT;
    // Default entry-point symbol resolved in the shared library.
    static const std::string default_symbol;

    // The name of the cache manager
    std::string name;

    // The write mutex to avoid conflicts when multithreading
    std::mutex m;

    // The path where the manager is located
    // should be specified in the configuration file
    std::string path;

    // The symbol entry point of the manager to
    // call to create an instance from the shared library
    std::string symbol;

    // The handle to the shared library where is stored the manager
    void* handle = nullptr;

    // The cache manager instance if it is successfully loaded
    cache_manager_iface* ptr = nullptr;

    // Internal function that creates the full path of the cache manager
    //
    // full path is composed of: the path, the name, the string "_cache-manager"
    // and the extension PLUGIN_EXT depending of the platform
    std::string make_full_path();

public:
    // Delete constructor
    cache_manager() = delete;

    // The manager needs a path and a symbol to be instantiated.
    // The symbol can be changed if the plugin entry point
    // is different than the standard one.
    cache_manager(const std::string& path,
                  const std::string& name,
                  const std::string& symbol = default_symbol);

    // // Copy constructor
    // cache_manager(cache_manager &other);

    // Move constructor
    cache_manager(cache_manager&& old);

    ~cache_manager();

    // Creates the instance of the cache_manager
    //
    // \return false if the cache_manager failed to be instantiated or if
    // the cache_manager is not a valid cache manager, true otherwise
    bool make_instance();

    // NOTE(review): this function unconditionally calls itself with the same
    // template arguments, so any instantiation that is actually invoked
    // recurses forever. It presumably was meant to delegate to the loaded
    // instance behind `ptr` — confirm and fix before using it.
    template <typename I, typename T>
    std::shared_ptr<T>
    get() {
        static_assert(std::is_base_of<cache_manager_base, I>::value,
                      "I must implement plugin_base");
        return this->get<I, T>();
    }

    // Helper to access internal loaded cache_manager
    //
    // Gives access to the methods of the cache_manager using the operator
    // -> (e.g.: cache_manager->get_name());
    cache_manager_iface* operator->();
    const cache_manager_iface* operator->() const;

    // helper function to check if a cache_manager is instantiated or not
    friend bool operator==(std::nullptr_t nullp, const cache_manager& p);
    // helper function to check if a cache_manager is instantiated or not
    friend bool operator==(const cache_manager& p, std::nullptr_t nullp);
    // helper function to check if a cache_manager is instantiated or not
    friend bool operator!=(std::nullptr_t nullp, const cache_manager& p);
    // helper function to check if a cache_manager is instantiated or not
    friend bool operator!=(const cache_manager& p, std::nullptr_t nullp);
};
}
}
@@ -0,0 +1,30 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string> // std::string
#include <memory> // std::shared_ptr
#include <configuration.h> // conf
namespace etix {
namespace cameradar {
// Interface for a unit of work run by the dispatcher.
//
// Fix: the dispatcher stores these polymorphically (cameradar_task*), so the
// base class needs a virtual destructor — deleting an implementation through
// a base pointer was previously undefined behaviour.
class cameradar_task {
public:
    virtual ~cameradar_task() = default;

    // Runs the task; returns false on failure.
    virtual bool run() const = 0;
};
}
}
@@ -0,0 +1,79 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <json/reader.h> // Json::Value
#include <json/value.h> // Json::Value
#include <logger.h> // _LOG_
#include <opt_parse.h> // parsing opt
#include <string> // std::string
#include <utility> // std::pair
#include <vector> // std::vector (configuration holds vector<string> members)
namespace etix {
namespace cameradar {
static const std::string default_configuration_path = "conf/cameradar.conf.json";
static const std::string default_ports = "554,8554";
static const std::string default_subnets = "localhost,168.0.0.0/24";
static const std::string default_thumbnail_storage_path = "/tmp";
static const std::string default_rtsp_url_file = "conf/url.json";
static const std::string default_rtsp_ids_file = "conf/ids.json";
static const std::string default_cache_manager_path = "../cache_managers/dumb_cache_manager";
static const std::string default_cache_manager_name = "dumb";
// Runtime configuration of cameradar, loaded from the JSON configuration
// file (the default_* constants above provide the fallback values).
struct configuration {
    std::string thumbnail_storage_path; // where stream thumbnails are written
    std::string subnets;                // comma-separated scan targets
    std::string rtsp_url_file;          // JSON file of candidate stream routes
    std::string rtsp_ids_file;          // JSON file of candidate credentials
    std::string ports;                  // comma-separated ports to probe
    std::string cache_manager_path;     // directory holding cache manager plugins
    std::string cache_manager_name;     // name of the cache manager plugin to load

    // Dictionaries filled by load_url() / load_ids() — presumably from the
    // two JSON files above; the loaders are defined elsewhere.
    std::vector<std::string> paths;
    std::vector<std::string> usernames;
    std::vector<std::string> passwords;

    // The raw parsed JSON document, exposed through get_raw().
    Json::Value raw_conf;

    configuration() = default;

    configuration(const std::string& thumbnail_storage_path,
                  const std::string& subnets,
                  const std::string& rtsp_url_file,
                  const std::string& rtsp_ids_file,
                  const std::string& cache_manager_path,
                  const std::string& cache_manager_name,
                  const std::string& ports)
    : thumbnail_storage_path(thumbnail_storage_path)
    , subnets(subnets)
    , rtsp_url_file(rtsp_url_file)
    , rtsp_ids_file(rtsp_ids_file)
    , ports(ports)
    , cache_manager_path(cache_manager_path)
    , cache_manager_name(cache_manager_name) {}

    static const std::string name_;

    // Load the credential / route dictionaries; \return false if failed.
    bool load_ids();
    bool load_url();

    // Access to the unparsed JSON document.
    Json::Value get_raw() const;
};
std::pair<bool, std::string> read_file(const std::string& path);
std::pair<bool, configuration> load(const std::pair<bool, etix::tool::opt_parse>& args);
}
}
+26
View File
@@ -0,0 +1,26 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <memory> // std::shared_ptr
#include <logger.h> // LOG
#include <curl/curl.h> // cURL client for discovery
#include <encode.h> // b64
namespace etix {
namespace cameradar {
bool curl_describe(const std::string& path, bool logs);
}
}
+84
View File
@@ -0,0 +1,84 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <list> // sig
#include <memory> // std::shared_ptr
#include <opt_parse.h> // parsing opt
#include <logger.h> // LOG
#include <configuration.h> // conf
#include <thread> // std::thread
#include <chrono> // operator""ms
#include <signal_handler.h> // sig
// All the tasks managed by the dispatcher
#include <tasks/mapping.h>
#include <tasks/parsing.h>
#include <tasks/brutelogs.h>
#include <tasks/brutepath.h>
#include <tasks/thumbnail.h>
#include <tasks/stream_check.h>
#include <tasks/print.h>
namespace etix {
namespace cameradar {
enum class task {
init,
preparation,
mapping,
parsing,
brutepath,
bruteforce,
thumb_generation,
print,
finished
};
// Drives the cameradar pipeline: owns the task queue and advances through
// the `task` states declared above.
class dispatcher {
private:
    bool busy;    // true while a task is running (see doing_stuff())
    task current; // current pipeline state

    // Output file for the network scan, made unique with the current
    // timestamp so successive runs do not overwrite each other.
    std::string nmap_output;

    const configuration& conf; // parsed configuration (not owned)
    std::shared_ptr<etix::cameradar::cache_manager> cache; // shared stream cache
    const std::pair<bool, etix::tool::opt_parse>& opts;    // parsed CLI options (not owned)

    // Pending tasks.
    // NOTE(review): raw pointers — confirm who owns and frees these.
    std::list<cameradar_task*> queue;

public:
    dispatcher() = delete;

    // Builds the pipeline from an already-loaded configuration, cache
    // manager and parsed command line.
    dispatcher(const configuration& conf,
               std::shared_ptr<etix::cameradar::cache_manager> cache,
               const std::pair<bool, etix::tool::opt_parse>& opts)
    : busy(false)
    , current(task::init)
    , nmap_output("/tmp/scans/scan" + std::to_string(std::chrono::system_clock::to_time_t(
                      std::chrono::system_clock::now())) +
                  ".xml")
    , conf(conf)
    , cache(cache)
    , opts(opts){};

    ~dispatcher() = default;

    // Whether a task is currently executing.
    bool
    doing_stuff() const {
        return this->busy;
    }

    void do_stuff();
    void run();
};
}
}
+39
View File
@@ -0,0 +1,39 @@
/*
base64.cpp and base64.h
Copyright (C) 2004-2008 René Nyffenegger
This source code is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this source code must not be misrepresented; you must not
claim that you wrote the original source code. If you use this source code
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original source code.
3. This notice may not be removed or altered from any source distribution.
René Nyffenegger rene.nyffenegger@adp-gmbh.ch
*/
#pragma once
#include <string>
namespace etix {
namespace tool {
namespace encode {
std::string encode64(const std::string& str_to_encode);
std::string decode64(const std::string& str_to_decode);
std::string base64_encode(unsigned char const*, unsigned int len);
std::string base64_decode(std::string const& s);
} // encode
} // tool
} // etix
+42
View File
@@ -0,0 +1,42 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cstdio> // std::snprintf
#include <iostream>
#include <mutex>
#include <string>
namespace etix {
namespace tool {
static std::mutex mutex;
// Format a string with the given arguments
// same behavior as sprintf.
// Format a string with the given arguments
// same behavior as sprintf.
//
// Fix: the previous implementation wrote with std::sprintf into a fixed
// 512-byte shared static buffer, overflowing on longer expansions. The
// result is now sized with a first std::snprintf pass, so any length is
// supported; with no shared buffer left, the function is also re-entrant
// and the global lock is no longer needed.
//
// \param base printf-style format string
// \param args values consumed by the format specifiers
// \return the formatted string; empty on an encoding error or empty result
template <class... Args>
std::string
fmt(const std::string& base, Args... args) {
    // First pass: compute the required length (terminator excluded).
    const int needed = std::snprintf(nullptr, 0, base.c_str(), args...);
    if (needed <= 0) return {};
    std::string out(static_cast<std::size_t>(needed), '\0');
    // Second pass: write into the string's buffer (+1 for the '\0').
    std::snprintf(&out[0], out.size() + 1, base.c_str(), args...);
    return out;
}
} // tool
} // etix
+47
View File
@@ -0,0 +1,47 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <pwd.h>
#include <sys/types.h>
#include <unistd.h>
#include <fstream>
#include <string>
namespace etix {
namespace tool {
namespace fs {
enum class fs_error { is_dir, is_not_dir, dont_exist };
fs_error is_folder(const std::string& folder);
bool get_or_create_folder(const std::string& folder);
bool create_folder(const std::string& folder);
bool create_recursive_folder(const std::string& folder);
std::string home();
// this functions take a copy because we need to make some operations on the string
// for example, we need to apply std::string::pop_back
std::string get_file_folder(std::string full_file_path);
bool copy(const std::string& src, const std::string& dst);
} // fs
} // tool
} // etix
@@ -0,0 +1,25 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string> // std::string
#include <logger.h> // LOG
#include <stdlib.h> // system
namespace etix {
namespace cameradar {
bool launch_command(const std::string& cmd);
}
}
+116
View File
@@ -0,0 +1,116 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <sstream>
#include <string>
#include "spdlog/spdlog.h"
namespace etix {
namespace tool {
// Builds a log prefix of the form "(<from>): <message>".
inline std::string
format_output(const std::string& from, const std::string& message) {
    return "(" + from + "): " + message;
}
// Severity levels; the values mirror spdlog::level::level_enum.
enum class loglevel { DEBUG = 1, INFO = 2, WARN = 4, ERR = 5, CRITICAL = 6 };

// Thin singleton wrapper around a spdlog stdout logger.
class logger {
    std::string name;                        // name given on first get_instance() call
    std::shared_ptr<spdlog::logger> console; // underlying spdlog stdout logger

    // Private: instances are only created through get_instance().
    logger(const std::string& plugin)
    : name(plugin), console(spdlog::stdout_logger_mt("cameradar")) {}

public:
    // Returns the process-wide logger instance.
    // NOTE(review): `name` is only honoured on the very first call — later
    // calls reuse the static instance and silently ignore the argument.
    static logger&
    get_instance(const std::string& name = "") {
        static logger self(name);
        return self;
    }

    // Maps our loglevel onto the matching spdlog level.
    void
    set_level(loglevel level) {
        switch (level) {
        case loglevel::DEBUG: this->console->set_level(spdlog::level::level_enum::debug); break;
        case loglevel::INFO: this->console->set_level(spdlog::level::level_enum::info); break;
        case loglevel::WARN: this->console->set_level(spdlog::level::level_enum::warn); break;
        case loglevel::ERR: this->console->set_level(spdlog::level::level_enum::err); break;
        case loglevel::CRITICAL:
            this->console->set_level(spdlog::level::level_enum::critical);
            break;
        }
    }

    // Name passed at construction time.
    std::string
    get_name() const {
        return this->name;
    }

    // Static helpers forwarding to the singleton's spdlog logger.
    static void
    info(const std::string& message) {
        etix::tool::logger::get_instance().console->info(message);
    }

    static void
    warn(const std::string& message) {
        etix::tool::logger::get_instance().console->warn(message);
    }

    static void
    err(const std::string& message) {
        etix::tool::logger::get_instance().console->error(message);
    }

    static void
    crit(const std::string& message) {
        etix::tool::logger::get_instance().console->critical(message);
    }

    static void
    debug(const std::string& message) {
        etix::tool::logger::get_instance().console->debug(message);
    }
};
}
}
// Should be replaced to calls to spdlog::logger::getlogger(const std::string&
// name)
#define LOG_WARN_(message, from) \
etix::tool::logger::get_instance().warn(etix::tool::format_output( \
std::string(from) + "::" + __FUNCTION__ + ":" + std::to_string(__LINE__), message))
#define LOG_ERR_(message, from) \
etix::tool::logger::get_instance().err(etix::tool::format_output( \
std::string(from) + "::" + __FUNCTION__ + ":" + std::to_string(__LINE__), message))
#define LOG_DEBUG_(message, from) \
etix::tool::logger::get_instance().debug(etix::tool::format_output( \
std::string(from) + "::" + __FUNCTION__ + ":" + std::to_string(__LINE__), message))
#define LOG_INFO_(message, from) \
etix::tool::logger::get_instance().info(etix::tool::format_output( \
std::string(from) + "::" + __FUNCTION__ + ":" + std::to_string(__LINE__), message))
#define LOG_CRIT_(message, from) \
etix::tool::logger::get_instance().crit(etix::tool::format_output( \
std::string(from) + "::" + __FUNCTION__ + ":" + std::to_string(__LINE__), message))
+98
View File
@@ -0,0 +1,98 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string> // for string
#include <unordered_map> // for unordered_map
#include <utility> // for pair
#include <vector> // for vector
namespace etix {
namespace tool {
// Minimal command-line option parser: options are declared with required()
// / optional(), parsed with execute(), then read back with exist() and
// operator[], or iterated in (option, argument) pairs.
class opt_parse {
private:
    // Description of a single declared option.
    struct opt_param {
        bool required;          // option must appear on the command line
        bool need_arg;          // option consumes a value
        std::string name;       // option name as passed (e.g. "-c")
        std::string desc;       // help text
        std::string argument;   // value captured during execute()
        bool is_passed = false; // set once the option is seen

        opt_param(bool required, bool need_arg, std::string name, std::string desc)
        : required(required), need_arg(need_arg), name(name), desc(desc) {}
    };

    std::unordered_map<std::string, opt_param> params;
    int argc;
    char** argv;
    int params_cnt = 0;

public:
    // Forward iterator over the (option, argument) pairs collected by
    // execute(). Each iterator carries its own copy of the pair vector;
    // comparisons only look at the position, so both sides are assumed to
    // come from the same opt_parse instance.
    class iterator {
    private:
        std::vector<std::pair<std::string, std::string>> args;
        unsigned int opt_pos = 0;

    public:
        iterator(std::vector<std::pair<std::string, std::string>> args, unsigned int opt_pos)
        : args(args), opt_pos(opt_pos) {}

        // Pre-increment.
        // Fix: now returns a reference per convention — the previous
        // by-value return copied the whole argument vector on every step.
        iterator& operator++() {
            this->opt_pos += 1;
            return *this;
        }

        std::pair<std::string, std::string>& operator*() { return this->args.at(this->opt_pos); }

        bool
        operator==(const iterator& rhs) const {
            return this->opt_pos == rhs.opt_pos;
        }

        bool
        operator!=(const iterator& rhs) const {
            return this->opt_pos != rhs.opt_pos;
        }
    };

    opt_parse() = delete;
    opt_parse(int argc, char* argv[]);
    ~opt_parse();

    // Declare a mandatory / optional option; `need_arg` tells whether the
    // option consumes a value.
    void required(const std::string& name, const std::string& desc = "", bool need_arg = true);
    void optional(const std::string& name, const std::string& desc = "", bool need_arg = true);

    // Parses argv against the declared options (defined elsewhere;
    // presumably returns false on parse error — confirm at the call site).
    bool execute();

    // Iteration over the parsed (option, argument) pairs.
    iterator begin() const;
    iterator end() const;

    void print_usage() const;
    void print_help() const;

    bool has_error() const;

    // Whether `opt` was passed on the command line.
    bool exist(const std::string& opt) const;

    // The argument captured for `opt`.
    std::string operator[](const std::string& opt) const;
};
} // tool
} // etix
+26
View File
@@ -0,0 +1,26 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <string> // for string
#include <stream_model.h> // for stream_model
namespace etix {
namespace cameradar {
const std::string make_path(const stream_model& model);
}
}
@@ -0,0 +1,70 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <assert.h> // assert
#include <csignal> // sigint
#include <iostream> // std::cout
// To avoid an unused warning for the asserted in handle_signal
#define _unused(x) ((void)(x))
namespace etix {
namespace cameradar {
// Tri-state shutdown flag driven by SIGINT: the first interrupt requests a
// graceful stop, any further one requests a forced stop.
enum class stop_priority { running, stop, force_stop };

// Records the stop level requested by the user via SIGINT.
class event_handler {
public:
    event_handler(void) : ss(stop_priority::running) {}

    // Handles one SIGINT delivery and escalates the stop state.
    virtual int
    handle_signal(int signum) {
        assert(signum == SIGINT);
        (void)signum; // silence the unused-parameter warning in release builds
        // Erase the "^C" echoed by the terminal.
        std::cout << "\b\b\b\033[K";
        this->ss = (this->ss == stop_priority::running) ? stop_priority::stop
                                                        : stop_priority::force_stop;
        return 0;
    }

    // Current stop level.
    stop_priority
    should_stop(void) const {
        return this->ss;
    }

private:
    stop_priority ss;
};
// Process-wide singleton exposing the stop state recorded by the shared
// event_handler.
class signal_handler {
private:
    // Non-copyable singleton: construction and copying are private.
    signal_handler(void);
    signal_handler(const signal_handler&);
    signal_handler& operator=(const signal_handler&);

    // Static trampoline forwarding a delivered signal to `handler`
    // (presumably registered with std::signal in the .cpp — confirm).
    static void call_handler(int signum);

    // The shared handler holding the stop state.
    static event_handler handler;

public:
    // Access the unique instance.
    static signal_handler& instance(void);

    // Current stop level (delegates to the internal event_handler).
    etix::cameradar::stop_priority should_stop(void) const;
};
}
}
@@ -0,0 +1,71 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
// Very fast asynchronous logger (millions of logs per second on an average desktop)
// Uses pre allocated lockfree queue for maximum throughput even under large number of threads.
// Creates a single back thread to pop messages from the queue and log them.
//
// Upon each log write the logger:
// 1. Checks if its log level is enough to log the message
// 2. Push a new copy of the message to a queue (or block the caller until space is available in
// the queue)
// 3. will throw spdlog_ex upon log exceptions
// Upon destruction, logs all remaining messages in the queue before shutting down.
#include <chrono>
#include <functional>
#include "common.h"
#include "logger.h"
#include "spdlog.h"
namespace spdlog {
namespace details {
class async_log_helper;
}
// Asynchronous logger front-end (vendored spdlog): inherits the public
// logging API from `logger` and queues messages for the worker thread
// implemented in details::async_log_helper (see the file header above).
class async_logger : public logger {
public:
    // Construct from an iterator range of sinks.
    template <class It>
    async_logger(
        const std::string& name,
        const It& begin,
        const It& end,
        size_t queue_size,
        const async_overflow_policy overflow_policy = async_overflow_policy::block_retry,
        const std::function<void()>& worker_warmup_cb = nullptr,
        const std::chrono::milliseconds& flush_interval_ms = std::chrono::milliseconds::zero());

    // Construct from an initializer list of sinks.
    async_logger(
        const std::string& logger_name,
        sinks_init_list sinks,
        size_t queue_size,
        const async_overflow_policy overflow_policy = async_overflow_policy::block_retry,
        const std::function<void()>& worker_warmup_cb = nullptr,
        const std::chrono::milliseconds& flush_interval_ms = std::chrono::milliseconds::zero());

    // Construct with a single sink.
    async_logger(
        const std::string& logger_name,
        sink_ptr single_sink,
        size_t queue_size,
        const async_overflow_policy overflow_policy = async_overflow_policy::block_retry,
        const std::function<void()>& worker_warmup_cb = nullptr,
        const std::chrono::milliseconds& flush_interval_ms = std::chrono::milliseconds::zero());

    void flush() override;

protected:
    void _log_msg(details::log_msg& msg) override;
    void _set_formatter(spdlog::formatter_ptr msg_formatter) override;
    void _set_pattern(const std::string& pattern) override;

private:
    std::unique_ptr<details::async_log_helper> _async_log_helper;
};
}
#include "./details/async_logger_impl.h"
@@ -0,0 +1,88 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <string>
#include <initializer_list>
#include <chrono>
#include <memory>
// visual studio does not support noexcept yet
#ifndef _MSC_VER
#define SPDLOG_NOEXCEPT noexcept
#else
#define SPDLOG_NOEXCEPT throw()
#endif
namespace spdlog {
class formatter;
namespace sinks {
class sink;
}
// Common types across the lib
using log_clock = std::chrono::system_clock;
using sink_ptr = std::shared_ptr<sinks::sink>;
using sinks_init_list = std::initializer_list<sink_ptr>;
using formatter_ptr = std::shared_ptr<spdlog::formatter>;
// Log level enum
namespace level {
// Severity levels, ordered from most verbose (trace) to disabled (off).
typedef enum {
    trace = 0,
    debug = 1,
    info = 2,
    notice = 3,
    warn = 4,
    err = 5,
    critical = 6,
    alert = 7,
    emerg = 8,
    off = 9
} level_enum;

// Full names, indexed by level_enum - must stay in sync with the enum above.
static const char* level_names[]{ "trace", "debug", "info", "notice", "warning",
                                  "error", "critical", "alert", "emerg", "off" };
// One-character abbreviations, same indexing.
static const char* short_level_names[]{ "T", "D", "I", "N", "W", "E", "C", "A", "M", "O" };

// Full name of a level. NOTE: no bounds check - l must be a valid level_enum.
inline const char*
to_str(spdlog::level::level_enum l) {
    return level_names[l];
}

// Single-letter name of a level. Same precondition as to_str().
inline const char*
to_short_str(spdlog::level::level_enum l) {
    return short_level_names[l];
}
} // level
//
// Async overflow policy - block by default.
//
// What to do when the async queue is full.
enum class async_overflow_policy {
    block_retry, // Block / yield / sleep until the message can be enqueued
    discard_log_msg // Discard the message if enqueue fails
};
//
// Log exception
//
// Exception type thrown by spdlog on logging and configuration errors.
// Stores its own copy of the description and exposes it through what().
class spdlog_ex : public std::exception {
private:
    std::string _what_arg; // owned description returned by what()

public:
    explicit spdlog_ex(const std::string& msg) : _what_arg(msg) {}

    const char*
    what() const SPDLOG_NOEXCEPT override {
        return _what_arg.c_str();
    }
};
} // spdlog
@@ -0,0 +1,313 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
// async log helper :
// Process logs asynchronously using a back thread.
//
// If the internal queue of log messages reaches its max size,
// then the client call will block until there is more room.
//
// If the back thread throws during logging, a spdlog::spdlog_ex exception
// will be thrown in the client's thread when it tries to log the next message
#pragma once
#include <chrono>
#include <thread>
#include <functional>
#include "../common.h"
#include "../sinks/sink.h"
#include "./mpmc_bounded_q.h"
#include "./log_msg.h"
#include "./format.h"
#include "./os.h"
namespace spdlog {
namespace details {
// Owns the lockfree queue and the single worker thread that drains it.
// Client threads push async_msg items; the worker formats them and fans
// them out to the sinks. Destruction pushes a terminate message and joins.
class async_log_helper {
    // Message traveling through the queue: either a snapshot of a log
    // record or a control command (flush / terminate). Movable only.
    enum class async_msg_type { log, flush, terminate };
    struct async_msg {
        std::string logger_name;
        level::level_enum level;
        log_clock::time_point time;
        size_t thread_id;
        std::string txt;
        async_msg_type msg_type;

        async_msg() = default;
        ~async_msg() = default;

        // Move constructor.
        // FIX: thread_id was not transferred here (unlike in move-assign
        // below), so moved messages carried an uninitialized thread id.
        async_msg(async_msg&& other) SPDLOG_NOEXCEPT : logger_name(std::move(other.logger_name)),
                                                       level(std::move(other.level)),
                                                       time(std::move(other.time)),
                                                       thread_id(other.thread_id),
                                                       txt(std::move(other.txt)),
                                                       msg_type(std::move(other.msg_type)) {}

        async_msg(async_msg_type m_type) : msg_type(m_type){};

        async_msg& operator=(async_msg&& other) SPDLOG_NOEXCEPT {
            logger_name = std::move(other.logger_name);
            level = other.level;
            time = std::move(other.time);
            thread_id = other.thread_id;
            txt = std::move(other.txt);
            msg_type = other.msg_type;
            return *this;
        }

        // never copy or assign. should only be moved..
        async_msg(const async_msg&) = delete;
        async_msg& operator=(const async_msg& other) = delete;

        // Snapshot a log_msg (copies the raw text so the original may die).
        async_msg(const details::log_msg& m)
            : logger_name(m.logger_name)
            , level(m.level)
            , time(m.time)
            , thread_id(m.thread_id)
            , txt(m.raw.data(), m.raw.size())
            , msg_type(async_msg_type::log) {}

        // Copy this snapshot back into a log_msg for formatting/sinking.
        void
        fill_log_msg(log_msg& msg) {
            msg.clear();
            msg.logger_name = logger_name;
            msg.level = level;
            msg.time = time;
            msg.thread_id = thread_id;
            msg.raw << txt;
        }
    };

public:
    using item_type = async_msg;
    using q_type = details::mpmc_bounded_queue<item_type>;
    using clock = std::chrono::steady_clock;

    // Starts the worker thread immediately; it runs until a terminate
    // message (pushed by the destructor) is dequeued.
    async_log_helper(
        formatter_ptr formatter,
        const std::vector<sink_ptr>& sinks,
        size_t queue_size,
        const async_overflow_policy overflow_policy = async_overflow_policy::block_retry,
        const std::function<void()>& worker_warmup_cb = nullptr,
        const std::chrono::milliseconds& flush_interval_ms = std::chrono::milliseconds::zero());

    // Enqueue a log record (may block depending on the overflow policy).
    void log(const details::log_msg& msg);

    // stop logging and join the back thread
    ~async_log_helper();

    void set_formatter(formatter_ptr);

    // Enqueue a flush request for the worker thread.
    void flush();

private:
    formatter_ptr _formatter;
    std::vector<std::shared_ptr<sinks::sink>> _sinks;
    // queue of messages to log
    q_type _q;
    // flush/terminate flags - set and read inside the worker loop
    bool _flush_requested;
    bool _terminate_requested;
    // last exception thrown from the worker thread
    std::shared_ptr<spdlog_ex> _last_workerthread_ex;
    // overflow policy
    const async_overflow_policy _overflow_policy;
    // worker thread warmup callback - one can set thread priority, affinity, etc
    const std::function<void()> _worker_warmup_cb;
    // auto periodic sink flush parameter
    const std::chrono::milliseconds _flush_interval_ms;
    // worker thread
    std::thread _worker_thread;

    void push_msg(async_msg&& new_msg);
    // throw last worker thread exception or if worker thread is not active
    void throw_if_bad_worker();
    // worker thread main loop
    void worker_loop();
    // pop next message from the queue and process it. will set the last_pop to the pop time
    // return false if termination of the queue is required
    bool process_next_msg(log_clock::time_point& last_pop, log_clock::time_point& last_flush);
    void handle_flush_interval(log_clock::time_point& now, log_clock::time_point& last_flush);
    // sleep, yield or return immediately using the time passed since last message as a hint
    static void sleep_or_yield(const spdlog::log_clock::time_point& now,
                               const log_clock::time_point& last_op_time);
};
}
}
///////////////////////////////////////////////////////////////////////////////
// async_sink class implementation
///////////////////////////////////////////////////////////////////////////////
// Constructor: copies the sink set, sizes the queue, then starts the worker
// thread LAST so that every member it reads is already initialized
// (member-init order matters here - _worker_thread must stay final).
inline spdlog::details::async_log_helper::async_log_helper(
    formatter_ptr formatter,
    const std::vector<sink_ptr>& sinks,
    size_t queue_size,
    const async_overflow_policy overflow_policy,
    const std::function<void()>& worker_warmup_cb,
    const std::chrono::milliseconds& flush_interval_ms)
    : _formatter(formatter)
    , _sinks(sinks)
    , _q(queue_size)
    , _flush_requested(false)
    , _terminate_requested(false)
    , _overflow_policy(overflow_policy)
    , _worker_warmup_cb(worker_warmup_cb)
    , _flush_interval_ms(flush_interval_ms)
    , _worker_thread(&async_log_helper::worker_loop, this) {}
// Send to the worker thread termination message(level=off)
// and wait for it to finish gracefully
// Send to the worker thread termination message(level=off)
// and wait for it to finish gracefully. The worker flushes the remaining
// queue contents before exiting, so no queued message is lost.
inline spdlog::details::async_log_helper::~async_log_helper() {
    try {
        push_msg(async_msg(async_msg_type::terminate));
        _worker_thread.join();
    } catch (...) // don't crash in destructor
    {}
}
// Try to push and block until succeeded
// Snapshot the formatted record into a movable queue item and hand it to
// push_msg(), which blocks or discards per the configured overflow policy.
inline void
spdlog::details::async_log_helper::log(const details::log_msg& msg) {
    async_msg queued(msg);
    push_msg(std::move(queued));
}
// Try to push and block until succeeded
// Try to push and block until succeeded.
// If the first enqueue fails and the policy is block_retry, spin/yield/sleep
// (backoff grows with idle time) until there is room.
// NOTE(review): assumes _q.enqueue leaves new_msg intact when it returns
// false, otherwise the retry would move a hollow message - confirm against
// mpmc_bounded_q.
inline void
spdlog::details::async_log_helper::push_msg(details::async_log_helper::async_msg&& new_msg) {
    // Re-raise any exception the worker thread stored before accepting work.
    throw_if_bad_worker();
    if (!_q.enqueue(std::move(new_msg)) &&
        _overflow_policy != async_overflow_policy::discard_log_msg) {
        auto last_op_time = details::os::now();
        auto now = last_op_time;
        do {
            now = details::os::now();
            sleep_or_yield(now, last_op_time);
        } while (!_q.enqueue(std::move(new_msg)));
    }
}
inline void
spdlog::details::async_log_helper::flush() {
push_msg(async_msg(async_msg_type::flush));
}
// Worker thread entry point: run the optional warmup callback, then drain
// the queue until process_next_msg() reports termination. Any exception is
// captured into _last_workerthread_ex so push_msg() can rethrow it in a
// client thread - the worker itself must never leak an exception.
inline void
spdlog::details::async_log_helper::worker_loop() {
    try {
        if (_worker_warmup_cb) _worker_warmup_cb();
        auto last_pop = details::os::now();
        auto last_flush = last_pop;
        while (process_next_msg(last_pop, last_flush))
            ;
    } catch (const std::exception& ex) {
        _last_workerthread_ex = std::make_shared<spdlog_ex>(
            std::string("async_logger worker thread exception: ") + ex.what());
    } catch (...) {
        _last_workerthread_ex = std::make_shared<spdlog_ex>("async_logger worker thread exception");
    }
}
// process next message in the queue
// return true if this thread should still be active (no msg with level::off was received)
// process next message in the queue
// return true if this thread should still be active (no terminate handled yet).
// Control messages only set flags; the actual flush/termination happens on
// the empty-queue path below, so messages already queued are never lost.
inline bool
spdlog::details::async_log_helper::process_next_msg(log_clock::time_point& last_pop,
                                                    log_clock::time_point& last_flush) {
    async_msg incoming_async_msg;
    log_msg incoming_log_msg;
    if (_q.dequeue(incoming_async_msg)) {
        last_pop = details::os::now();
        switch (incoming_async_msg.msg_type) {
        case async_msg_type::flush: _flush_requested = true; break;
        case async_msg_type::terminate:
            // flush once more before shutting down
            _flush_requested = true;
            _terminate_requested = true;
            break;
        default:
            // regular log record: format and fan out to every sink
            incoming_async_msg.fill_log_msg(incoming_log_msg);
            _formatter->format(incoming_log_msg);
            for (auto& s : _sinks) s->log(incoming_log_msg);
        }
        return true;
    }
    // Handle empty queue..
    // This is the only place where the queue can terminate or flush to avoid losing messages
    // already in the queue
    else {
        auto now = details::os::now();
        handle_flush_interval(now, last_flush);
        sleep_or_yield(now, last_pop);
        return !_terminate_requested;
    }
}
// Flush all sinks if a flush was explicitly requested or the periodic flush
// interval (when non-zero) has elapsed. Updates both time references so the
// caller's clock state stays consistent after the (possibly slow) flush.
inline void
spdlog::details::async_log_helper::handle_flush_interval(log_clock::time_point& now,
                                                         log_clock::time_point& last_flush) {
    auto should_flush =
        _flush_requested || (_flush_interval_ms != std::chrono::milliseconds::zero() &&
                             now - last_flush >= _flush_interval_ms);
    if (should_flush) {
        for (auto& s : _sinks) s->flush();
        now = last_flush = details::os::now();
        _flush_requested = false;
    }
}
// Replace the formatter used by the worker thread.
// NOTE(review): plain shared_ptr assignment with no synchronization against
// the worker loop - callers presumably set this before heavy logging starts;
// confirm the intended thread-safety contract.
inline void
spdlog::details::async_log_helper::set_formatter(formatter_ptr msg_formatter) {
    _formatter = msg_formatter;
}
// sleep, yield or return immediately, using the time passed since the last message as a hint
// Back off proportionally to how long we have been idle: busy-spin while
// traffic is hot, then yield, then sleep increasingly long (capped at 100ms).
inline void
spdlog::details::async_log_helper::sleep_or_yield(
    const spdlog::log_clock::time_point& now, const spdlog::log_clock::time_point& last_op_time) {
    using std::chrono::milliseconds;
    const auto idle_for = now - last_op_time;
    if (idle_for <= milliseconds(1)) {
        // spin up to 1 ms - keep latency minimal while messages are flowing
        return;
    } else if (idle_for <= milliseconds(10)) {
        // yield up to 10 ms
        std::this_thread::yield();
    } else if (idle_for <= milliseconds(100)) {
        // sleep for half of the duration since the last operation
        std::this_thread::sleep_for(idle_for / 2);
    } else {
        std::this_thread::sleep_for(milliseconds(100));
    }
}
// throw if the worker thread threw an exception or not active
// Rethrow (once) the exception captured by the worker thread, if any.
// Moving the shared_ptr out first clears _last_workerthread_ex so the same
// error is not thrown again on subsequent calls.
inline void
spdlog::details::async_log_helper::throw_if_bad_worker() {
    if (_last_workerthread_ex) {
        auto ex = std::move(_last_workerthread_ex);
        throw * ex;
    }
}
@@ -0,0 +1,72 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
// Async Logger implementation
// Use an async_sink (queue per logger) to perform the logging in a worker thread
#include "./async_log_helper.h"
// Range-of-sinks constructor: initialize the synchronous logger base, then
// create the helper (which starts the worker thread) sharing the base's
// formatter and sink list.
template <class It>
inline spdlog::async_logger::async_logger(const std::string& logger_name,
                                          const It& begin,
                                          const It& end,
                                          size_t queue_size,
                                          const async_overflow_policy overflow_policy,
                                          const std::function<void()>& worker_warmup_cb,
                                          const std::chrono::milliseconds& flush_interval_ms)
    : logger(logger_name, begin, end)
    , _async_log_helper(new details::async_log_helper(
          _formatter, _sinks, queue_size, overflow_policy, worker_warmup_cb, flush_interval_ms)) {}
// Initializer-list constructor: delegates to the iterator-range constructor.
inline spdlog::async_logger::async_logger(const std::string& logger_name,
                                          sinks_init_list sinks,
                                          size_t queue_size,
                                          const async_overflow_policy overflow_policy,
                                          const std::function<void()>& worker_warmup_cb,
                                          const std::chrono::milliseconds& flush_interval_ms)
    : async_logger(logger_name,
                   sinks.begin(),
                   sinks.end(),
                   queue_size,
                   overflow_policy,
                   worker_warmup_cb,
                   flush_interval_ms) {}
// Single-sink constructor: wraps the sink in a one-element list and delegates.
inline spdlog::async_logger::async_logger(const std::string& logger_name,
                                          sink_ptr single_sink,
                                          size_t queue_size,
                                          const async_overflow_policy overflow_policy,
                                          const std::function<void()>& worker_warmup_cb,
                                          const std::chrono::milliseconds& flush_interval_ms)
    : async_logger(logger_name,
                   { single_sink },
                   queue_size,
                   overflow_policy,
                   worker_warmup_cb,
                   flush_interval_ms) {}
// Forward the flush request to the helper, which queues it for the worker.
inline void
spdlog::async_logger::flush() {
    _async_log_helper->flush();
}
// Update both the base logger's formatter and the worker's copy.
inline void
spdlog::async_logger::_set_formatter(spdlog::formatter_ptr msg_formatter) {
    _formatter = msg_formatter;
    _async_log_helper->set_formatter(_formatter);
}
// Build a pattern_formatter from the pattern string and install it on both
// the base logger and the worker.
inline void
spdlog::async_logger::_set_pattern(const std::string& pattern) {
    _formatter = std::make_shared<pattern_formatter>(pattern);
    _async_log_helper->set_formatter(_formatter);
}
// Override of the synchronous sink path: enqueue instead of writing inline.
inline void
spdlog::async_logger::_log_msg(details::log_msg& msg) {
    _async_log_helper->log(msg);
}
@@ -0,0 +1,117 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
// Helper class for file sink
// When failing to open a file, retry several times (5) with a small delay between the tries (10 ms)
// Can be set to auto flush on every line
// Throw spdlog_ex exception on errors
#include <string>
#include <thread>
#include <chrono>
#include "os.h"
#include "log_msg.h"
namespace spdlog {
namespace details {
// RAII wrapper around a FILE* used by the file sinks.
// open() retries up to open_tries times with open_interval ms between tries.
// When _force_flush is set, every write is followed by an fflush().
// Throws spdlog_ex on I/O errors. Not copyable.
class file_helper {
public:
    const int open_tries = 5;
    const int open_interval = 10; // milliseconds between open retries

    explicit file_helper(bool force_flush) : _fd(nullptr), _force_flush(force_flush) {}

    file_helper(const file_helper&) = delete;
    file_helper& operator=(const file_helper&) = delete;
    ~file_helper() { close(); }

    // Open fname for writing ("wb" truncates, "ab" appends), retrying on
    // transient failures before giving up.
    void
    open(const std::string& fname, bool truncate = false) {
        close();
        const char* mode = truncate ? "wb" : "ab";
        _filename = fname;
        for (int tries = 0; tries < open_tries; ++tries) {
            if (!os::fopen_s(&_fd, fname, mode)) return;
            std::this_thread::sleep_for(std::chrono::milliseconds(open_interval));
        }
        throw spdlog_ex("Failed opening file " + fname + " for writing");
    }

    // Re-open the previously opened file (e.g. for rotation).
    void
    reopen(bool truncate) {
        if (_filename.empty()) throw spdlog_ex("Failed re opening file - was not opened before");
        open(_filename, truncate);
    }

    void
    flush() {
        // FIX: guard against a closed/never-opened helper - fflush(nullptr)
        // would otherwise flush EVERY open output stream in the process.
        if (_fd) std::fflush(_fd);
    }

    void
    close() {
        if (_fd) {
            std::fclose(_fd);
            _fd = nullptr;
        }
    }

    // Write a raw string. Precondition: a successful open() (_fd != nullptr).
    void
    write_string(const std::string& msg) {
        if (std::fwrite(msg.c_str(), 1, msg.size(), _fd) != msg.size())
            throw spdlog_ex("Failed writing to file " + _filename);
        if (_force_flush) std::fflush(_fd);
    }

    // Write a formatted log message. Same precondition as write_string().
    void
    write(const log_msg& msg) {
        size_t msg_size = msg.formatted.size();
        auto data = msg.formatted.data();
        if (std::fwrite(data, 1, msg_size, _fd) != msg_size)
            throw spdlog_ex("Failed writing to file " + _filename);
        if (_force_flush) std::fflush(_fd);
    }

    // Current file size in bytes; restores the original file position.
    long
    size() {
        if (!_fd) throw spdlog_ex("Cannot use size() on closed file " + _filename);
        auto pos = ftell(_fd);
        if (fseek(_fd, 0, SEEK_END) != 0) throw spdlog_ex("fseek failed on file " + _filename);
        auto file_size = ftell(_fd);
        if (fseek(_fd, pos, SEEK_SET) != 0) throw spdlog_ex("fseek failed on file " + _filename);
        if (file_size == -1) throw spdlog_ex("ftell failed on file " + _filename);
        return file_size;
    }

    const std::string&
    filename() const {
        return _filename;
    }

    static bool
    file_exists(const std::string& name) {
        return os::file_exists(name);
    }

private:
    FILE* _fd;
    std::string _filename;
    bool _force_flush; // fflush after every write
};
}
}
@@ -0,0 +1,934 @@
/*
Formatting library for C++
Copyright (c) 2012 - 2015, Victor Zverovich
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "format.h"
#include <string.h>
#include <cctype>
#include <cerrno>
#include <climits>
#include <cmath>
#include <cstdarg>
#include <cstddef> // for std::ptrdiff_t
#if defined(_WIN32) && defined(__MINGW32__)
#include <cstring>
#endif
#if FMT_USE_WINDOWS_H
#if defined(NOMINMAX) || defined(FMT_WIN_MINMAX)
#include <windows.h>
#else
#define NOMINMAX
#include <windows.h>
#undef NOMINMAX
#endif
#endif
using fmt::internal::Arg;
#if FMT_EXCEPTIONS
#define FMT_TRY try
#define FMT_CATCH(x) catch (x)
#else
#define FMT_TRY if (true)
#define FMT_CATCH(x) if (false)
#endif
#ifdef FMT_HEADER_ONLY
#define FMT_FUNC inline
#else
#define FMT_FUNC
#endif
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4127) // conditional expression is constant
#pragma warning(disable : 4702) // unreachable code
// Disable deprecation warning for strerror. The latter is not called but
// MSVC fails to detect it.
#pragma warning(disable : 4996)
#endif
// Dummy implementations of strerror_r and strerror_s called if corresponding
// system functions are not available.
// Fallback overload selected only when the platform declares no real
// strerror_r; returning Null<> routes StrError::handle() (in safe_strerror
// below) to the strerror_s path via overload resolution.
static inline fmt::internal::Null<>
strerror_r(int, char*, ...) {
    return fmt::internal::Null<>();
}
// Same trick for strerror_s: chosen only when no real strerror_s exists,
// routing StrError::fallback() to plain strerror.
static inline fmt::internal::Null<>
strerror_s(char*, std::size_t, ...) {
    return fmt::internal::Null<>();
}
namespace fmt {
namespace {
#ifndef _MSC_VER
#define FMT_SNPRINTF snprintf
#else // _MSC_VER
// MSVC has no snprintf; emulate it with vsnprintf_s. _TRUNCATE makes the
// output cut off (NUL-terminated) instead of invoking the invalid-parameter
// handler when the buffer is too small.
inline int
fmt_snprintf(char* buffer, size_t size, const char* format, ...) {
    va_list args;
    va_start(args, format);
    int result = vsnprintf_s(buffer, size, _TRUNCATE, format, args);
    va_end(args);
    return result;
}
#define FMT_SNPRINTF fmt_snprintf
#endif // _MSC_VER
#if defined(_WIN32) && defined(__MINGW32__) && !defined(__NO_ISOCEXT)
#define FMT_SWPRINTF snwprintf
#else
#define FMT_SWPRINTF swprintf
#endif // defined(_WIN32) && defined(__MINGW32__) && !defined(__NO_ISOCEXT)
// Checks if a value fits in int - used to avoid warnings about comparing
// signed and unsigned integers.
// Checks whether a value of integral type T fits in an int, without
// triggering signed/unsigned comparison warnings.
// Primary template serves unsigned T (no negative values possible).
template <bool IsSigned>
struct IntChecker {
    template <typename T>
    static bool
    fits_in_int(T value) {
        return value <= static_cast<unsigned>(INT_MAX);
    }
    // bool always fits.
    static bool
    fits_in_int(bool) {
        return true;
    }
};
// Specialization for signed T: both bounds must be checked.
template <>
struct IntChecker<true> {
    template <typename T>
    static bool
    fits_in_int(T value) {
        return INT_MIN <= value && value <= INT_MAX;
    }
    // int trivially fits in int.
    static bool
    fits_in_int(int) {
        return true;
    }
};
// ANSI escape sequence that restores the terminal's default color.
const char RESET_COLOR[] = "\x1b[0m";
// Signature shared by format_system_error / format_windows_error;
// report_error() takes one of these as a callback.
typedef void (*FormatFunc)(fmt::Writer&, int, fmt::StringRef);
// Portable thread-safe version of strerror.
// Sets buffer to point to a string describing the error code.
// This can be either a pointer to a string stored in buffer,
// or a pointer to some static immutable string.
// Returns one of the following values:
// 0 - success
// ERANGE - buffer is not large enough to store the error message
// other - failure
// Buffer should be at least of size 1.
// Thread-safe strerror replacement (see contract in the comment above).
// The local StrError class picks, purely via overload resolution, whichever
// of XSI strerror_r (returns int), GNU strerror_r (returns char*),
// strerror_s, or plain strerror the platform actually provides - the Null<>
// dummies declared earlier keep every branch well-formed.
int
safe_strerror(int error_code, char*& buffer, std::size_t buffer_size) FMT_NOEXCEPT {
    FMT_ASSERT(buffer != 0 && buffer_size != 0, "invalid buffer");
    class StrError {
    private:
        int error_code_;
        char*& buffer_;
        std::size_t buffer_size_;

        // A noop assignment operator to avoid bogus warnings.
        void operator=(const StrError&) {}

        // Handle the result of XSI-compliant version of strerror_r.
        int
        handle(int result) {
            // glibc versions before 2.13 return result in errno.
            return result == -1 ? errno : result;
        }

        // Handle the result of GNU-specific version of strerror_r.
        int
        handle(char* message) {
            // If the buffer is full then the message is probably truncated.
            if (message == buffer_ && strlen(buffer_) == buffer_size_ - 1) return ERANGE;
            buffer_ = message;
            return 0;
        }

        // Handle the case when strerror_r is not available.
        int handle(fmt::internal::Null<>) {
            return fallback(strerror_s(buffer_, buffer_size_, error_code_));
        }

        // Fallback to strerror_s when strerror_r is not available.
        int
        fallback(int result) {
            // If the buffer is full then the message is probably truncated.
            return result == 0 && strlen(buffer_) == buffer_size_ - 1 ? ERANGE : result;
        }

        // Fallback to strerror if strerror_r and strerror_s are not available.
        int fallback(fmt::internal::Null<>) {
            errno = 0;
            buffer_ = strerror(error_code_);
            return errno;
        }

    public:
        StrError(int err_code, char*& buf, std::size_t buf_size)
            : error_code_(err_code), buffer_(buf), buffer_size_(buf_size) {}

        int
        run() {
            strerror_r(0, 0, ""); // Suppress a warning about unused strerror_r.
            return handle(strerror_r(error_code_, buffer_, buffer_size_));
        }
    };
    return StrError(error_code, buffer, buffer_size).run();
}
// Render "<message>: error <code>" into out, guaranteeing the result fits in
// INLINE_BUFFER_SIZE (the message part is dropped if it would not) so no
// dynamic allocation / bad_alloc can occur on this error path.
void
format_error_code(fmt::Writer& out, int error_code, fmt::StringRef message) FMT_NOEXCEPT {
    // Report error code making sure that the output fits into
    // INLINE_BUFFER_SIZE to avoid dynamic memory allocation and potential
    // bad_alloc.
    out.clear();
    static const char SEP[] = ": ";
    static const char ERROR_STR[] = "error ";
    fmt::internal::IntTraits<int>::MainType ec_value = error_code;
    // Subtract 2 to account for terminating null characters in SEP and ERROR_STR.
    std::size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2;
    // NOTE(review): for a negative error_code the unsigned conversion above
    // makes count_digits over-estimate the size - harmless (only drops the
    // message sooner), but confirm this is intended.
    error_code_size += fmt::internal::count_digits(ec_value);
    if (message.size() <= fmt::internal::INLINE_BUFFER_SIZE - error_code_size)
        out << message << SEP;
    out << ERROR_STR << error_code;
    assert(out.size() <= fmt::internal::INLINE_BUFFER_SIZE);
}
// Format an error via func and print it to stderr followed by a newline.
// Used on paths (e.g. destructors) where throwing is not an option.
void
report_error(FormatFunc func, int error_code, fmt::StringRef message) FMT_NOEXCEPT {
    fmt::MemoryWriter full_message;
    func(full_message, error_code, message);
    // Use Writer::data instead of Writer::c_str to avoid potential memory
    // allocation.
    std::fwrite(full_message.data(), full_message.size(), 1, stderr);
    std::fputc('\n', stderr);
}
// IsZeroInt::visit(arg) returns true iff arg is a zero integer.
// IsZeroInt::visit(arg) returns true iff arg is a zero integer.
// Non-integer arguments fall through to the ArgVisitor default (false-y).
class IsZeroInt : public fmt::internal::ArgVisitor<IsZeroInt, bool> {
public:
    template <typename T>
    bool
    visit_any_int(T value) {
        return value == 0;
    }
};
// Checks if an argument is a valid printf width specifier and sets
// left alignment if it is negative.
// Checks if an argument is a valid printf width specifier and sets
// left alignment if it is negative (printf semantics for negative width).
class WidthHandler : public fmt::internal::ArgVisitor<WidthHandler, unsigned> {
private:
    fmt::FormatSpec& spec_;
    FMT_DISALLOW_COPY_AND_ASSIGN(WidthHandler);

public:
    explicit WidthHandler(fmt::FormatSpec& spec) : spec_(spec) {}

    // Non-integer width argument is an error.
    void
    report_unhandled_arg() {
        FMT_THROW(fmt::FormatError("width is not integer"));
    }

    template <typename T>
    unsigned
    visit_any_int(T value) {
        typedef typename fmt::internal::IntTraits<T>::MainType UnsignedType;
        UnsignedType width = value;
        if (fmt::internal::is_negative(value)) {
            spec_.align_ = fmt::ALIGN_LEFT;
            // negate in the unsigned domain - avoids signed-overflow UB for
            // the most negative value
            width = 0 - width;
        }
        if (width > INT_MAX) FMT_THROW(fmt::FormatError("number is too big"));
        return static_cast<unsigned>(width);
    }
};
// Validates a printf '*' precision argument: must be an integer that fits in
// int (checked via IntChecker for the argument's signedness).
class PrecisionHandler : public fmt::internal::ArgVisitor<PrecisionHandler, int> {
public:
    // Non-integer precision argument is an error.
    void
    report_unhandled_arg() {
        FMT_THROW(fmt::FormatError("precision is not integer"));
    }

    template <typename T>
    int
    visit_any_int(T value) {
        if (!IntChecker<std::numeric_limits<T>::is_signed>::fits_in_int(value))
            FMT_THROW(fmt::FormatError("number is too big"));
        return static_cast<int>(value);
    }
};
// Converts an integer argument to an integral type T for printf.
// Converts an integer argument to an integral type T for printf, rewriting
// the Arg in place to the width/signedness implied by the length modifier.
template <typename T>
class ArgConverter : public fmt::internal::ArgVisitor<ArgConverter<T>, void> {
private:
    fmt::internal::Arg& arg_;
    wchar_t type_; // printf conversion character ('d', 'i', 'u', 's', ...)
    FMT_DISALLOW_COPY_AND_ASSIGN(ArgConverter);

public:
    ArgConverter(fmt::internal::Arg& arg, wchar_t type) : arg_(arg), type_(type) {}

    // bool printed with %s keeps its bool-ness; otherwise treat as integer.
    void
    visit_bool(bool value) {
        if (type_ != 's') visit_any_int(value);
    }

    template <typename U>
    void
    visit_any_int(U value) {
        bool is_signed = type_ == 'd' || type_ == 'i';
        using fmt::internal::Arg;
        if (sizeof(T) <= sizeof(int)) {
            // Extra casts are used to silence warnings.
            if (is_signed) {
                arg_.type = Arg::INT;
                arg_.int_value = static_cast<int>(static_cast<T>(value));
            } else {
                arg_.type = Arg::UINT;
                arg_.uint_value = static_cast<unsigned>(
                    static_cast<typename fmt::internal::MakeUnsigned<T>::Type>(value));
            }
        } else {
            if (is_signed) {
                arg_.type = Arg::LONG_LONG;
                // cast through the unsigned type silences sign-conversion
                // warnings; the value is preserved modulo 2^N
                arg_.long_long_value =
                    static_cast<typename fmt::internal::MakeUnsigned<U>::Type>(value);
            } else {
                arg_.type = Arg::ULONG_LONG;
                arg_.ulong_long_value =
                    static_cast<typename fmt::internal::MakeUnsigned<U>::Type>(value);
            }
        }
    }
};
// Converts an integer argument to char for printf.
// Converts an integer argument to char for printf ("%c"), truncating the
// value to char and retagging the Arg in place.
class CharConverter : public fmt::internal::ArgVisitor<CharConverter, void> {
private:
    fmt::internal::Arg& arg_;
    FMT_DISALLOW_COPY_AND_ASSIGN(CharConverter);

public:
    explicit CharConverter(fmt::internal::Arg& arg) : arg_(arg) {}

    template <typename T>
    void
    visit_any_int(T value) {
        arg_.type = Arg::CHAR;
        arg_.int_value = static_cast<char>(value);
    }
};
} // namespace
namespace internal {
// Argument formatter implementing printf-specific behavior on top of
// ArgFormatterBase: "(null)"/"(nil)" for null pointers, integer-style bool,
// and width/alignment handling for %c.
template <typename Char>
class PrintfArgFormatter : public ArgFormatterBase<PrintfArgFormatter<Char>, Char> {
    // Emit printf's representation of a null %p argument.
    void
    write_null_pointer() {
        this->spec().type_ = 0;
        this->write("(nil)");
    }

    typedef ArgFormatterBase<PrintfArgFormatter<Char>, Char> Base;

public:
    PrintfArgFormatter(BasicWriter<Char>& w, FormatSpec& s)
        : ArgFormatterBase<PrintfArgFormatter<Char>, Char>(w, s) {}

    // bool: "%s" prints true/false, any other spec prints it as an int.
    void
    visit_bool(bool value) {
        FormatSpec& fmt_spec = this->spec();
        if (fmt_spec.type_ != 's') return this->visit_any_int(value);
        fmt_spec.type_ = 0;
        this->write(value);
    }

    // char: honor width/alignment, padding with spaces around the character.
    // NOTE(review): when type_ is set and not 'c', the value is written as an
    // int AND then also emitted as a char below - verify this double write is
    // the intended printf emulation.
    void
    visit_char(int value) {
        const FormatSpec& fmt_spec = this->spec();
        BasicWriter<Char>& w = this->writer();
        if (fmt_spec.type_ && fmt_spec.type_ != 'c') w.write_int(value, fmt_spec);
        typedef typename BasicWriter<Char>::CharPtr CharPtr;
        CharPtr out = CharPtr();
        if (fmt_spec.width_ > 1) {
            Char fill = ' ';
            out = w.grow_buffer(fmt_spec.width_);
            if (fmt_spec.align_ != ALIGN_LEFT) {
                // right-align: fill first, character last
                std::fill_n(out, fmt_spec.width_ - 1, fill);
                out += fmt_spec.width_ - 1;
            } else {
                // left-align: character first, fill after
                std::fill_n(out + 1, fmt_spec.width_ - 1, fill);
            }
        } else {
            out = w.grow_buffer(1);
        }
        *out = static_cast<Char>(value);
    }

    // C string: null pointer prints "(null)" (or "(nil)" for %p).
    void
    visit_cstring(const char* value) {
        if (value)
            Base::visit_cstring(value);
        else if (this->spec().type_ == 'p')
            write_null_pointer();
        else
            this->write("(null)");
    }

    // Pointer: null prints "(nil)".
    void
    visit_pointer(const void* value) {
        if (value) return Base::visit_pointer(value);
        this->spec().type_ = 0;
        write_null_pointer();
    }

    // User-defined type: delegate to its registered format function with an
    // empty "{}" format string.
    void
    visit_custom(Arg::CustomValue c) {
        BasicFormatter<Char> formatter(ArgList(), this->writer());
        const Char format_str[] = { '}', 0 };
        const Char* format = format_str;
        c.format(&formatter, c.value, &format);
    }
};
} // namespace internal
} // namespace fmt
// Build the SystemError's message ("<formatted>: <strerror text>") and store
// it by reassigning the std::runtime_error base subobject.
FMT_FUNC void
fmt::SystemError::init(int err_code, CStringRef format_str, ArgList args) {
    error_code_ = err_code;
    MemoryWriter w;
    internal::format_system_error(w, err_code, format(format_str, args));
    std::runtime_error& base = *this;
    base = std::runtime_error(w.str());
}
// Format a floating-point value with snprintf, passing width/precision as
// '*' arguments only when they are actually set (width != 0, precision >= 0)
// so the format string's argument count matches.
template <typename T>
int
fmt::internal::CharTraits<char>::format_float(
    char* buffer, std::size_t size, const char* format, unsigned width, int precision, T value) {
    if (width == 0) {
        return precision < 0 ? FMT_SNPRINTF(buffer, size, format, value)
                             : FMT_SNPRINTF(buffer, size, format, precision, value);
    }
    return precision < 0 ? FMT_SNPRINTF(buffer, size, format, width, value)
                         : FMT_SNPRINTF(buffer, size, format, width, precision, value);
}
// Wide-character twin of the char overload above, using swprintf/snwprintf.
template <typename T>
int
fmt::internal::CharTraits<wchar_t>::format_float(wchar_t* buffer,
                                                 std::size_t size,
                                                 const wchar_t* format,
                                                 unsigned width,
                                                 int precision,
                                                 T value) {
    if (width == 0) {
        return precision < 0 ? FMT_SWPRINTF(buffer, size, format, value)
                             : FMT_SWPRINTF(buffer, size, format, precision, value);
    }
    return precision < 0 ? FMT_SWPRINTF(buffer, size, format, width, value)
                         : FMT_SWPRINTF(buffer, size, format, width, precision, value);
}
// Lookup table of all two-digit pairs "00".."99" - lets the integer
// formatter emit two digits per division instead of one.
template <typename T>
const char fmt::internal::BasicData<T>::DIGITS[] =
    "0001020304050607080910111213141516171819"
    "2021222324252627282930313233343536373839"
    "4041424344454647484950515253545556575859"
    "6061626364656667686970717273747576777879"
    "8081828384858687888990919293949596979899";
// Expands to factor*10^1 .. factor*10^9 - building block for the tables
// below, used by count_digits().
#define FMT_POWERS_OF_10(factor) \
    factor * 10, factor * 100, factor * 1000, factor * 10000, factor * 100000, factor * 1000000, \
        factor * 10000000, factor * 100000000, factor * 1000000000

// 10^0..10^9 for 32-bit digit counting (entry 0 is a sentinel).
template <typename T>
const uint32_t fmt::internal::BasicData<T>::POWERS_OF_10_32[] = { 0, FMT_POWERS_OF_10(1) };

// 10^0..10^19 for 64-bit digit counting.
template <typename T>
const uint64_t fmt::internal::BasicData<T>::POWERS_OF_10_64[] = {
    0,
    FMT_POWERS_OF_10(1),
    FMT_POWERS_OF_10(fmt::ULongLong(1000000000)),
    // Multiply several constants instead of using a single long long constant
    // to avoid warnings about C++98 not supporting long long.
    fmt::ULongLong(1000000000) * fmt::ULongLong(1000000000) * 10
};
// Throw a FormatError naming the unknown conversion character, hex-escaping
// it when it is not printable. (void)type silences an unused-parameter
// warning - presumably for builds where FMT_THROW compiles the uses away;
// confirm against the FMT_EXCEPTIONS=0 configuration.
FMT_FUNC void
fmt::internal::report_unknown_type(char code, const char* type) {
    (void)type;
    if (std::isprint(static_cast<unsigned char>(code))) {
        FMT_THROW(fmt::FormatError(fmt::format("unknown format code '{}' for {}", code, type)));
    }
    FMT_THROW(fmt::FormatError(
        fmt::format("unknown format code '\\x{:02x}' for {}", static_cast<unsigned>(code), type)));
}
#if FMT_USE_WINDOWS_H
// Convert a UTF-8 string to UTF-16 with the Win32 API.
// First MultiByteToWideChar call measures, second converts; invalid UTF-8
// (MB_ERR_INVALID_CHARS) or oversized input throws WindowsError.
FMT_FUNC
fmt::internal::UTF8ToUTF16::UTF8ToUTF16(fmt::StringRef s) {
    static const char ERROR_MSG[] = "cannot convert string from UTF-8 to UTF-16";
    if (s.size() > INT_MAX) FMT_THROW(WindowsError(ERROR_INVALID_PARAMETER, ERROR_MSG));
    int s_size = static_cast<int>(s.size());
    int length = MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, s.data(), s_size, 0, 0);
    if (length == 0) FMT_THROW(WindowsError(GetLastError(), ERROR_MSG));
    buffer_.resize(length + 1);
    length =
        MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, s.data(), s_size, &buffer_[0], length);
    if (length == 0) FMT_THROW(WindowsError(GetLastError(), ERROR_MSG));
    buffer_[length] = 0;
}
// Throwing wrapper over convert(): UTF-16 -> UTF-8 or WindowsError.
FMT_FUNC
fmt::internal::UTF16ToUTF8::UTF16ToUTF8(fmt::WStringRef s) {
    if (int error_code = convert(s)) {
        FMT_THROW(WindowsError(error_code, "cannot convert string from UTF-16 to UTF-8"));
    }
}
// Non-throwing UTF-16 -> UTF-8 conversion; returns 0 on success or a Win32
// error code. Measure-then-convert, same pattern as UTF8ToUTF16 above.
FMT_FUNC int
fmt::internal::UTF16ToUTF8::convert(fmt::WStringRef s) {
    if (s.size() > INT_MAX) return ERROR_INVALID_PARAMETER;
    int s_size = static_cast<int>(s.size());
    int length = WideCharToMultiByte(CP_UTF8, 0, s.data(), s_size, 0, 0, 0, 0);
    if (length == 0) return GetLastError();
    buffer_.resize(length + 1);
    length = WideCharToMultiByte(CP_UTF8, 0, s.data(), s_size, &buffer_[0], length, 0, 0);
    if (length == 0) return GetLastError();
    buffer_[length] = 0;
    return 0;
}
// Build the WindowsError message ("<formatted>: <system text>") and store it
// by reassigning the std::runtime_error base - mirrors SystemError::init.
FMT_FUNC void
fmt::WindowsError::init(int err_code, CStringRef format_str, ArgList args) {
    error_code_ = err_code;
    MemoryWriter w;
    internal::format_windows_error(w, err_code, format(format_str, args));
    std::runtime_error& base = *this;
    base = std::runtime_error(w.str());
}
// Write "<message>: <Win32 system message>" to out. The system text comes
// from FormatMessageW (UTF-16) and is converted to UTF-8; on any failure,
// fall back to printing just the numeric error code. Never throws.
FMT_FUNC void
fmt::internal::format_windows_error(fmt::Writer& out,
                                    int error_code,
                                    fmt::StringRef message) FMT_NOEXCEPT {
    // RAII holder for the LocalAlloc'ed buffer FormatMessageW returns with
    // FORMAT_MESSAGE_ALLOCATE_BUFFER.
    class String {
    private:
        LPWSTR str_;

    public:
        String() : str_() {}
        ~String() { LocalFree(str_); }
        LPWSTR*
        ptr() {
            return &str_;
        }
        LPCWSTR
        c_str() const {
            return str_;
        }
    };
    FMT_TRY {
        String system_message;
        if (FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
                               FORMAT_MESSAGE_IGNORE_INSERTS,
                           0,
                           error_code,
                           MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                           reinterpret_cast<LPWSTR>(system_message.ptr()),
                           0,
                           0)) {
            UTF16ToUTF8 utf8_message;
            if (utf8_message.convert(system_message.c_str()) == ERROR_SUCCESS) {
                out << message << ": " << utf8_message;
                return;
            }
        }
    }
    FMT_CATCH(...) {}
    fmt::format_error_code(out, error_code, message); // 'fmt::' is for bcc32.
}
#endif // FMT_USE_WINDOWS_H
// Formats a POSIX errno value into out as "<message>: <strerror text>".
// Declared FMT_NOEXCEPT: on any failure it falls back to printing the
// numeric code via format_error_code.
FMT_FUNC void
fmt::internal::format_system_error(fmt::Writer& out,
                                   int error_code,
                                   fmt::StringRef message) FMT_NOEXCEPT {
  FMT_TRY {
    MemoryBuffer<char, INLINE_BUFFER_SIZE> buffer;
    buffer.resize(INLINE_BUFFER_SIZE);
    for (;;) {
      char* system_message = &buffer[0];
      int result = safe_strerror(error_code, system_message, buffer.size());
      if (result == 0) {
        out << message << ": " << system_message;
        return;
      }
      // ERANGE means the buffer was too small: double it and retry.
      if (result != ERANGE) break;  // Can't get error message, report error code instead.
      buffer.resize(buffer.size() * 2);
    }
  }
  FMT_CATCH(...) {}
  fmt::format_error_code(out, error_code, message);  // 'fmt::' is for bcc32.
}
// Builds the name -> argument map used for named-argument lookup.
// ArgList stores arguments in one of two layouts: a "packed" values_ array
// when there are fewer than MAX_PACKED_ARGS arguments, or the args_ array
// otherwise; the NONE type acts as a terminating sentinel in both.
template <typename Char>
void
fmt::internal::ArgMap<Char>::init(const ArgList& args) {
  if (!map_.empty()) return;  // already initialized
  typedef internal::NamedArg<Char> NamedArg;
  const NamedArg* named_arg = 0;
  // If the last packed slot is NONE, the packed (values_) layout is in use.
  bool use_values = args.type(ArgList::MAX_PACKED_ARGS - 1) == internal::Arg::NONE;
  if (use_values) {
    for (unsigned i = 0; /*nothing*/; ++i) {
      internal::Arg::Type arg_type = args.type(i);
      switch (arg_type) {
      case internal::Arg::NONE: return;
      case internal::Arg::NAMED_ARG:
        // The slot holds a pointer to the NamedArg wrapper; index by name.
        named_arg = static_cast<const NamedArg*>(args.values_[i].pointer);
        map_.insert(Pair(named_arg->name, *named_arg));
        break;
      default:
        /*nothing*/
        ;
      }
    }
    return;
  }
  // Unpacked layout: first scan the fixed-size prefix (no NONE sentinel there)...
  for (unsigned i = 0; i != ArgList::MAX_PACKED_ARGS; ++i) {
    internal::Arg::Type arg_type = args.type(i);
    if (arg_type == internal::Arg::NAMED_ARG) {
      named_arg = static_cast<const NamedArg*>(args.args_[i].pointer);
      map_.insert(Pair(named_arg->name, *named_arg));
    }
  }
  // ...then the NONE-terminated tail.
  for (unsigned i = ArgList::MAX_PACKED_ARGS; /*nothing*/; ++i) {
    switch (args.args_[i].type) {
    case internal::Arg::NONE: return;
    case internal::Arg::NAMED_ARG:
      named_arg = static_cast<const NamedArg*>(args.args_[i].pointer);
      map_.insert(Pair(named_arg->name, *named_arg));
      break;
    default:
      /*nothing*/
      ;
    }
  }
}
// A FixedBuffer cannot reallocate: any attempt to exceed its capacity is fatal.
template <typename Char>
void fmt::internal::FixedBuffer<Char>::grow(std::size_t) {
  FMT_THROW(std::runtime_error("buffer overflow"));
}
// Fetches the argument at arg_index, resolving NAMED_ARG indirection.
// On an out-of-range index the error string is set instead of throwing.
FMT_FUNC Arg
fmt::internal::FormatterBase::do_get_arg(unsigned arg_index, const char*& error) {
  Arg arg = args_[arg_index];
  switch (arg.type) {
  case Arg::NONE: error = "argument index out of range"; break;
  // NAMED_ARG wraps the real argument; unwrap it.
  // NOTE: intentional fallthrough into default (which does nothing).
  case Arg::NAMED_ARG: arg = *static_cast<const internal::Arg*>(arg.pointer);
  default:
    /*nothing*/
    ;
  }
  return arg;
}
// Consumes printf flag characters ('-', '+', '0', ' ', '#') into spec,
// leaving s on the first non-flag character.
template <typename Char>
void
fmt::internal::PrintfFormatter<Char>::parse_flags(FormatSpec& spec, const Char*& s) {
  for (;;) {
    switch (*s++) {
    case '-': spec.align_ = ALIGN_LEFT; break;
    case '+': spec.flags_ |= SIGN_FLAG | PLUS_FLAG; break;
    case '0': spec.fill_ = '0'; break;
    case ' ': spec.flags_ |= SIGN_FLAG; break;
    case '#': spec.flags_ |= HASH_FLAG; break;
    default: --s; return;  // not a flag: back up and stop
    }
  }
}
// Returns the argument at arg_index (1-based, per printf's "%m$" syntax;
// UINT_MAX means "next sequential argument").  Throws FormatError on a bad
// index, reporting "invalid format string" if the string ended prematurely.
template <typename Char>
Arg
fmt::internal::PrintfFormatter<Char>::get_arg(const Char* s, unsigned arg_index) {
  (void)s;
  const char* error = 0;
  Arg arg =
      arg_index == UINT_MAX ? next_arg(error) : FormatterBase::get_arg(arg_index - 1, error);
  if (error) FMT_THROW(FormatError(!*s ? "invalid format string" : error));
  return arg;
}
// Parses the optional "%m$" argument index, the flags, and the width of a
// printf specifier.  Returns the explicit argument index, or UINT_MAX when
// sequential argument ordering should be used.
template <typename Char>
unsigned
fmt::internal::PrintfFormatter<Char>::parse_header(const Char*& s, FormatSpec& spec) {
  unsigned arg_index = UINT_MAX;
  Char c = *s;
  if (c >= '0' && c <= '9') {
    // Parse an argument index (if followed by '$') or a width possibly
    // preceded with '0' flag(s).
    unsigned value = parse_nonnegative_int(s);
    if (*s == '$') {  // value is an argument index
      ++s;
      arg_index = value;
    } else {
      if (c == '0') spec.fill_ = '0';
      if (value != 0) {
        // Nonzero value means that we parsed width and don't need to
        // parse it or flags again, so return now.
        spec.width_ = value;
        return arg_index;
      }
    }
  }
  parse_flags(spec, s);
  // Parse width.
  if (*s >= '0' && *s <= '9') {
    spec.width_ = parse_nonnegative_int(s);
  } else if (*s == '*') {
    // '*' takes the width from the next argument.
    ++s;
    spec.width_ = WidthHandler(spec).visit(get_arg(s));
  }
  return arg_index;
}
// Top-level printf driver: scans format_str, copying literal text through
// to the writer and dispatching each '%' specifier through the
// parse-header / parse-precision / convert-length / format steps.
template <typename Char>
void
fmt::internal::PrintfFormatter<Char>::format(BasicWriter<Char>& writer,
                                             BasicCStringRef<Char> format_str) {
  const Char* start = format_str.c_str();
  const Char* s = start;
  while (*s) {
    Char c = *s++;
    if (c != '%') continue;
    if (*s == c) {  // "%%" escape: emit a single '%'
      write(writer, start, s);
      start = ++s;
      continue;
    }
    // Flush the literal text preceding this specifier.
    write(writer, start, s - 1);
    FormatSpec spec;
    spec.align_ = ALIGN_RIGHT;
    // Parse argument index, flags and width.
    unsigned arg_index = parse_header(s, spec);
    // Parse precision.
    if (*s == '.') {
      ++s;
      if ('0' <= *s && *s <= '9') {
        spec.precision_ = parse_nonnegative_int(s);
      } else if (*s == '*') {
        // '*' takes the precision from the next argument.
        ++s;
        spec.precision_ = PrecisionHandler().visit(get_arg(s));
      }
    }
    Arg arg = get_arg(s, arg_index);
    // printf semantics: '#' has no effect on zero-valued integers.
    if (spec.flag(HASH_FLAG) && IsZeroInt().visit(arg)) spec.flags_ &= ~HASH_FLAG;
    if (spec.fill_ == '0') {
      if (arg.type <= Arg::LAST_NUMERIC_TYPE)
        spec.align_ = ALIGN_NUMERIC;  // zero-pad between sign and digits
      else
        spec.fill_ = ' ';  // Ignore '0' flag for non-numeric types.
    }
    // Parse length and convert the argument to the required type.
    switch (*s++) {
    case 'h':
      if (*s == 'h')
        ArgConverter<signed char>(arg, *++s).visit(arg);
      else
        ArgConverter<short>(arg, *s).visit(arg);
      break;
    case 'l':
      if (*s == 'l')
        ArgConverter<fmt::LongLong>(arg, *++s).visit(arg);
      else
        ArgConverter<long>(arg, *s).visit(arg);
      break;
    case 'j': ArgConverter<intmax_t>(arg, *s).visit(arg); break;
    case 'z': ArgConverter<std::size_t>(arg, *s).visit(arg); break;
    case 't': ArgConverter<std::ptrdiff_t>(arg, *s).visit(arg); break;
    case 'L':
      // printf produces garbage when 'L' is omitted for long double, no
      // need to do the same.
      break;
    default: --s; ArgConverter<int>(arg, *s).visit(arg);
    }
    // Parse type.
    if (!*s) FMT_THROW(FormatError("invalid format string"));
    spec.type_ = static_cast<char>(*s++);
    if (arg.type <= Arg::LAST_INTEGER_TYPE) {
      // Normalize type.
      switch (spec.type_) {
      case 'i':
      case 'u': spec.type_ = 'd'; break;
      case 'c':
        // TODO: handle wchar_t
        CharConverter(arg).visit(arg);
        break;
      }
    }
    start = s;
    // Format argument.
    internal::PrintfArgFormatter<Char>(writer, spec).visit(arg);
  }
  // Flush the trailing literal text after the last specifier.
  write(writer, start, s);
}
// Reports a system (errno) error to stderr without throwing.
FMT_FUNC void
fmt::report_system_error(int error_code, fmt::StringRef message) FMT_NOEXCEPT {
  // 'fmt::' is for bcc32.
  fmt::report_error(internal::format_system_error, error_code, message);
}
#if FMT_USE_WINDOWS_H
// Reports a Windows (GetLastError) error to stderr without throwing.
FMT_FUNC void
fmt::report_windows_error(int error_code, fmt::StringRef message) FMT_NOEXCEPT {
  // 'fmt::' is for bcc32.
  fmt::report_error(internal::format_windows_error, error_code, message);
}
#endif
// Formats into an in-memory buffer, then emits the bytes with one fwrite.
FMT_FUNC void
fmt::print(std::FILE* f, CStringRef format_str, ArgList args) {
  MemoryWriter writer;
  writer.write(format_str, args);
  std::fwrite(writer.data(), 1, writer.size(), f);
}
// Convenience overload targeting standard output.
FMT_FUNC void
fmt::print(CStringRef format_str, ArgList args) {
  print(stdout, format_str, args);
}
// Formats into an in-memory buffer, then hands the bytes to the stream.
FMT_FUNC void
fmt::print(std::ostream& os, CStringRef format_str, ArgList args) {
  MemoryWriter writer;
  writer.write(format_str, args);
  os.write(writer.data(), writer.size());
}
// Writes formatted output to stdout wrapped in ANSI terminal color codes.
FMT_FUNC void
fmt::print_colored(Color c, CStringRef format, ArgList args) {
  char escape[] = "\x1b[30m";
  // Patch the color digit into the "ESC[3Xm" SGR sequence (Color is 0..7).
  escape[3] = static_cast<char>('0' + c);
  std::fputs(escape, stdout);
  print(format, args);
  std::fputs(RESET_COLOR, stdout);  // restore default terminal attributes
}
// printf-style formatting to a FILE*.  Returns the number of bytes written,
// or -1 on a short write, mirroring C fprintf's error convention.
FMT_FUNC int
fmt::fprintf(std::FILE* f, CStringRef format, ArgList args) {
  MemoryWriter w;
  printf(w, format, args);
  std::size_t size = w.size();
  return std::fwrite(w.data(), 1, size, f) < size ? -1 : static_cast<int>(size);
}
#ifndef FMT_HEADER_ONLY
// Explicit template instantiations: when fmt is built as a library these
// emit the template definitions into this translation unit so client code
// can link against them without instantiating the templates itself.
template struct fmt::internal::BasicData<void>;
// Explicit instantiations for char.
template void fmt::internal::FixedBuffer<char>::grow(std::size_t);
template void fmt::internal::ArgMap<char>::init(const fmt::ArgList& args);
template void fmt::internal::PrintfFormatter<char>::format(BasicWriter<char>& writer,
                                                           CStringRef format);
template int fmt::internal::CharTraits<char>::format_float(char* buffer,
                                                           std::size_t size,
                                                           const char* format,
                                                           unsigned width,
                                                           int precision,
                                                           double value);
template int fmt::internal::CharTraits<char>::format_float(char* buffer,
                                                           std::size_t size,
                                                           const char* format,
                                                           unsigned width,
                                                           int precision,
                                                           long double value);
// Explicit instantiations for wchar_t.
template void fmt::internal::FixedBuffer<wchar_t>::grow(std::size_t);
template void fmt::internal::ArgMap<wchar_t>::init(const fmt::ArgList& args);
template void fmt::internal::PrintfFormatter<wchar_t>::format(BasicWriter<wchar_t>& writer,
                                                              WCStringRef format);
template int fmt::internal::CharTraits<wchar_t>::format_float(wchar_t* buffer,
                                                              std::size_t size,
                                                              const wchar_t* format,
                                                              unsigned width,
                                                              int precision,
                                                              double value);
template int fmt::internal::CharTraits<wchar_t>::format_float(wchar_t* buffer,
                                                              std::size_t size,
                                                              const wchar_t* format,
                                                              unsigned width,
                                                              int precision,
                                                              long double value);
#endif  // FMT_HEADER_ONLY
#ifdef _MSC_VER
#pragma warning(pop)  // restore the warning state pushed at the top of the file
#endif
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,156 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <type_traits>
#include "../common.h"
#include "../logger.h"
// Line logger class - aggregates operator<< calls to fast ostream
// and logs upon destruction.
// Each call like logger.info(...) returns a temporary line_logger; further
// operator<< calls append to it, and its destructor stamps the message with
// logger name / time / thread id and hands it to the owning logger.
// Movable but not copyable: moving transfers the duty to emit the message.
class line_logger {
public:
    line_logger(logger* callback_logger, level::level_enum msg_level, bool enabled)
        : _callback_logger(callback_logger), _log_msg(msg_level), _enabled(enabled) {}
    // No copy intended. Only move
    line_logger(const line_logger& other) = delete;
    line_logger& operator=(const line_logger&) = delete;
    line_logger& operator=(line_logger&&) = delete;
    line_logger(line_logger&& other)
        : _callback_logger(other._callback_logger)
        , _log_msg(std::move(other._log_msg))
        , _enabled(other._enabled) {
        // Disable the moved-from object so only one instance logs on destruction.
        other.disable();
    }
    // Log the log message using the callback logger
    ~line_logger() {
        if (_enabled) {
#ifndef SPDLOG_NO_NAME
            _log_msg.logger_name = _callback_logger->name();
#endif
#ifndef SPDLOG_NO_DATETIME
            _log_msg.time = os::now();
#endif
#ifndef SPDLOG_NO_THREAD_ID
            _log_msg.thread_id = os::thread_id();
#endif
            _callback_logger->_log_msg(_log_msg);
        }
    }
    //
    // Support for format string with variadic args
    //
    void write(const char* what) {
        if (_enabled) _log_msg.raw << what;
    }
    template <typename... Args>
    void write(const char* fmt, const Args&... args) {
        if (!_enabled) return;
        try {
            _log_msg.raw.write(fmt, args...);
        } catch (const fmt::FormatError& e) {
            // Re-throw with the offending format string included, for easier diagnosis.
            throw spdlog_ex(fmt::format(
                "formatting error while processing format string '{}': {}", fmt, e.what()));
        }
    }
    //
    // Support for operator<<
    //
    line_logger& operator<<(const char* what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(const std::string& what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(int what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(unsigned int what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(long what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(unsigned long what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(long long what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(unsigned long long what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(double what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(long double what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(float what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    line_logger& operator<<(char what) {
        if (_enabled) _log_msg.raw << what;
        return *this;
    }
    // Support user types which implements operator<<
    // (routed through cppformat's "{}" formatting)
    template <typename T>
    line_logger& operator<<(const T& what) {
        if (_enabled) _log_msg.raw.write("{}", what);
        return *this;
    }
    // Turn this instance into a no-op (used on moved-from objects).
    void disable() {
        _enabled = false;
    }
    bool is_enabled() const {
        return _enabled;
    }
private:
    logger* _callback_logger;   // logger that receives the finished message
    log_msg _log_msg;           // message being accumulated
    bool _enabled;              // false => all appends and the final emit are no-ops
};
} // Namespace details
} // Namespace spdlog
@@ -0,0 +1,79 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <thread>
#include "../common.h"
#include "./format.h"
namespace spdlog
{
namespace details
{
// A single in-flight log record: metadata plus the raw user text and the
// final pattern-formatted output, both held in fmt memory writers.
struct log_msg
{
    log_msg() = default;
    log_msg(level::level_enum l):
        logger_name(),
        level(l),
        raw(),
        formatted() {}

    // Copy: fmt::MemoryWriter is not copyable, so replay the buffered bytes
    // of the source into the freshly constructed writers instead.
    log_msg(const log_msg& other) :
        logger_name(other.logger_name),
        level(other.level),
        time(other.time),
        thread_id(other.thread_id)
    {
        if (other.raw.size())
            raw << fmt::BasicStringRef<char>(other.raw.data(), other.raw.size());
        if (other.formatted.size())
            formatted << fmt::BasicStringRef<char>(other.formatted.data(), other.formatted.size());
    }

    // Move: steal the buffers, then reset the source to an empty record.
    log_msg(log_msg&& other) :
        logger_name(std::move(other.logger_name)),
        level(other.level),
        time(std::move(other.time)),
        thread_id(other.thread_id),
        raw(std::move(other.raw)),
        formatted(std::move(other.formatted))
    {
        other.clear();
    }

    log_msg& operator=(log_msg&& other)
    {
        if (this == &other)
            return *this;
        logger_name = std::move(other.logger_name);
        level = other.level;
        time = std::move(other.time);
        thread_id = other.thread_id;
        raw = std::move(other.raw);
        formatted = std::move(other.formatted);
        other.clear();
        return *this;
    }

    // Reset to an empty, disabled record (applied to moved-from objects).
    void clear()
    {
        level = level::off;
        raw.clear();
        formatted.clear();
    }

    std::string logger_name;          // name of the emitting logger
    level::level_enum level;          // severity of this record
    log_clock::time_point time;       // timestamp of the call
    size_t thread_id;                 // id of the emitting thread
    fmt::MemoryWriter raw;            // unformatted user message
    fmt::MemoryWriter formatted;      // message after pattern formatting
};
}
}
@@ -0,0 +1,299 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include "./line_logger.h"
// create logger with given name, sinks and the default pattern formatter
// all other ctors will call this one
template<class It>
inline spdlog::logger::logger(const std::string& logger_name, const It& begin, const It& end) :
    _name(logger_name),
    _sinks(begin, end),
    _formatter(std::make_shared<pattern_formatter>("%+"))
{
    // no support under vs2013 for member initialization for std::atomic
    _level = level::info;
}

// ctor with sinks as init list
inline spdlog::logger::logger(const std::string& logger_name, sinks_init_list sinks_list) :
    logger(logger_name, sinks_list.begin(), sinks_list.end()) {}

// ctor with single sink (wrapped into a one-element init list)
inline spdlog::logger::logger(const std::string& logger_name, spdlog::sink_ptr single_sink) :
    logger(logger_name,
{
    single_sink
}) {}

inline spdlog::logger::~logger() = default;

// Replace the formatter applied to every message of this logger.
inline void spdlog::logger::set_formatter(spdlog::formatter_ptr msg_formatter)
{
    _set_formatter(msg_formatter);
}

// Rebuild the formatter from a pattern string (e.g. "%Y-%m-%d %v").
inline void spdlog::logger::set_pattern(const std::string& pattern)
{
    _set_pattern(pattern);
}
//
// log only if given level>=logger's log level
// All user-facing level methods funnel through these three overloads; the
// returned line_logger is a no-op when the level check fails.
//
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::_log_if_enabled(level::level_enum lvl, const char* fmt, const Args&... args)
{
    bool msg_enabled = should_log(lvl);
    details::line_logger l(this, lvl, msg_enabled);
    l.write(fmt, args...);
    return l;
}

// Variant with no message: caller appends via operator<< on the result.
inline spdlog::details::line_logger spdlog::logger::_log_if_enabled(level::level_enum lvl)
{
    return details::line_logger(this, lvl, should_log(lvl));
}

// Variant taking a single streamable value.
template<typename T>
inline spdlog::details::line_logger spdlog::logger::_log_if_enabled(level::level_enum lvl, const T& msg)
{
    bool msg_enabled = should_log(lvl);
    details::line_logger l(this, lvl, msg_enabled);
    l << msg;
    return l;
}
//
// logger.info(cppformat_string, arg1, arg2, arg3, ...) call style
// One thin forwarder per severity; each delegates to _log_if_enabled with
// its level constant.
//
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::trace(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::trace, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::debug(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::debug, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::info(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::info, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::notice(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::notice, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::warn(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::warn, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::error(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::err, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::critical(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::critical, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::alert(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::alert, fmt, args...);
}
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::emerg(const char* fmt, const Args&... args)
{
    return _log_if_enabled(level::emerg, fmt, args...);
}
//
// logger.info(msg) << ".." call style
// Single-value overloads: the message is streamed, then further operator<<
// calls can append to the returned line_logger.
//
template<typename T>
inline spdlog::details::line_logger spdlog::logger::trace(const T& msg)
{
    return _log_if_enabled(level::trace, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::debug(const T& msg)
{
    return _log_if_enabled(level::debug, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::info(const T& msg)
{
    return _log_if_enabled(level::info, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::notice(const T& msg)
{
    return _log_if_enabled(level::notice, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::warn(const T& msg)
{
    return _log_if_enabled(level::warn, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::error(const T& msg)
{
    return _log_if_enabled(level::err, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::critical(const T& msg)
{
    return _log_if_enabled(level::critical, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::alert(const T& msg)
{
    return _log_if_enabled(level::alert, msg);
}
template<typename T>
inline spdlog::details::line_logger spdlog::logger::emerg(const T& msg)
{
    return _log_if_enabled(level::emerg, msg);
}
//
// logger.info() << ".." call style
// No-argument overloads: return an empty line_logger to be filled entirely
// via operator<<.
//
inline spdlog::details::line_logger spdlog::logger::trace()
{
    return _log_if_enabled(level::trace);
}
inline spdlog::details::line_logger spdlog::logger::debug()
{
    return _log_if_enabled(level::debug);
}
inline spdlog::details::line_logger spdlog::logger::info()
{
    return _log_if_enabled(level::info);
}
inline spdlog::details::line_logger spdlog::logger::notice()
{
    return _log_if_enabled(level::notice);
}
inline spdlog::details::line_logger spdlog::logger::warn()
{
    return _log_if_enabled(level::warn);
}
inline spdlog::details::line_logger spdlog::logger::error()
{
    return _log_if_enabled(level::err);
}
inline spdlog::details::line_logger spdlog::logger::critical()
{
    return _log_if_enabled(level::critical);
}
inline spdlog::details::line_logger spdlog::logger::alert()
{
    return _log_if_enabled(level::alert);
}
inline spdlog::details::line_logger spdlog::logger::emerg()
{
    return _log_if_enabled(level::emerg);
}
// always log, no matter what is the actual logger's log level
template <typename... Args>
inline spdlog::details::line_logger spdlog::logger::force_log(level::level_enum lvl, const char* fmt, const Args&... args)
{
    details::line_logger l(this, lvl, true);
    l.write(fmt, args...);
    return l;
}
//
// name and level
//
inline const std::string& spdlog::logger::name() const
{
    return _name;
}

inline void spdlog::logger::set_level(spdlog::level::level_enum log_level)
{
    _level.store(log_level);
}

inline spdlog::level::level_enum spdlog::logger::level() const
{
    // relaxed ordering: the level check needs no synchronization with other
    // memory operations, only atomicity.
    return static_cast<spdlog::level::level_enum>(_level.load(std::memory_order_relaxed));
}

// True if a message of msg_level would be emitted by this logger.
inline bool spdlog::logger::should_log(spdlog::level::level_enum msg_level) const
{
    return msg_level >= _level.load(std::memory_order_relaxed);
}
//
// protected virtual called at end of each user log call (if enabled) by the line_logger
//
inline void spdlog::logger::_log_msg(details::log_msg& msg)
{
    // Format once, then fan the finished record out to every sink.
    _formatter->format(msg);
    for (auto &sink : _sinks)
        sink->log(msg);
}

inline void spdlog::logger::_set_pattern(const std::string& pattern)
{
    _formatter = std::make_shared<pattern_formatter>(pattern);
}

inline void spdlog::logger::_set_formatter(formatter_ptr msg_formatter)
{
    _formatter = msg_formatter;
}

// Flush every sink (e.g. on shutdown or an explicit flush policy).
inline void spdlog::logger::flush()
{
    for (auto& sink : _sinks)
        sink->flush();
}
@@ -0,0 +1,157 @@
/*
A modified version of Bounded MPMC queue by Dmitry Vyukov.
Original code from:
http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
licensed by Dmitry Vyukov under the terms below:
Simplified BSD license
Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the authors and
should not be interpreted as representing official policies, either expressed or implied, of Dmitry Vyukov.
*/
/*
The code in its current form adds the license below:
Copyright(c) 2015 Gabi Melman.
Distributed under the MIT License (http://opensource.org/licenses/MIT)
*/
#pragma once
#include <atomic>
#include "../common.h"
namespace spdlog
{
namespace details
{
// Bounded lock-free MPMC queue (Vyukov's design).  Each cell carries a
// sequence number encoding whether it is free for a producer's current lap
// or holds data for a consumer's current lap; threads claim a slot via CAS
// on their position counter and publish through the cell's sequence with
// release ordering.
template<typename T>
class mpmc_bounded_queue
{
public:

    using item_type = T;

    mpmc_bounded_queue(size_t buffer_size)
        : buffer_(new cell_t [buffer_size]),
          buffer_mask_(buffer_size - 1)  // works as a ring-index mask only for power-of-two sizes
    {
        //queue size must be power of two
        if(!((buffer_size >= 2) && ((buffer_size & (buffer_size - 1)) == 0)))
            throw spdlog_ex("async logger queue size must be power of two");

        // Initially cell i's sequence == i: "free for the producer at position i".
        for (size_t i = 0; i != buffer_size; i += 1)
            buffer_[i].sequence_.store(i, std::memory_order_relaxed);

        enqueue_pos_.store(0, std::memory_order_relaxed);
        dequeue_pos_.store(0, std::memory_order_relaxed);
    }

    ~mpmc_bounded_queue()
    {
        delete [] buffer_;
    }

    // Returns false when the queue is full; never blocks.
    bool enqueue(T&& data)
    {
        cell_t* cell;
        size_t pos = enqueue_pos_.load(std::memory_order_relaxed);
        for (;;)
        {
            cell = &buffer_[pos & buffer_mask_];
            size_t seq = cell->sequence_.load(std::memory_order_acquire);
            intptr_t dif = (intptr_t)seq - (intptr_t)pos;
            if (dif == 0)
            {
                // Cell is free for this position: try to claim it.
                // (On CAS failure, pos is updated to the current value and we retry.)
                if (enqueue_pos_.compare_exchange_weak(pos, pos + 1, std::memory_order_relaxed))
                    break;
            }
            else if (dif < 0)
            {
                // Cell still holds data from the previous lap: queue is full.
                return false;
            }
            else
            {
                // Another producer got ahead: reread the position and retry.
                pos = enqueue_pos_.load(std::memory_order_relaxed);
            }
        }
        cell->data_ = std::move(data);
        // Publish: sequence == pos + 1 marks the cell readable at position pos.
        cell->sequence_.store(pos + 1, std::memory_order_release);
        return true;
    }

    // Returns false when the queue is empty; never blocks.
    bool dequeue(T& data)
    {
        cell_t* cell;
        size_t pos = dequeue_pos_.load(std::memory_order_relaxed);
        for (;;)
        {
            cell = &buffer_[pos & buffer_mask_];
            size_t seq =
                cell->sequence_.load(std::memory_order_acquire);
            // A readable cell at pos has sequence == pos + 1 (set by enqueue).
            intptr_t dif = (intptr_t)seq - (intptr_t)(pos + 1);
            if (dif == 0)
            {
                if (dequeue_pos_.compare_exchange_weak(pos, pos + 1, std::memory_order_relaxed))
                    break;
            }
            else if (dif < 0)
                return false;
            else
                pos = dequeue_pos_.load(std::memory_order_relaxed);
        }
        data = std::move(cell->data_);
        // Free the cell for the producer's next lap around the ring.
        cell->sequence_.store(pos + buffer_mask_ + 1, std::memory_order_release);
        return true;
    }

private:
    struct cell_t
    {
        std::atomic<size_t> sequence_;
        T data_;
    };

    static size_t const cacheline_size = 64;
    typedef char cacheline_pad_t [cacheline_size];

    // Padding keeps the hot atomics on distinct cache lines to avoid false
    // sharing between producer and consumer threads.
    cacheline_pad_t pad0_;
    cell_t* const buffer_;
    size_t const buffer_mask_;
    cacheline_pad_t pad1_;
    std::atomic<size_t> enqueue_pos_;
    cacheline_pad_t pad2_;
    std::atomic<size_t> dequeue_pos_;
    cacheline_pad_t pad3_;

    // Non-copyable (pre-C++11 style: declared but not defined).
    mpmc_bounded_queue(mpmc_bounded_queue const&);
    void operator = (mpmc_bounded_queue const&);
};
} // ns details
} // ns spdlog
@@ -0,0 +1,24 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
// null, no cost mutex
namespace spdlog
{
namespace details
{
// A no-op mutex satisfying the Lockable requirements (lock/unlock/try_lock),
// used to compile out synchronization in single-threaded sink variants.
struct null_mutex
{
    void lock() {}                    // intentionally does nothing
    void unlock() {}                  // intentionally does nothing
    bool try_lock() { return true; }  // always "succeeds"
};
}
}
@@ -0,0 +1,215 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include<string>
#include<cstdio>
#include<ctime>
#ifdef _WIN32
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
#ifdef __MINGW32__
#include <share.h>
#endif
#elif __linux__
#include <sys/syscall.h> //Use gettid() syscall under linux to get thread id
#include <sys/stat.h>
#include <unistd.h>
#else
#include <thread>
#endif
#include "../common.h"
namespace spdlog
{
namespace details
{
namespace os
{
// Current time; when SPDLOG_CLOCK_COARSE is defined on linux, uses the
// cheaper coarse realtime clock (millisecond-ish resolution suffices for logs).
inline spdlog::log_clock::time_point now()
{
#if defined __linux__ && defined SPDLOG_CLOCK_COARSE
    timespec ts;
    ::clock_gettime(CLOCK_REALTIME_COARSE, &ts);
    return std::chrono::time_point<log_clock, typename log_clock::duration>(
        std::chrono::duration_cast<typename log_clock::duration>(
            std::chrono::seconds(ts.tv_sec) + std::chrono::nanoseconds(ts.tv_nsec)));
#else
    return log_clock::now();
#endif
}

// Thread-safe localtime: uses localtime_s/localtime_r instead of the
// non-reentrant std::localtime.
inline std::tm localtime(const std::time_t &time_tt)
{
#ifdef _WIN32
    std::tm tm;
    localtime_s(&tm, &time_tt);
#else
    std::tm tm;
    localtime_r(&time_tt, &tm);
#endif
    return tm;
}

// Local wall-clock time "now".
inline std::tm localtime()
{
    std::time_t now_t = time(nullptr);
    return localtime(now_t);
}

// Thread-safe gmtime (UTC) counterpart of localtime above.
inline std::tm gmtime(const std::time_t &time_tt)
{
#ifdef _WIN32
    std::tm tm;
    gmtime_s(&tm, &time_tt);
#else
    std::tm tm;
    gmtime_r(&time_tt, &tm);
#endif
    return tm;
}

// UTC time "now".
inline std::tm gmtime()
{
    std::time_t now_t = time(nullptr);
    return gmtime(now_t);
}
// Field-wise equality for the wall-clock components of std::tm.
// Note: tm_wday and tm_yday (and tm_gmtoff where present) are deliberately
// not compared -- only fields that determine the displayed date/time count.
inline bool operator==(const std::tm& tm1, const std::tm& tm2)
{
    if (tm1.tm_sec != tm2.tm_sec) return false;
    if (tm1.tm_min != tm2.tm_min) return false;
    if (tm1.tm_hour != tm2.tm_hour) return false;
    if (tm1.tm_mday != tm2.tm_mday) return false;
    if (tm1.tm_mon != tm2.tm_mon) return false;
    if (tm1.tm_year != tm2.tm_year) return false;
    return tm1.tm_isdst == tm2.tm_isdst;
}

// Negation of the field-wise equality above.
inline bool operator!=(const std::tm& tm1, const std::tm& tm2)
{
    return !(tm1 == tm2);
}
// Platform end-of-line sequence.  The _WIN32 variant is not constexpr
// because the MSVC versions targeted here lacked constexpr support.
#ifdef _WIN32
inline const char* eol()
{
    return "\r\n";
}
#else
constexpr inline const char* eol()
{
    return "\n";
}
#endif
// Length in characters of the eol() sequence above.
#ifdef _WIN32
inline unsigned short eol_size()
{
    return 2;
}
#else
constexpr inline unsigned short eol_size()
{
    return 1;
}
#endif
//fopen_s on non windows for writing
// Returns 0 on success, nonzero on failure (loosely mirroring MSVC's
// fopen_s convention).  On Windows the file is opened with deny-write
// sharing so other processes cannot write to the log concurrently.
inline int fopen_s(FILE** fp, const std::string& filename, const char* mode)
{
#ifdef _WIN32
    *fp = _fsopen((filename.c_str()), mode, _SH_DENYWR);
    return *fp == nullptr;
#else
    *fp = fopen((filename.c_str()), mode);
    return *fp == nullptr;
#endif
}
//Return if file exists
inline bool file_exists(const std::string& filename)
{
#ifdef _WIN32
    // Directories are deliberately treated as "not a file".
    auto attribs = GetFileAttributesA(filename.c_str());
    return (attribs != INVALID_FILE_ATTRIBUTES && !(attribs & FILE_ATTRIBUTE_DIRECTORY));
#elif __linux__
    struct stat buffer;
    return (stat (filename.c_str(), &buffer) == 0);
#else
    // Portable fallback: probe by opening the file for reading.
    auto *file = fopen(filename.c_str(), "r");
    if (file != nullptr)
    {
        fclose(file);
        return true;
    }
    return false;
#endif
}
//Return utc offset in minutes or throw spdlog_ex on failure
inline int utc_minutes_offset(const std::tm& tm = details::os::localtime())
{
#ifdef _WIN32
#if _WIN32_WINNT < _WIN32_WINNT_WS08
    TIME_ZONE_INFORMATION tzinfo;
    auto rv = GetTimeZoneInformation(&tzinfo);
#else
    DYNAMIC_TIME_ZONE_INFORMATION tzinfo;
    auto rv = GetDynamicTimeZoneInformation(&tzinfo);
#endif
    if (rv == TIME_ZONE_ID_INVALID)
        // BUGFIX: the original wrote "literal" + GetLastError(), which is
        // pointer arithmetic on the string literal (indexing into/past it),
        // not concatenation; convert the error code to text instead.
        throw spdlog::spdlog_ex("Failed getting timezone info. Last error: " + std::to_string(GetLastError()));
    // Bias values are minutes west of UTC; negate to get minutes east.
    int offset = -tzinfo.Bias;
    if (tm.tm_isdst)
        offset -= tzinfo.DaylightBias;
    else
        offset -= tzinfo.StandardBias;
    return offset;
#else
    // glibc/BSD extension: tm_gmtoff holds seconds east of UTC.
    return static_cast<int>(tm.tm_gmtoff / 60);
#endif
}
//Return current thread id as size_t
//It exists because the std::this_thread::get_id() is much slower (especially under VS 2013)
inline size_t thread_id()
{
#ifdef _WIN32
    return static_cast<size_t>(::GetCurrentThreadId());
#elif __linux__
    // Older Android NDKs lack the SYS_gettid alias for the raw syscall number.
# if defined(__ANDROID__) && defined(__ANDROID_API__) && (__ANDROID_API__ < 21)
# define SYS_gettid __NR_gettid
# endif
    return static_cast<size_t>(syscall(SYS_gettid));
#else //Default to standard C++11 (OSX and other Unix)
    return static_cast<size_t>(std::hash<std::thread::id>()(std::this_thread::get_id()));
#endif
}
} //os
} //details
} //spdlog
@@ -0,0 +1,625 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <string>
#include <chrono>
#include <memory>
#include <vector>
#include <thread>
#include "../formatter.h"
#include "./log_msg.h"
#include "./os.h"
namespace spdlog
{
namespace details
{
// Interface for one '%x' pattern flag: each implementation appends its
// piece of the output to msg.formatted using the pre-computed local time.
class flag_formatter
{
public:
    virtual ~flag_formatter() {}
    virtual void format(details::log_msg& msg, const std::tm& tm_time) = 0;
};
///////////////////////////////////////////////////////////////////////
// name & level pattern appenders
///////////////////////////////////////////////////////////////////////
namespace
{
class name_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm&) override
{
msg.formatted << msg.logger_name;
}
};
}
// log level appender (full name, e.g. "info")
class level_formatter :public flag_formatter
{
    void format(details::log_msg& msg, const std::tm&) override
    {
        msg.formatted << level::to_str(msg.level);
    }
};
// short log level appender (abbreviated name, e.g. "I")
class short_level_formatter :public flag_formatter
{
    void format(details::log_msg& msg, const std::tm&) override
    {
        msg.formatted << level::to_short_str(msg.level);
    }
};
///////////////////////////////////////////////////////////////////////
// Date time pattern appenders
///////////////////////////////////////////////////////////////////////
// AM/PM designator for a 24-hour tm_hour value.
static const char* ampm(const tm& t)
{
    return t.tm_hour >= 12 ? "PM" : "AM";
}
// Convert a 24-hour value to the 12-hour clock: 0 -> 12 (midnight),
// 12 -> 12 (noon), 13 -> 1, etc.
// BUGFIX: the previous `t.tm_hour > 12 ? t.tm_hour - 12 : t.tm_hour` mapped
// midnight (hour 0) to 0, but a 12-hour clock renders it as 12 (cf. the
// strftime %I conversion).
static int to12h(const tm& t)
{
    int h = t.tm_hour % 12;
    return h == 0 ? 12 : h;
}
//Abbreviated weekday name
static const std::string days[] { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
class a_formatter :public flag_formatter
{
    void format(details::log_msg& msg, const std::tm& tm_time) override
    {
        msg.formatted << days[tm_time.tm_wday];
    }
};
//Full weekday name
static const std::string full_days[] { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday" };
class A_formatter :public flag_formatter
{
    void format(details::log_msg& msg, const std::tm& tm_time) override
    {
        msg.formatted << full_days[tm_time.tm_wday];
    }
};
//Abbreviated month
// NOTE: these abbreviations are spdlog-specific ("June", "Sept", ...) and
// intentionally differ from strftime's three-letter %b forms.
static const std::string months[] { "Jan", "Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec" };
class b_formatter :public flag_formatter
{
    void format(details::log_msg& msg, const std::tm& tm_time) override
    {
        msg.formatted<< months[tm_time.tm_mon];
    }
};
//Full month name
static const std::string full_months[] { "January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December" };
class B_formatter :public flag_formatter
{
    void format(details::log_msg& msg, const std::tm& tm_time) override
    {
        msg.formatted << full_months[tm_time.tm_mon];
    }
};
//write 2 ints seperated by sep with padding of 2
static fmt::MemoryWriter& pad_n_join(fmt::MemoryWriter& w, int v1, int v2, char sep)
{
w << fmt::pad(v1, 2, '0') << sep << fmt::pad(v2, 2, '0');
return w;
}
// Append "AA<sep>BB<sep>CC": three values, each zero-padded to two digits.
static fmt::MemoryWriter& pad_n_join(fmt::MemoryWriter& w, int a, int b, int c, char sep)
{
    w << fmt::pad(a, 2, '0') << sep << fmt::pad(b, 2, '0') << sep << fmt::pad(c, 2, '0');
    return w;
}
//Date and time representation (Thu Aug 23 15:35:46 2014)
class c_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << days[tm_time.tm_wday] << ' ' << months[tm_time.tm_mon] << ' ' << tm_time.tm_mday << ' ';
pad_n_join(msg.formatted, tm_time.tm_hour, tm_time.tm_min, tm_time.tm_sec, ':') << ' ' << tm_time.tm_year + 1900;
}
};
// year - 2 digit
class C_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << fmt::pad(tm_time.tm_year % 100, 2, '0');
}
};
// Short MM/DD/YY date, equivalent to %m/%d/%y 08/23/01
class D_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
pad_n_join(msg.formatted, tm_time.tm_mon + 1, tm_time.tm_mday, tm_time.tm_year % 100, '/');
}
};
// year - 4 digit
class Y_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << tm_time.tm_year + 1900;
}
};
// month 1-12
class m_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << fmt::pad(tm_time.tm_mon + 1, 2, '0');
}
};
// day of month 1-31
class d_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << fmt::pad(tm_time.tm_mday, 2, '0');
}
};
// hours in 24 format 0-23
class H_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << fmt::pad(tm_time.tm_hour, 2, '0');
}
};
// hours in 12 format 1-12
class I_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << fmt::pad(to12h(tm_time), 2, '0');
}
};
// minutes 0-59
class M_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << fmt::pad(tm_time.tm_min, 2, '0');
}
};
// seconds 0-59
class S_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << fmt::pad(tm_time.tm_sec, 2, '0');
}
};
// milliseconds
class e_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm&) override
{
auto duration = msg.time.time_since_epoch();
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() % 1000;
msg.formatted << fmt::pad(static_cast<int>(millis), 3, '0');
}
};
// microseconds
class f_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm&) override
{
auto duration = msg.time.time_since_epoch();
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(duration).count() % 1000000;
msg.formatted << fmt::pad(static_cast<int>(micros), 6, '0');
}
};
// nanoseconds
class F_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm&) override
{
auto duration = msg.time.time_since_epoch();
auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration).count() % 1000000000;
msg.formatted << fmt::pad(static_cast<int>(ns), 9, '0');
}
};
// AM/PM
class p_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
msg.formatted << ampm(tm_time);
}
};
// 12 hour clock 02:55:02 pm
class r_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
pad_n_join(msg.formatted, to12h(tm_time), tm_time.tm_min, tm_time.tm_sec, ':') << ' ' << ampm(tm_time);
}
};
// 24-hour HH:MM time, equivalent to %H:%M
class R_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
pad_n_join(msg.formatted, tm_time.tm_hour, tm_time.tm_min, ':');
}
};
// ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S
class T_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
pad_n_join(msg.formatted, tm_time.tm_hour, tm_time.tm_min, tm_time.tm_sec, ':');
}
};
// ISO 8601 offset from UTC in timezone (+-HH:MM)
// On Windows the offset is computed at most once every `cache_refresh`
// seconds (the OS query is slow there); elsewhere it is read directly
// from the tm structure on every call.
// NOTE(review): for negative offsets both h and m below are negative,
// so the minute part may render with its own minus sign (e.g. "-5:-30"
// instead of "-05:30") - confirm before relying on %z output.
class z_formatter :public flag_formatter
{
public:
const std::chrono::seconds cache_refresh = std::chrono::seconds(5);
z_formatter() :_last_update(std::chrono::seconds(0)) {}
z_formatter(const z_formatter&) = delete;
z_formatter& operator=(const z_formatter&) = delete;
void format(details::log_msg& msg, const std::tm& tm_time) override
{
#ifdef _WIN32
int total_minutes = get_cached_offset(msg, tm_time);
#else
// No need to cache under gcc,
// it is very fast (already stored in tm.tm_gmtoff)
int total_minutes = os::utc_minutes_offset(tm_time);
#endif
int h = total_minutes / 60;
int m = total_minutes % 60;
if (h >= 0) //minus sign will be printed anyway if negative
{
msg.formatted << '+';
}
pad_n_join(msg.formatted, h, m, ':');
}
private:
log_clock::time_point _last_update;
int _offset_minutes;
std::mutex _mutex;
// Return the cached UTC offset, refreshing it (under _mutex) when the
// cached value is older than cache_refresh relative to msg.time.
int get_cached_offset(const log_msg& msg, const std::tm& tm_time)
{
using namespace std::chrono;
std::lock_guard<std::mutex> l(_mutex);
if (msg.time - _last_update >= cache_refresh)
{
_offset_minutes = os::utc_minutes_offset(tm_time);
_last_update = msg.time;
}
return _offset_minutes;
}
};
//Thread id
class t_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm&) override
{
msg.formatted << msg.thread_id;
}
};
class v_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm&) override
{
msg.formatted << fmt::StringRef(msg.raw.data(), msg.raw.size());
}
};
class ch_formatter :public flag_formatter
{
public:
explicit ch_formatter(char ch) : _ch(ch)
{}
void format(details::log_msg& msg, const std::tm&) override
{
msg.formatted << _ch;
}
private:
char _ch;
};
//aggregate user chars to display as is
class aggregate_formatter :public flag_formatter
{
public:
aggregate_formatter()
{}
void add_ch(char ch)
{
_str += ch;
}
void format(details::log_msg& msg, const std::tm&) override
{
msg.formatted << _str;
}
private:
std::string _str;
};
// Full info formatter
// pattern: [%Y-%m-%d %H:%M:%S.%e] [%n] [%l] %v
// Hand-rolled fast path for the default pattern; datetime and logger
// name sections can be compiled out via SPDLOG_NO_DATETIME and
// SPDLOG_NO_NAME.
class full_formatter :public flag_formatter
{
void format(details::log_msg& msg, const std::tm& tm_time) override
{
#ifndef SPDLOG_NO_DATETIME
// Millisecond fraction of the timestamp, appended after the seconds.
auto duration = msg.time.time_since_epoch();
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count() % 1000;
/* Slower version(while still very fast - about 3.2 million lines/sec under 10 threads),
msg.formatted.write("[{:d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:03d}] [{}] [{}] {} ",
tm_time.tm_year + 1900,
tm_time.tm_mon + 1,
tm_time.tm_mday,
tm_time.tm_hour,
tm_time.tm_min,
tm_time.tm_sec,
static_cast<int>(millis),
msg.logger_name,
level::to_str(msg.level),
msg.raw.str());*/
// Faster (albeit uglier) way to format the line (5.6 million lines/sec under 10 threads)
msg.formatted << '[' << static_cast<unsigned int>(tm_time.tm_year + 1900) << '-'
<< fmt::pad(static_cast<unsigned int>(tm_time.tm_mon + 1), 2, '0') << '-'
<< fmt::pad(static_cast<unsigned int>(tm_time.tm_mday), 2, '0') << ' '
<< fmt::pad(static_cast<unsigned int>(tm_time.tm_hour), 2, '0') << ':'
<< fmt::pad(static_cast<unsigned int>(tm_time.tm_min), 2, '0') << ':'
<< fmt::pad(static_cast<unsigned int>(tm_time.tm_sec), 2, '0') << '.'
<< fmt::pad(static_cast<unsigned int>(millis), 3, '0') << "] ";
//no datetime needed
#else
(void)tm_time;
#endif
#ifndef SPDLOG_NO_NAME
msg.formatted << '[' << msg.logger_name << "] ";
#endif
msg.formatted << '[' << level::to_str(msg.level) << "] ";
msg.formatted << fmt::StringRef(msg.raw.data(), msg.raw.size());
}
};
}
}
///////////////////////////////////////////////////////////////////////////////
// pattern_formatter inline impl
///////////////////////////////////////////////////////////////////////////////
// Compile `pattern` once at construction; format() then replays the
// resulting list of flag formatters for every message.
inline spdlog::pattern_formatter::pattern_formatter(const std::string& pattern)
{
compile_pattern(pattern);
}
// Translate the pattern string into _formatters: each "%X" becomes the
// matching flag formatter (via handle_flag), and runs of literal
// characters are gathered into aggregate_formatter instances.
inline void spdlog::pattern_formatter::compile_pattern(const std::string& pattern)
{
auto end = pattern.end();
std::unique_ptr<details::aggregate_formatter> user_chars;
for (auto it = pattern.begin(); it != end; ++it)
{
if (*it == '%')
{
if (user_chars) //append user chars found so far
_formatters.push_back(std::move(user_chars));
// ++it consumes the flag character following '%'; a trailing
// lone '%' at the end of the pattern is silently dropped.
if (++it != end)
handle_flag(*it);
else
break;
}
else // chars not following the % sign should be displayed as is
{
if (!user_chars)
user_chars = std::unique_ptr<details::aggregate_formatter>(new details::aggregate_formatter());
user_chars->add_ch(*it);
}
}
if (user_chars) //append raw chars found so far
{
_formatters.push_back(std::move(user_chars));
}
}
// Append the flag formatter matching one pattern flag character.
// Aliases: %h==%b, %x==%D, %X==%T. Unknown flags are emitted
// literally as "%<flag>".
inline void spdlog::pattern_formatter::handle_flag(char flag)
{
switch (flag)
{
// logger name
case 'n':
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::name_formatter()));
break;
case 'l':
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::level_formatter()));
break;
case 'L':
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::short_level_formatter()));
break;
case('t') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::t_formatter()));
break;
case('v') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::v_formatter()));
break;
case('a') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::a_formatter()));
break;
case('A') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::A_formatter()));
break;
case('b') :
case('h') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::b_formatter()));
break;
case('B') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::B_formatter()));
break;
case('c') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::c_formatter()));
break;
case('C') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::C_formatter()));
break;
case('Y') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::Y_formatter()));
break;
case('D') :
case('x') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::D_formatter()));
break;
case('m') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::m_formatter()));
break;
case('d') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::d_formatter()));
break;
case('H') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::H_formatter()));
break;
case('I') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::I_formatter()));
break;
case('M') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::M_formatter()));
break;
case('S') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::S_formatter()));
break;
case('e') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::e_formatter()));
break;
case('f') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::f_formatter()));
break;
case('F') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::F_formatter()));
break;
case('p') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::p_formatter()));
break;
case('r') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::r_formatter()));
break;
case('R') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::R_formatter()));
break;
case('T') :
case('X') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::T_formatter()));
break;
case('z') :
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::z_formatter()));
break;
// %+ - the complete default line layout in one formatter
case ('+'):
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::full_formatter()));
break;
default: //Unknown flag appears as is: emit '%' followed by the flag char
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::ch_formatter('%')));
_formatters.push_back(std::unique_ptr<details::flag_formatter>(new details::ch_formatter(flag)));
break;
}
}
// Run every compiled flag formatter over the message, then terminate
// the line with the platform end-of-line sequence. Formatting errors
// from the fmt library are rethrown as spdlog_ex.
inline void spdlog::pattern_formatter::format(details::log_msg& msg)
{
    try
    {
        // Compute the local calendar time once and share it with all flags.
        auto tm_time = details::os::localtime(log_clock::to_time_t(msg.time));
        for (const auto& flag_fmt : _formatters)
        {
            flag_fmt->format(msg, tm_time);
        }
        msg.formatted << details::os::eol();
    }
    catch (const fmt::FormatError& e)
    {
        throw spdlog_ex(fmt::format("formatting error while processing format string: {}", e.what()));
    }
}
@@ -0,0 +1,162 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
// Loggers registry of unique name->logger pointer
// An attempt to create a logger with an already existing name will be ignored
// If user requests a non existing logger, nullptr will be returned
// This class is thread safe
#include <string>
#include <mutex>
#include <unordered_map>
#include <functional>
#include "./null_mutex.h"
#include "../logger.h"
#include "../async_logger.h"
#include "../common.h"
namespace spdlog
{
namespace details
{
// Thread-safe singleton map of logger name -> logger. Also stores the
// global defaults (formatter, level, async settings) applied to every
// logger created through it. Mutex is std::mutex or null_mutex.
template <class Mutex> class registry_t
{
public:
// Register an externally-created logger; throws if the name is taken.
void register_logger(std::shared_ptr<logger> logger)
{
std::lock_guard<Mutex> lock(_mutex);
register_logger_impl(logger);
}
// Return the logger registered under logger_name, or nullptr.
std::shared_ptr<logger> get(const std::string& logger_name)
{
std::lock_guard<Mutex> lock(_mutex);
auto found = _loggers.find(logger_name);
return found == _loggers.end() ? nullptr : found->second;
}
// Create (sync or async, per _async_mode), configure with the global
// formatter/level, register and return a new logger over [sinks_begin,
// sinks_end). Throws if the name is already registered.
template<class It>
std::shared_ptr<logger> create(const std::string& logger_name, const It& sinks_begin, const It& sinks_end)
{
std::shared_ptr<logger> new_logger;
std::lock_guard<Mutex> lock(_mutex);
if (_async_mode)
new_logger = std::make_shared<async_logger>(logger_name, sinks_begin, sinks_end, _async_q_size, _overflow_policy, _worker_warmup_cb, _flush_interval_ms);
else
new_logger = std::make_shared<logger>(logger_name, sinks_begin, sinks_end);
if (_formatter)
new_logger->set_formatter(_formatter);
new_logger->set_level(_level);
register_logger_impl(new_logger);
return new_logger;
}
// Remove one logger by name (no-op if absent).
void drop(const std::string& logger_name)
{
std::lock_guard<Mutex> lock(_mutex);
_loggers.erase(logger_name);
}
// Remove every registered logger.
void drop_all()
{
std::lock_guard<Mutex> lock(_mutex);
_loggers.clear();
}
std::shared_ptr<logger> create(const std::string& logger_name, sinks_init_list sinks)
{
return create(logger_name, sinks.begin(), sinks.end());
}
std::shared_ptr<logger> create(const std::string& logger_name, sink_ptr sink)
{
return create(logger_name, { sink });
}
// Set the global formatter and apply it to all existing loggers.
void formatter(formatter_ptr f)
{
std::lock_guard<Mutex> lock(_mutex);
_formatter = f;
for (auto& l : _loggers)
l.second->set_formatter(_formatter);
}
// Compile `pattern` into a formatter and apply it everywhere.
void set_pattern(const std::string& pattern)
{
std::lock_guard<Mutex> lock(_mutex);
_formatter = std::make_shared<pattern_formatter>(pattern);
for (auto& l : _loggers)
l.second->set_formatter(_formatter);
}
// Set the global level on existing loggers and remember it for new ones.
void set_level(level::level_enum log_level)
{
std::lock_guard<Mutex> lock(_mutex);
for (auto& l : _loggers)
l.second->set_level(log_level);
_level = log_level;
}
// Make subsequently created loggers asynchronous with these settings.
// Existing loggers are not converted.
void set_async_mode(size_t q_size, const async_overflow_policy overflow_policy, const std::function<void()>& worker_warmup_cb, const std::chrono::milliseconds& flush_interval_ms)
{
std::lock_guard<Mutex> lock(_mutex);
_async_mode = true;
_async_q_size = q_size;
_overflow_policy = overflow_policy;
_worker_warmup_cb = worker_warmup_cb;
_flush_interval_ms = flush_interval_ms;
}
// Make subsequently created loggers synchronous again.
void set_sync_mode()
{
std::lock_guard<Mutex> lock(_mutex);
_async_mode = false;
}
// Meyers singleton accessor.
static registry_t<Mutex>& instance()
{
static registry_t<Mutex> s_instance;
return s_instance;
}
private:
// Insert into the map; caller must hold _mutex.
void register_logger_impl(std::shared_ptr<logger> logger)
{
auto logger_name = logger->name();
if (_loggers.find(logger_name) != std::end(_loggers))
throw spdlog_ex("logger with name " + logger_name + " already exists");
_loggers[logger->name()] = logger;
}
registry_t<Mutex>() {}
registry_t<Mutex>(const registry_t<Mutex>&) = delete;
registry_t<Mutex>& operator=(const registry_t<Mutex>&) = delete;
Mutex _mutex;
std::unordered_map <std::string, std::shared_ptr<logger>> _loggers;
formatter_ptr _formatter;
level::level_enum _level = level::info;
bool _async_mode = false;
size_t _async_q_size = 0;
async_overflow_policy _overflow_policy = async_overflow_policy::block_retry;
std::function<void()> _worker_warmup_cb = nullptr;
// NOTE(review): default-initialized (value unspecified for a chrono
// duration); only read when _async_mode is true, which requires
// set_async_mode() to have set it first - confirm no other path.
std::chrono::milliseconds _flush_interval_ms;
};
// The registry is mutex-protected by default; defining
// SPDLOG_NO_REGISTRY_MUTEX swaps in a no-op mutex (caller must then
// guarantee single-threaded registry access).
#ifdef SPDLOG_NO_REGISTRY_MUTEX
typedef registry_t<spdlog::details::null_mutex> registry;
#else
typedef registry_t<std::mutex> registry;
#endif
}
}
@@ -0,0 +1,135 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
//
// Global registry functions
//
#include "registry.h"
#include "../sinks/file_sinks.h"
#include "../sinks/stdout_sinks.h"
#include "../sinks/syslog_sink.h"
// Add an externally-created logger to the global registry.
// Throws spdlog_ex if a logger with the same name already exists.
inline void spdlog::register_logger(std::shared_ptr<logger> logger)
{
    details::registry::instance().register_logger(logger);
}
// Look up a registered logger by name; returns nullptr if not found.
inline std::shared_ptr<spdlog::logger> spdlog::get(const std::string& name)
{
    auto& reg = details::registry::instance();
    return reg.get(name);
}
// Remove the named logger from the global registry (no-op if absent).
inline void spdlog::drop(const std::string &name)
{
    auto& reg = details::registry::instance();
    reg.drop(name);
}
// Create a thread-safe size-based rotating file logger writing to
// "<filename>.txt"; rotates when max_file_size bytes is exceeded,
// keeping at most max_files rotated files.
inline std::shared_ptr<spdlog::logger> spdlog::rotating_logger_mt(const std::string& logger_name, const std::string& filename, size_t max_file_size, size_t max_files, bool force_flush)
{
return create<spdlog::sinks::rotating_file_sink_mt>(logger_name, filename, "txt", max_file_size, max_files, force_flush);
}
// Single-threaded (unsynchronized) variant of rotating_logger_mt.
inline std::shared_ptr<spdlog::logger> spdlog::rotating_logger_st(const std::string& logger_name, const std::string& filename, size_t max_file_size, size_t max_files, bool force_flush)
{
return create<spdlog::sinks::rotating_file_sink_st>(logger_name, filename, "txt", max_file_size, max_files, force_flush);
}
// Create a thread-safe file logger that starts a new "<filename>.txt"
// file daily at the given hour:minute.
inline std::shared_ptr<spdlog::logger> spdlog::daily_logger_mt(const std::string& logger_name, const std::string& filename, int hour, int minute, bool force_flush)
{
return create<spdlog::sinks::daily_file_sink_mt>(logger_name, filename, "txt", hour, minute, force_flush);
}
// Single-threaded (unsynchronized) variant of daily_logger_mt.
inline std::shared_ptr<spdlog::logger> spdlog::daily_logger_st(const std::string& logger_name, const std::string& filename, int hour, int minute, bool force_flush)
{
return create<spdlog::sinks::daily_file_sink_st>(logger_name, filename, "txt", hour, minute, force_flush);
}
// Create stdout/stderr loggers (all four share per-stream singleton sinks)
// Thread-safe logger writing to stdout.
inline std::shared_ptr<spdlog::logger> spdlog::stdout_logger_mt(const std::string& logger_name)
{
return details::registry::instance().create(logger_name, spdlog::sinks::stdout_sink_mt::instance());
}
// Unsynchronized logger writing to stdout.
inline std::shared_ptr<spdlog::logger> spdlog::stdout_logger_st(const std::string& logger_name)
{
return details::registry::instance().create(logger_name, spdlog::sinks::stdout_sink_st::instance());
}
// Thread-safe logger writing to stderr.
inline std::shared_ptr<spdlog::logger> spdlog::stderr_logger_mt(const std::string& logger_name)
{
return details::registry::instance().create(logger_name, spdlog::sinks::stderr_sink_mt::instance());
}
// Unsynchronized logger writing to stderr.
inline std::shared_ptr<spdlog::logger> spdlog::stderr_logger_st(const std::string& logger_name)
{
return details::registry::instance().create(logger_name, spdlog::sinks::stderr_sink_st::instance());
}
#ifdef __linux__
// Create syslog logger (Linux only). syslog_ident and syslog_option
// are forwarded to the syslog sink (openlog-style parameters).
inline std::shared_ptr<spdlog::logger> spdlog::syslog_logger(const std::string& logger_name, const std::string& syslog_ident, int syslog_option)
{
return create<spdlog::sinks::syslog_sink>(logger_name, syslog_ident, syslog_option);
}
#endif
//Create logger with multiple sinks
// The new logger is registered globally; throws if the name is taken.
inline std::shared_ptr<spdlog::logger> spdlog::create(const std::string& logger_name, spdlog::sinks_init_list sinks)
{
return details::registry::instance().create(logger_name, sinks);
}
// Create and register a logger over a single sink of type Sink,
// constructed from args. Note: args are taken and forwarded by value.
template <typename Sink, typename... Args>
inline std::shared_ptr<spdlog::logger> spdlog::create(const std::string& logger_name, Args... args)
{
sink_ptr sink = std::make_shared<Sink>(args...);
return details::registry::instance().create(logger_name, { sink });
}
// Create and register a logger over the sink range [sinks_begin, sinks_end).
template<class It>
inline std::shared_ptr<spdlog::logger> spdlog::create(const std::string& logger_name, const It& sinks_begin, const It& sinks_end)
{
return details::registry::instance().create(logger_name, sinks_begin, sinks_end);
}
// Set the global formatter: applied to all existing and future loggers.
inline void spdlog::set_formatter(spdlog::formatter_ptr f)
{
details::registry::instance().formatter(f);
}
// Set the global formatting pattern (compiled to a pattern_formatter).
inline void spdlog::set_pattern(const std::string& format_string)
{
return details::registry::instance().set_pattern(format_string);
}
// Set the level on all existing loggers and as the default for new ones.
inline void spdlog::set_level(level::level_enum log_level)
{
return details::registry::instance().set_level(log_level);
}
// Make subsequently created loggers asynchronous with the given queue
// size, overflow policy, worker warm-up callback and flush interval.
inline void spdlog::set_async_mode(size_t queue_size, const async_overflow_policy overflow_policy, const std::function<void()>& worker_warmup_cb, const std::chrono::milliseconds& flush_interval_ms)
{
details::registry::instance().set_async_mode(queue_size, overflow_policy, worker_warmup_cb, flush_interval_ms);
}
// Make subsequently created loggers synchronous again.
inline void spdlog::set_sync_mode()
{
details::registry::instance().set_sync_mode();
}
// Remove every logger from the global registry.
inline void spdlog::drop_all()
{
details::registry::instance().drop_all();
}
@@ -0,0 +1,35 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include "details/log_msg.h"
namespace spdlog {
namespace details {
class flag_formatter;
}
// Abstract formatter interface: render msg.raw (plus metadata) into
// msg.formatted.
class formatter {
public:
virtual ~formatter() {}
virtual void format(details::log_msg& msg) = 0;
};
// Formatter driven by a printf-like pattern string ("%Y-%m-%d %v"...),
// compiled once into a sequence of flag formatters.
class pattern_formatter : public formatter {
public:
explicit pattern_formatter(const std::string& pattern);
pattern_formatter(const pattern_formatter&) = delete;
pattern_formatter& operator=(const pattern_formatter&) = delete;
void format(details::log_msg& msg) override;
private:
// NOTE(review): _pattern is never assigned by the implementation
// (the ctor only calls compile_pattern) - appears to be dead state.
const std::string _pattern;
std::vector<std::unique_ptr<details::flag_formatter>> _formatters;
void handle_flag(char flag);
void compile_pattern(const std::string& pattern);
};
}
#include "details/pattern_formatter_impl.h"
@@ -0,0 +1,123 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
// Thread safe logger
// Has name, log level, vector of std::shared sink pointers and formatter
// Upon each log write the logger:
// 1. Checks if its log level is enough to log the message
// 2. Format the message using the formatter function
// 3. Pass the formatted message to its sinks to perform the actual logging
#include <vector>
#include <memory>
#include "sinks/base_sink.h"
#include "common.h"
namespace spdlog {
namespace details {
class line_logger;
}
// Thread-safe logger: a name, an atomic level threshold, a set of sink
// pointers and a formatter (see the comments at the top of this file).
class logger {
public:
logger(const std::string& logger_name, sink_ptr single_sink);
logger(const std::string& name, sinks_init_list);
template <class It>
logger(const std::string& name, const It& begin, const It& end);
virtual ~logger();
logger(const logger&) = delete;
logger& operator=(const logger&) = delete;
// Messages below the level threshold are discarded.
void set_level(level::level_enum);
level::level_enum level() const;
const std::string& name() const;
bool should_log(level::level_enum) const;
// logger.info(cppformat_string, arg1, arg2, arg3, ...) call style
template <typename... Args>
details::line_logger trace(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger debug(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger info(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger notice(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger warn(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger error(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger critical(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger alert(const char* fmt, const Args&... args);
template <typename... Args>
details::line_logger emerg(const char* fmt, const Args&... args);
// logger.info(msg) << ".." call style
template <typename T>
details::line_logger trace(const T&);
template <typename T>
details::line_logger debug(const T&);
template <typename T>
details::line_logger info(const T&);
template <typename T>
details::line_logger notice(const T&);
template <typename T>
details::line_logger warn(const T&);
template <typename T>
details::line_logger error(const T&);
template <typename T>
details::line_logger critical(const T&);
template <typename T>
details::line_logger alert(const T&);
template <typename T>
details::line_logger emerg(const T&);
// logger.info() << ".." call style
details::line_logger trace();
details::line_logger debug();
details::line_logger info();
details::line_logger notice();
details::line_logger warn();
details::line_logger error();
details::line_logger critical();
details::line_logger alert();
details::line_logger emerg();
// Create log message with the given level, no matter what is the actual logger's level
template <typename... Args>
details::line_logger force_log(level::level_enum lvl, const char* fmt, const Args&... args);
// Set the format of the log messages from this logger
void set_pattern(const std::string&);
void set_formatter(formatter_ptr);
virtual void flush();
protected:
// Virtual hooks - NOTE(review): presumably overridden by async_logger;
// confirm in the corresponding *_impl.h.
virtual void _log_msg(details::log_msg&);
virtual void _set_pattern(const std::string&);
virtual void _set_formatter(formatter_ptr);
details::line_logger _log_if_enabled(level::level_enum lvl);
template <typename... Args>
details::line_logger
_log_if_enabled(level::level_enum lvl, const char* fmt, const Args&... args);
template <typename T>
inline details::line_logger _log_if_enabled(level::level_enum lvl, const T& msg);
friend details::line_logger;
std::string _name;
std::vector<sink_ptr> _sinks;
formatter_ptr _formatter;
// atomic: read by should_log() without locking while set_level() writes
std::atomic_int _level;
};
}
#include "./details/logger_impl.h"
@@ -0,0 +1,67 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#if defined(__ANDROID__)
#include <mutex>
#include "base_sink.h"
#include "../details/null_mutex.h"
#include <android/log.h>
namespace spdlog {
namespace sinks {
/*
* Android sink (logging using __android_log_write)
*/
// Sink that forwards each formatted message to Android logcat under
// the given tag, mapping spdlog levels to android_LogPriority.
template <class Mutex>
class base_android_sink : public base_sink<Mutex> {
public:
    explicit base_android_sink(std::string tag = "spdlog") : _tag(tag) {}
    void flush() override {}

protected:
    void _sink_it(const details::log_msg& msg) override {
        const android_LogPriority priority = convert_to_android(msg.level);
        const int ret = __android_log_write(priority, _tag.c_str(), msg.formatted.c_str());
        // Fix: __android_log_write signals failure with a negative
        // return value. The previous check threw unless the return was
        // strictly greater than the message size, i.e. it threw on
        // every successful write of the expected size.
        if (ret < 0) {
            throw spdlog_ex("Send to Android logcat failed");
        }
    }

private:
    // Map an spdlog level onto the closest android_LogPriority.
    static android_LogPriority convert_to_android(spdlog::level::level_enum level) {
        switch (level) {
        case spdlog::level::trace: return ANDROID_LOG_VERBOSE;
        case spdlog::level::debug: return ANDROID_LOG_DEBUG;
        case spdlog::level::info: return ANDROID_LOG_INFO;
        case spdlog::level::notice: return ANDROID_LOG_INFO;
        case spdlog::level::warn: return ANDROID_LOG_WARN;
        case spdlog::level::err: return ANDROID_LOG_ERROR;
        case spdlog::level::critical: return ANDROID_LOG_FATAL;
        case spdlog::level::alert: return ANDROID_LOG_FATAL;
        case spdlog::level::emerg: return ANDROID_LOG_FATAL;
        default: throw spdlog_ex("Incorrect level value");
        }
    }
    std::string _tag;
};
// Thread-safe (_mt) and single-threaded (_st) convenience aliases.
typedef base_android_sink<std::mutex> android_sink_mt;
typedef base_android_sink<details::null_mutex> android_sink_st;
}
}
#endif
@@ -0,0 +1,43 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
//
// base sink templated over a mutex (either dummy or real)
// concrete implementations should only override the _sink_it method.
// all locking is taken care of here so no locking is needed by the implementors.
//
#include <string>
#include <mutex>
#include <atomic>
#include "./sink.h"
#include "../formatter.h"
#include "../common.h"
#include "../details/log_msg.h"
namespace spdlog {
namespace sinks {
// Common sink base: serializes log() calls with Mutex and delegates
// the actual write to the subclass's _sink_it().
template <class Mutex>
class base_sink : public sink {
public:
    base_sink() : _mutex() {}
    virtual ~base_sink() = default;
    base_sink(const base_sink&) = delete;
    base_sink& operator=(const base_sink&) = delete;

    // Thread-safe entry point (when Mutex is a real mutex).
    void log(const details::log_msg& msg) override {
        std::lock_guard<Mutex> guard(_mutex);
        _sink_it(msg);
    }

protected:
    // Implemented by concrete sinks; invoked with _mutex held.
    virtual void _sink_it(const details::log_msg& msg) = 0;
    Mutex _mutex;
};
}
}
@@ -0,0 +1,62 @@
//
// Copyright (c) 2015 David Schury, Gabi Melman
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <algorithm>
#include <memory>
#include <mutex>
#include <list>
#include "../details/log_msg.h"
#include "../details/null_mutex.h"
#include "./base_sink.h"
#include "./sink.h"
namespace spdlog {
namespace sinks {
// Fans each log message out to a dynamic list of child sinks.
template <class Mutex>
class dist_sink : public base_sink<Mutex> {
public:
    explicit dist_sink() : _sinks() {}
    dist_sink(const dist_sink&) = delete;
    dist_sink& operator=(const dist_sink&) = delete;
    virtual ~dist_sink() = default;

protected:
    // Forward the message to every registered sink.
    // Runs with base_sink's mutex already held (see base_sink::log).
    void _sink_it(const details::log_msg& msg) override {
        for (auto& child : _sinks)
            child->log(msg);
    }
    std::vector<std::shared_ptr<sink>> _sinks;

public:
    // Flush every registered sink.
    void flush() override {
        std::lock_guard<Mutex> guard(base_sink<Mutex>::_mutex);
        for (auto& child : _sinks)
            child->flush();
    }
    // Register a sink unless it is null or already present.
    void add_sink(std::shared_ptr<sink> sink) {
        std::lock_guard<Mutex> guard(base_sink<Mutex>::_mutex);
        if (sink && _sinks.end() == std::find(_sinks.begin(), _sinks.end(), sink)) {
            _sinks.push_back(sink);
        }
    }
    // Unregister a previously added sink; no-op if absent.
    void remove_sink(std::shared_ptr<sink> sink) {
        std::lock_guard<Mutex> guard(base_sink<Mutex>::_mutex);
        auto pos = std::find(_sinks.begin(), _sinks.end(), sink);
        if (pos != _sinks.end()) { _sinks.erase(pos); }
    }
};
// Thread-safe (_mt) and single-threaded (_st) convenience aliases.
typedef dist_sink<std::mutex> dist_sink_mt;
typedef dist_sink<details::null_mutex> dist_sink_st;
}
}
@@ -0,0 +1,210 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <mutex>
#include "base_sink.h"
#include "../details/null_mutex.h"
#include "../details/file_helper.h"
#include "../details/format.h"
namespace spdlog {
namespace sinks {
/*
* Trivial file sink with single file as target
*/
// Plain file sink: appends every message to one log file.
template <class Mutex>
class simple_file_sink : public base_sink<Mutex> {
public:
    // Opens `filename` immediately. When force_flush is set, the file
    // helper flushes after every write.
    explicit simple_file_sink(const std::string& filename, bool force_flush = false)
        : _file_helper(force_flush) {
        _file_helper.open(filename);
    }
    void flush() override {
        _file_helper.flush();
    }

protected:
    void _sink_it(const details::log_msg& msg) override {
        _file_helper.write(msg);
    }

private:
    details::file_helper _file_helper;
};
// Thread-safe (_mt) and single-threaded (_st) convenience aliases.
typedef simple_file_sink<std::mutex> simple_file_sink_mt;
typedef simple_file_sink<details::null_mutex> simple_file_sink_st;
/*
* Rotating file sink based on size
*/
// Size-based rotating file sink: writes to "<base>.<ext>" and, when the
// file grows past max_size bytes, shifts existing files up one index
// ("<base>.1.<ext>", "<base>.2.<ext>", ...) keeping at most max_files.
template <class Mutex>
class rotating_file_sink : public base_sink<Mutex> {
public:
rotating_file_sink(const std::string& base_filename,
const std::string& extension,
std::size_t max_size,
std::size_t max_files,
bool force_flush = false)
: _base_filename(base_filename)
, _extension(extension)
, _max_size(max_size)
, _max_files(max_files)
, _current_size(0)
, _file_helper(force_flush) {
_file_helper.open(calc_filename(_base_filename, 0, _extension));
_current_size = _file_helper.size(); // expensive. called only once
}
void
flush() override {
_file_helper.flush();
}
protected:
// Track the would-be size and rotate before the write when the limit
// is crossed; the triggering message lands in the fresh file.
void
_sink_it(const details::log_msg& msg) override {
_current_size += msg.formatted.size();
if (_current_size > _max_size) {
_rotate();
_current_size = msg.formatted.size();
}
_file_helper.write(msg);
}
private:
// "<filename>.<extension>" for index 0, else "<filename>.<index>.<extension>".
static std::string
calc_filename(const std::string& filename, std::size_t index, const std::string& extension) {
fmt::MemoryWriter w;
if (index)
w.write("{}.{}.{}", filename, index, extension);
else
w.write("{}.{}", filename, extension);
return w.str();
}
// Rotate files:
// log.txt -> log.1.txt
// log.1.txt -> log.2.txt
// log.2.txt -> log.3.txt
// log.3.txt -> delete
void
_rotate() {
_file_helper.close();
for (auto i = _max_files; i > 0; --i) {
std::string src = calc_filename(_base_filename, i - 1, _extension);
std::string target = calc_filename(_base_filename, i, _extension);
// The highest-index file is removed to make room for the rename.
if (details::file_helper::file_exists(target)) {
if (std::remove(target.c_str()) != 0) {
throw spdlog_ex("rotating_file_sink: failed removing " + target);
}
}
if (details::file_helper::file_exists(src) &&
std::rename(src.c_str(), target.c_str())) {
throw spdlog_ex("rotating_file_sink: failed renaming " + src + " to " + target);
}
}
_file_helper.reopen(true);
}
std::string _base_filename;
std::string _extension;
std::size_t _max_size;
std::size_t _max_files;
std::size_t _current_size;
details::file_helper _file_helper;
};
typedef rotating_file_sink<std::mutex> rotating_file_sink_mt;
typedef rotating_file_sink<details::null_mutex> rotating_file_sink_st;
/*
* Rotating file sink based on date. rotates at midnight
*/
template <class Mutex>
class daily_file_sink : public base_sink<Mutex> {
public:
    // Create daily file sink which rotates at the given wall-clock time
    // (rotation_hour:rotation_minute; validated to 0-23 / 0-59).
    daily_file_sink(const std::string& base_filename,
                    const std::string& extension,
                    int rotation_hour,
                    int rotation_minute,
                    bool force_flush = false)
        : _base_filename(base_filename)
        , _extension(extension)
        , _rotation_h(rotation_hour)
        , _rotation_m(rotation_minute)
        , _file_helper(force_flush) {
        if (rotation_hour < 0 || rotation_hour > 23 || rotation_minute < 0 || rotation_minute > 59)
            throw spdlog_ex("daily_file_sink: Invalid rotation time in ctor");
        _rotation_tp = _next_rotation_tp();
        _file_helper.open(calc_filename(_base_filename, _extension));
    }

    void
    flush() override {
        _file_helper.flush();
    }

protected:
    // If the rotation point has been reached, open a fresh timestamped file
    // and schedule the next rotation; then write the message.
    void
    _sink_it(const details::log_msg& msg) override {
        if (std::chrono::system_clock::now() >= _rotation_tp) {
            _file_helper.open(calc_filename(_base_filename, _extension));
            _rotation_tp = _next_rotation_tp();
        }
        _file_helper.write(msg);
    }

private:
    // Next occurrence of _rotation_h:_rotation_m in local time — today if it
    // is still ahead of now, otherwise the same time tomorrow.
    std::chrono::system_clock::time_point
    _next_rotation_tp() {
        using namespace std::chrono;
        auto now = system_clock::now();
        time_t tnow = std::chrono::system_clock::to_time_t(now);
        tm date = spdlog::details::os::localtime(tnow);
        date.tm_hour = _rotation_h;
        date.tm_min = _rotation_m;
        date.tm_sec = 0;
        auto rotation_time = std::chrono::system_clock::from_time_t(std::mktime(&date));
        if (rotation_time > now)
            return rotation_time;
        else
            return system_clock::time_point(rotation_time + hours(24));
    }

    // Create filename of the form basename_YYYY-MM-DD_HH-MM.extension
    // (uses the current local time at the moment of the call).
    static std::string
    calc_filename(const std::string& basename, const std::string& extension) {
        std::tm tm = spdlog::details::os::localtime();
        fmt::MemoryWriter w;
        w.write("{}_{:04d}-{:02d}-{:02d}_{:02d}-{:02d}.{}",
                basename,
                tm.tm_year + 1900,
                tm.tm_mon + 1,
                tm.tm_mday,
                tm.tm_hour,
                tm.tm_min,
                extension);
        return w.str();
    }

    std::string _base_filename;
    std::string _extension;
    int _rotation_h; // hour of day to rotate (0-23)
    int _rotation_m; // minute to rotate (0-59)
    std::chrono::system_clock::time_point _rotation_tp; // next scheduled rotation
    details::file_helper _file_helper;
};

typedef daily_file_sink<std::mutex> daily_file_sink_mt;
typedef daily_file_sink<details::null_mutex> daily_file_sink_st;
}
}
@@ -0,0 +1,26 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <mutex>
#include "./base_sink.h"
#include "../details/null_mutex.h"
namespace spdlog {
namespace sinks {
// Sink that silently discards every message; useful to disable a logger.
template <class Mutex>
class null_sink : public base_sink<Mutex> {
protected:
    // Intentionally a no-op.
    void _sink_it(const details::log_msg&) override {}

    // Nothing is buffered, so there is nothing to flush.
    void flush() override {}
};

typedef null_sink<details::null_mutex> null_sink_st;
typedef null_sink<std::mutex> null_sink_mt;
}
}
@@ -0,0 +1,45 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <ostream>
#include <mutex>
#include <memory>
#include "../details/null_mutex.h"
#include "./base_sink.h"
namespace spdlog {
namespace sinks {
// Sink writing formatted messages to a caller-provided std::ostream.
// The stream is held by reference and must outlive the sink.
template <class Mutex>
class ostream_sink : public base_sink<Mutex> {
public:
    // force_flush: when true, flush the stream after every message.
    explicit ostream_sink(std::ostream& os, bool force_flush = false)
        : _ostream(os)
        , _force_flush(force_flush) {}

    ostream_sink(const ostream_sink&) = delete;
    ostream_sink& operator=(const ostream_sink&) = delete;
    virtual ~ostream_sink() = default;

protected:
    void _sink_it(const details::log_msg& msg) override {
        _ostream.write(msg.formatted.data(), msg.formatted.size());
        if (_force_flush) {
            _ostream.flush();
        }
    }

    void flush() override { _ostream.flush(); }

    std::ostream& _ostream;
    bool _force_flush;
};

typedef ostream_sink<std::mutex> ostream_sink_mt;
typedef ostream_sink<details::null_mutex> ostream_sink_st;
}
}
@@ -0,0 +1,19 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include "../details/log_msg.h"
namespace spdlog {
namespace sinks {
// Abstract interface implemented by every log sink.
class sink {
public:
    // Defaulted (and implicitly noexcept) virtual destructor so deleting
    // through a sink* is safe — replaces the empty user-provided body.
    virtual ~sink() = default;

    // Write a single log message to the sink's target.
    virtual void log(const details::log_msg& msg) = 0;

    // Flush any buffered messages to the target.
    virtual void flush() = 0;
};
}
}
@@ -0,0 +1,48 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#include <iostream>
#include <mutex>
#include "./ostream_sink.h"
#include "../details/null_mutex.h"
namespace spdlog {
namespace sinks {
// Sink bound to std::cout; constructed with force_flush enabled so output
// appears immediately.
template <class Mutex>
class stdout_sink : public ostream_sink<Mutex> {
    using MyType = stdout_sink<Mutex>;

public:
    stdout_sink() : ostream_sink<Mutex>(std::cout, true) {}

    // Process-wide shared instance (function-local static, created lazily).
    static std::shared_ptr<MyType> instance() {
        static std::shared_ptr<MyType> single = std::make_shared<MyType>();
        return single;
    }
};

typedef stdout_sink<details::null_mutex> stdout_sink_st;
typedef stdout_sink<std::mutex> stdout_sink_mt;
// Sink bound to std::cerr; constructed with force_flush enabled so errors
// appear immediately.
template <class Mutex>
class stderr_sink : public ostream_sink<Mutex> {
    using MyType = stderr_sink<Mutex>;

public:
    stderr_sink() : ostream_sink<Mutex>(std::cerr, true) {}

    // Process-wide shared instance (function-local static, created lazily).
    static std::shared_ptr<MyType> instance() {
        static std::shared_ptr<MyType> single = std::make_shared<MyType>();
        return single;
    }
};

typedef stderr_sink<std::mutex> stderr_sink_mt;
typedef stderr_sink<details::null_mutex> stderr_sink_st;
}
}
@@ -0,0 +1,76 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
#pragma once
#ifdef __linux__
#include <array>
#include <string>
#include <syslog.h>
#include "./sink.h"
#include "../common.h"
#include "../details/log_msg.h"
namespace spdlog {
namespace sinks {
/**
 * Sink that writes to syslog using the `syslog()` library call.
 *
 * Locking is not needed, as `syslog()` itself is thread-safe.
 */
class syslog_sink : public sink {
public:
    // ident: string prepended to every message; when empty, openlog() uses
    // the program name instead.
    // NOTE(review): _priorities has 10 slots — assumes the level enum holds
    // exactly trace..off with contiguous values 0-9; confirm against common.h.
    syslog_sink(const std::string& ident = "",
                int syslog_option = 0,
                int syslog_facility = LOG_USER)
        : _ident(ident) {
        // Map every spdlog level to a syslog priority.
        _priorities[static_cast<int>(level::trace)] = LOG_DEBUG;
        _priorities[static_cast<int>(level::debug)] = LOG_DEBUG;
        _priorities[static_cast<int>(level::info)] = LOG_INFO;
        _priorities[static_cast<int>(level::notice)] = LOG_NOTICE;
        _priorities[static_cast<int>(level::warn)] = LOG_WARNING;
        _priorities[static_cast<int>(level::err)] = LOG_ERR;
        _priorities[static_cast<int>(level::critical)] = LOG_CRIT;
        _priorities[static_cast<int>(level::alert)] = LOG_ALERT;
        _priorities[static_cast<int>(level::emerg)] = LOG_EMERG;
        _priorities[static_cast<int>(level::off)] = LOG_INFO;
        // set ident to be program name if empty
        ::openlog(_ident.empty() ? nullptr : _ident.c_str(), syslog_option, syslog_facility);
    }

    ~syslog_sink() { ::closelog(); }

    syslog_sink(const syslog_sink&) = delete;
    syslog_sink& operator=(const syslog_sink&) = delete;

    // Forward the message text (msg.raw, i.e. before pattern formatting)
    // to syslog with the mapped priority.
    void
    log(const details::log_msg& msg) override {
        ::syslog(syslog_prio_from_level(msg), "%s", msg.raw.str().c_str());
    }

    // syslog() delivers immediately; nothing to flush.
    void
    flush() override {}

private:
    std::array<int, 10> _priorities;
    // must store the ident because the man says openlog might use the pointer as is and not a
    // string copy
    const std::string _ident;

    //
    // Simply maps spdlog's log level to syslog priority level.
    //
    int
    syslog_prio_from_level(const details::log_msg& msg) const {
        return _priorities[static_cast<int>(msg.level)];
    }
};
}
}
#endif
@@ -0,0 +1,151 @@
//
// Copyright(c) 2015 Gabi Melman.
// Distributed under the MIT License (http://opensource.org/licenses/MIT)
//
// spdlog main header file.
// see example.cpp for usage example
#pragma once
#include "common.h"
#include "logger.h"
namespace spdlog {
// Return an existing logger or nullptr if a logger with such name doesn't exist.
// Examples:
//
// spdlog::get("mylog")->info("Hello");
// auto logger = spdlog::get("mylog");
// logger->info("This is another message" , x, y, z);
// logger->info() << "This is another message" << x << y << z;
std::shared_ptr<logger> get(const std::string& name);
//
// Set global formatting
// example: spdlog::set_pattern("%Y-%m-%d %H:%M:%S.%e %l : %v");
//
void set_pattern(const std::string& format_string);
void set_formatter(formatter_ptr f);
//
// Set global logging level for
//
void set_level(level::level_enum log_level);
//
// Turn on async mode (off by default) and set the queue size for each async_logger.
// effective only for loggers created after this call.
// queue_size: size of queue (must be power of 2):
// Each logger will pre-allocate a dedicated queue with queue_size entries upon construction.
//
// async_overflow_policy (optional, block_retry by default):
// async_overflow_policy::block_retry - if queue is full, block until queue has room for the new
// log entry.
// async_overflow_policy::discard_log_msg - never block and discard any new messages when queue
// overflows.
//
// worker_warmup_cb (optional):
// callback function that will be called in worker thread upon start (can be used to init stuff
// like thread affinity)
//
void set_async_mode(
size_t queue_size,
const async_overflow_policy overflow_policy = async_overflow_policy::block_retry,
const std::function<void()>& worker_warmup_cb = nullptr,
const std::chrono::milliseconds& flush_interval_ms = std::chrono::milliseconds::zero());
// Turn off async mode
void set_sync_mode();
//
// Create and register multi/single threaded rotating file logger
//
// logger_name: registry key; filename: base path of the log file.
// Rotation keeps at most max_files files of max_file_size bytes each.
// (Parameter renamed from the inconsistent `filenameB` to match the _st overload.)
std::shared_ptr<logger> rotating_logger_mt(const std::string& logger_name,
                                           const std::string& filename,
                                           size_t max_file_size,
                                           size_t max_files,
                                           bool force_flush = false);
std::shared_ptr<logger> rotating_logger_st(const std::string& logger_name,
                                           const std::string& filename,
                                           size_t max_file_size,
                                           size_t max_files,
                                           bool force_flush = false);
//
// Create file logger which creates new file on the given time (default in midnight):
//
std::shared_ptr<logger> daily_logger_mt(const std::string& logger_name,
const std::string& filename,
int hour = 0,
int minute = 0,
bool force_flush = false);
std::shared_ptr<logger> daily_logger_st(const std::string& logger_name,
const std::string& filename,
int hour = 0,
int minute = 0,
bool force_flush = false);
//
// Create and register stdout/stderr loggers
//
std::shared_ptr<logger> stdout_logger_mt(const std::string& logger_name);
std::shared_ptr<logger> stdout_logger_st(const std::string& logger_name);
std::shared_ptr<logger> stderr_logger_mt(const std::string& logger_name);
std::shared_ptr<logger> stderr_logger_st(const std::string& logger_name);
//
// Create and register a syslog logger
//
#ifdef __linux__
std::shared_ptr<logger>
syslog_logger(const std::string& logger_name, const std::string& ident = "", int syslog_option = 0);
#endif
// Create and register a logger with multiple sinks
std::shared_ptr<logger> create(const std::string& logger_name, sinks_init_list sinks);
template <class It>
std::shared_ptr<logger>
create(const std::string& logger_name, const It& sinks_begin, const It& sinks_end);
// Create and register a logger with templated sink type
// Example: spdlog::create<daily_file_sink_st>("mylog", "dailylog_filename", "txt");
template <typename Sink, typename... Args>
std::shared_ptr<spdlog::logger> create(const std::string& logger_name, Args...);
// Register the given logger with the given name
void register_logger(std::shared_ptr<logger> logger);
// Drop the reference to the given logger
void drop(const std::string& name);
// Drop all references
void drop_all();
///////////////////////////////////////////////////////////////////////////////
//
// Macros to display the source file & line
// Trace & Debug can be switched on/off at compile time for zero-cost debug statements.
// Uncomment SPDLOG_DEBUG_ON/SPDLOG_TRACE_ON in tweakme.h to enable.
//
// Example:
// spdlog::set_level(spdlog::level::debug);
// SPDLOG_DEBUG(my_logger, "Some debug message {} {}", 1, 3.2);
///////////////////////////////////////////////////////////////////////////////
#ifdef SPDLOG_TRACE_ON
#define SPDLOG_TRACE(logger, ...) \
logger->trace(__VA_ARGS__) << " (" << __FILE__ << " #" << __LINE__ << ")";
#else
#define SPDLOG_TRACE(logger, ...)
#endif
#ifdef SPDLOG_DEBUG_ON
#define SPDLOG_DEBUG(logger, ...) \
logger->debug(__VA_ARGS__) << " (" << __FILE__ << " #" << __LINE__ << ")";
#else
#define SPDLOG_DEBUG(logger, ...)
#endif
}
#include "details/spdlog_impl.h"
@@ -0,0 +1,54 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <json/value.h>
#include <string>
namespace etix {
namespace cameradar {
// Plain data record describing one discovered RTSP stream (camera) and the
// results of the discovery / bruteforce / thumbnail steps.
// Fix: `port` now has a default member initializer — previously a
// default-constructed stream_model carried an indeterminate port value.
struct stream_model {
    // Ex : "172.16.100.113"
    std::string address;
    // Ex : 8554 (0 until a port has been discovered)
    unsigned int port = 0;
    // Ex : "admin"
    std::string username = "";
    // Ex : "123456"
    std::string password = "";
    // Ex : "live.sdp"
    std::string route = "";
    // Ex : "rtsp"
    std::string service_name;
    // Ex : "Vivotek HDCam"
    std::string product;
    // Ex : "RTSP"
    std::string protocol;
    // Ex : "Open"
    std::string state;
    // Ex : "true"
    bool path_found = false;
    // Ex : "true"
    bool ids_found = false;
    // Ex : "/thumbnails/cameradar"
    std::string thumbnail_path = "";
};
Json::Value deserialize(const stream_model& model);
}
}
@@ -0,0 +1,48 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cachemanager.h> // cacheManager
#include <cameradar_task.h> // task interface
#include <describe.h> // send DESCRIBE through cURL
#include <future> // std::async & std::future
#include <signal_handler.h> // signals
#include <stream_model.h> // data model
namespace etix {
namespace cameradar {
class brutelogs : public etix::cameradar::cameradar_task {
const configuration& conf;
std::shared_ptr<cache_manager> cache;
std::string nmap_output;
public:
brutelogs() = delete;
brutelogs(std::shared_ptr<cache_manager> cache,
const configuration& conf,
std::string nmap_output)
: conf(conf), cache(cache), nmap_output(nmap_output) {}
brutelogs(const brutelogs& ref) = delete;
virtual bool run() const;
bool test_ids(const etix::cameradar::stream_model& cit,
const std::string& pit,
const std::string& uit) const;
bool bruteforce_camera(const stream_model& stream) const;
};
}
}
@@ -0,0 +1,49 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cachemanager.h> // cacheManager
#include <cameradar_task.h> // task interface
#include <curl/curl.h> // cURL client for discovery
#include <describe.h> // send DESCRIBE through cURL
#include <future> // std::async & std::future
#include <logger.h> // LOG
#include <memory> // std::shared_ptr
#include <signal_handler.h> // signals
#include <stream_model.h> // data model
namespace etix {
namespace cameradar {
class brutepath : public etix::cameradar::cameradar_task {
const configuration& conf;
std::shared_ptr<cache_manager> cache;
std::string nmap_output;
public:
brutepath() = delete;
brutepath(std::shared_ptr<cache_manager> cache,
const configuration& conf,
std::string nmap_output)
: conf(conf), cache(cache), nmap_output(nmap_output) {}
brutepath(const brutepath& ref) = delete;
virtual bool run() const;
bool test_path(const etix::cameradar::stream_model& cit, const std::string& it) const;
bool bruteforce_camera(const stream_model& stream) const;
};
}
}
@@ -0,0 +1,43 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cameradar_task.h> // task interface
#include <launch_command.h> // launch_command
#include <algorithm> // launch_command
#include <fmt.h> // fmt
#include <stream_model.h> // data model
#include <cachemanager.h> // cacheManager
namespace etix {
namespace cameradar {
class mapping : public etix::cameradar::cameradar_task {
const configuration& conf;
std::shared_ptr<cache_manager> cache;
std::string nmap_output;
public:
mapping() = delete;
mapping(std::shared_ptr<cache_manager> cache,
const configuration& conf,
std::string nmap_output)
: conf(conf), cache(cache), nmap_output(nmap_output) {}
mapping(const mapping& ref) = delete;
virtual bool run() const;
};
}
}
@@ -0,0 +1,45 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cameradar_task.h> // task interface
#include <tinyxml.h> // parsing
#include <stream_model.h> // data model
#include <cachemanager.h> // cacheManager
namespace etix {
namespace cameradar {
class parsing : public etix::cameradar::cameradar_task {
const configuration& conf;
std::shared_ptr<cache_manager> cache;
std::string nmap_output;
public:
parsing() = delete;
parsing(std::shared_ptr<cache_manager> cache,
const configuration& conf,
std::string nmap_output)
: conf(conf), cache(cache), nmap_output(nmap_output) {}
parsing(const parsing& ref) = delete;
virtual bool run() const;
void parse_camera(TiXmlElement*, std::vector<etix::cameradar::stream_model>& data) const;
bool print_detected_cameras(const std::vector<etix::cameradar::stream_model>& data) const;
};
}
}
@@ -0,0 +1,40 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cachemanager.h> // cacheManager
#include <cameradar_task.h> // task interface
#include <fstream> // std::ofstream
#include <iostream> // std::ofstream
#include <stream_model.h> // data model
namespace etix {
namespace cameradar {
class print : public etix::cameradar::cameradar_task {
const configuration& conf;
std::shared_ptr<cache_manager> cache;
std::string nmap_output;
public:
print() = delete;
print(std::shared_ptr<cache_manager> cache, const configuration& conf, std::string nmap_output)
: conf(conf), cache(cache), nmap_output(nmap_output) {}
print(const print& ref) = delete;
virtual bool run() const;
};
}
}
@@ -0,0 +1,45 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cameradar_task.h> // task interface
#include <signal_handler.h> // signals
#include <stream_model.h> // data model
#include <cachemanager.h> // cacheManager
#include <glib.h>
#include <gst/gst.h>
#include <gst/gstparse.h>
namespace etix {
namespace cameradar {
class stream_check : public etix::cameradar::cameradar_task {
const configuration& conf;
std::shared_ptr<cache_manager> cache;
std::string nmap_output;
public:
stream_check() = delete;
stream_check(std::shared_ptr<cache_manager> cache,
const configuration& conf,
std::string nmap_output)
: conf(conf), cache(cache), nmap_output(nmap_output) {}
stream_check(const stream_check& ref) = delete;
virtual bool run() const;
};
}
}
@@ -0,0 +1,48 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cachemanager.h> // cacheManager
#include <cameradar_task.h> // task interface
#include <fmt.h> // fmt
#include <future> // std::async & std::future
#include <launch_command.h> // launch_command
#include <rtsp_path.h> // make_path
#include <signal_handler.h> // signals
#include <stream_model.h> // data model
namespace etix {
namespace cameradar {
class thumbnail : public etix::cameradar::cameradar_task {
const configuration& conf;
std::shared_ptr<cache_manager> cache;
std::string nmap_output;
public:
thumbnail() = delete;
thumbnail(std::shared_ptr<cache_manager> cache,
const configuration& conf,
std::string nmap_output)
: conf(conf), cache(cache), nmap_output(nmap_output) {}
thumbnail(const thumbnail& ref) = delete;
virtual bool run() const;
std::string build_output_file_path(const std::string& path) const;
bool generate_thumbnail(const stream_model& stream) const;
};
}
}
+303
View File
@@ -0,0 +1,303 @@
/*
www.sourceforge.net/projects/tinyxml
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any
damages arising from the use of this software.
Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and
redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product documentation
would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and
must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source
distribution.
*/
#ifndef TIXML_USE_STL
#ifndef TIXML_STRING_INCLUDED
#define TIXML_STRING_INCLUDED
#include <assert.h>
#include <string.h>
/* The support for explicit isn't that universal, and it isn't really
required - it is used to check that the TiXmlString class isn't
incorrectly
used. Be nice to old compilers and macro it here:
*/
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
// Microsoft visual studio, version 6 and higher.
#define TIXML_EXPLICIT explicit
#elif defined(__GNUC__) && (__GNUC__ >= 3)
// GCC version 3 and higher.
#define TIXML_EXPLICIT explicit
#else
#define TIXML_EXPLICIT
#endif
/*
TiXmlString is an emulation of a subset of the std::string template.
Its purpose is to allow compiling TinyXML on compilers with no or poor STL
support.
Only the member functions relevant to the TinyXML project have been
implemented.
The buffer allocation is made by a simplistic power of 2 like mechanism : if
we increase
a string and there's no more room, we allocate a buffer twice as big as we
need.
*/
class TiXmlString {
public:
    // The size type used
    typedef size_t size_type;

    // Error value for find primitive
    static const size_type npos; // = -1;

    // Empty constructor: shares the static null representation, so empty
    // strings perform no allocation.
    TiXmlString() : rep_(&nullrep_) {}

    // TiXmlString copy constructor
    TiXmlString(const TiXmlString& copy) : rep_(0) {
        init(copy.length());
        memcpy(start(), copy.data(), length());
    }

    // Constructor from a null-terminated C string
    TIXML_EXPLICIT
    TiXmlString(const char* copy)
        : rep_(0) {
        init(static_cast<size_type>(strlen(copy)));
        memcpy(start(), copy, length());
    }

    // Constructor from a char buffer with explicit length (the buffer need
    // not be null-terminated)
    TIXML_EXPLICIT
    TiXmlString(const char* str, size_type len)
        : rep_(0) {
        init(len);
        memcpy(start(), str, len);
    }

    // TiXmlString destructor — releases the buffer unless it is the shared
    // null representation.
    ~TiXmlString() { quit(); }

    // Assignment from a null-terminated C string; delegates to assign().
    TiXmlString& operator=(const char* copy) { return assign(copy, (size_type)strlen(copy)); }

    // Copy assignment; assign() is responsible for reallocation/self-assignment.
    TiXmlString& operator=(const TiXmlString& copy) { return assign(copy.start(), copy.length()); }

    // += operator. Maps to append
    TiXmlString& operator+=(const char* suffix) {
        return append(suffix, static_cast<size_type>(strlen(suffix)));
    }

    // += operator. Maps to append
    TiXmlString& operator+=(char single) { return append(&single, 1); }

    // += operator. Maps to append
    TiXmlString& operator+=(const TiXmlString& suffix) {
        return append(suffix.data(), suffix.length());
    }

    // Convert a TiXmlString into a null-terminated char *
    const char*
    c_str() const {
        return rep_->str;
    }

    // Convert a TiXmlString into a char * (need not be null terminated).
    const char*
    data() const {
        return rep_->str;
    }

    // Return the length of a TiXmlString
    size_type
    length() const {
        return rep_->size;
    }

    // Alias for length()
    size_type
    size() const {
        return rep_->size;
    }

    // Checks if a TiXmlString is empty
    bool
    empty() const {
        return rep_->size == 0;
    }

    // Return capacity of string
    size_type
    capacity() const {
        return rep_->capacity;
    }

    // single char extraction (bounds are checked only by assert)
    const char&
    at(size_type index) const {
        assert(index < length());
        return rep_->str[index];
    }

    // [] operator (bounds are checked only by assert)
    char& operator[](size_type index) const {
        assert(index < length());
        return rep_->str[index];
    }

    // find a char in a string. Return TiXmlString::npos if not found
    size_type
    find(char lookup) const {
        return find(lookup, 0);
    }

    // find a char in a string from an offset. Return TiXmlString::npos if not
    // found
    size_type
    find(char tofind, size_type offset) const {
        if (offset >= length()) return npos;
        for (const char* p = c_str() + offset; *p != '\0'; ++p) {
            if (*p == tofind) return static_cast<size_type>(p - c_str());
        }
        return npos;
    }

    // Reset to the empty string, freeing any allocated buffer.
    void
    clear() {
        // Lee:
        // The original was just too strange, though correct:
        // TiXmlString().swap(*this);
        // Instead use the quit & re-init:
        quit();
        init(0, 0);
    }

    /* Function to reserve a big amount of data when we know we'll need it. Be
       aware that this
       function DOES NOT clear the content of the TiXmlString if any exists.
    */
    void reserve(size_type cap);

    // Replace / extend the contents with str[0..len); defined out of line.
    TiXmlString& assign(const char* str, size_type len);
    TiXmlString& append(const char* str, size_type len);

    // Constant-time swap of the underlying representations.
    void
    swap(TiXmlString& other) {
        Rep* r = rep_;
        rep_ = other.rep_;
        other.rep_ = r;
    }

private:
    // Allocate for size sz with capacity == sz.
    void
    init(size_type sz) {
        init(sz, sz);
    }

    // Set the logical size and keep the buffer null-terminated.
    void
    set_size(size_type sz) {
        rep_->str[rep_->size = sz] = '\0';
    }

    char*
    start() const {
        return rep_->str;
    }

    char*
    finish() const {
        return rep_->str + rep_->size;
    }

    // Header plus a char buffer; str[1] is grown past the struct end by
    // over-allocating in init().
    struct Rep {
        size_type size, capacity;
        char str[1];
    };

    void
    init(size_type sz, size_type cap) {
        if (cap) {
            // Lee: the original form:
            //   rep_ = static_cast<Rep*>(operator new(sizeof(Rep) + cap));
            // doesn't work in some cases of new being overloaded. Switching
            // to the normal allocation, although use an 'int' for systems
            // that are overly picky about structure alignment.
            const size_type bytesNeeded = sizeof(Rep) + cap;
            const size_type intsNeeded = (bytesNeeded + sizeof(int) - 1) / sizeof(int);
            rep_ = reinterpret_cast<Rep*>(new int[intsNeeded]);
            rep_->str[rep_->size = sz] = '\0'; // same effect as set_size(sz)
            rep_->capacity = cap;
        } else {
            // Empty string: point at the shared static representation.
            rep_ = &nullrep_;
        }
    }

    void
    quit() {
        if (rep_ != &nullrep_) {
            // The rep_ is really an array of ints. (see the allocator, above).
            // Cast it back before delete, so the compiler won't incorrectly call
            // destructors.
            delete[](reinterpret_cast<int*>(rep_));
        }
    }

    Rep* rep_;
    static Rep nullrep_;
};
// Equality: compare lengths first (cheap on some platforms), then the bytes.
inline bool operator==(const TiXmlString& a, const TiXmlString& b) {
    if (a.length() != b.length()) return false;
    return strcmp(a.c_str(), b.c_str()) == 0;
}

inline bool operator<(const TiXmlString& a, const TiXmlString& b) {
    return strcmp(a.c_str(), b.c_str()) < 0;
}

// The remaining orderings are derived from == and <.
inline bool operator!=(const TiXmlString& a, const TiXmlString& b) { return !(a == b); }
inline bool operator>(const TiXmlString& a, const TiXmlString& b) { return b < a; }
inline bool operator<=(const TiXmlString& a, const TiXmlString& b) { return !(b < a); }
inline bool operator>=(const TiXmlString& a, const TiXmlString& b) { return !(a < b); }

// Mixed TiXmlString / C-string comparisons.
inline bool operator==(const TiXmlString& a, const char* b) { return strcmp(a.c_str(), b) == 0; }
inline bool operator==(const char* a, const TiXmlString& b) { return b == a; }
inline bool operator!=(const TiXmlString& a, const char* b) { return !(a == b); }
inline bool operator!=(const char* a, const TiXmlString& b) { return !(b == a); }

// Concatenation helpers; defined out of line.
TiXmlString operator+(const TiXmlString& a, const TiXmlString& b);
TiXmlString operator+(const TiXmlString& a, const char* b);
TiXmlString operator+(const char* a, const TiXmlString& b);
/*
TiXmlOutStream is an emulation of std::ostream. It is based on TiXmlString.
Only the operators that we need for TinyXML have been developped.
*/
// Minimal std::ostream look-alike built on top of TiXmlString; only the
// insertion operators TinyXML itself needs are provided.
class TiXmlOutStream : public TiXmlString {
public:
    // Stream-insert another TiXmlString by appending it.
    TiXmlOutStream& operator<<(const TiXmlString& value) {
        *this += value;
        return *this;
    }

    // Stream-insert a NUL-terminated C string by appending it.
    TiXmlOutStream& operator<<(const char* value) {
        *this += value;
        return *this;
    }
};
#endif // TIXML_STRING_INCLUDED
#endif // TIXML_USE_STL
File diff suppressed because it is too large Load Diff
+132
View File
@@ -0,0 +1,132 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "cachemanager.h" // for cache_manager
#include <algorithm> // for move
#include <dlfcn.h> // for dlerror, dlclose, dlopen, dlsym, etc
#include <logger.h> // for LOG_ERR_
#include <stdbool.h> // for bool, false, true
#include <errno.h>
namespace etix {
namespace cameradar {
#ifdef __APPLE__
const std::string cache_manager::PLUGIN_EXT = ".dylib";
#elif __linux__
const std::string cache_manager::PLUGIN_EXT = ".so";
#endif
const std::string cache_manager::default_symbol = "cache_manager_instance_new";
// Stores the plugin location (path), plugin name, and factory symbol name.
// The shared object is not opened here; call make_instance() to load it.
cache_manager::cache_manager(const std::string& path,
const std::string& name,
const std::string& symbol)
: name(name), path(path), symbol(symbol), handle(nullptr), ptr(nullptr) {}
// Move constructor: transfers every member of the wrapper.
// BUG FIX: the original init list omitted `name`, leaving the moved-to
// object with an empty plugin name so make_full_path() built a bogus
// library path after any move.
cache_manager::cache_manager(cache_manager&& old)
    : name(std::move(old.name)), path(std::move(old.path)), symbol(std::move(old.symbol)) {
    // Steal the dlopen handle and the plugin instance so the moved-from
    // object's destructor will not dlclose()/delete them.
    this->handle = old.handle;
    old.handle = nullptr;
    this->ptr = old.ptr;
    old.ptr = nullptr;
}
// Destroys the plugin instance *before* dlclose(): the instance's code
// lives inside the shared object, so the library must still be mapped
// while `delete` runs.
cache_manager::~cache_manager() {
delete this->ptr;
if (this->handle) { dlclose(handle); }
}
// Loads the plugin's shared object and instantiates its cache manager.
// Returns false when the library cannot be opened, the factory symbol is
// missing, or the factory returns nullptr.
bool
cache_manager::make_instance() {
    cache_manager_iface* (*new_fn)() = nullptr;
    // Gets the path to the dynamic library
    auto real_path = this->make_full_path();
    // Opens it to get the handle
    this->handle = dlopen(real_path.c_str(), RTLD_LAZY);
    if (this->handle == nullptr) {
        std::cout << "error: " << dlerror() << std::endl;
        LOG_ERR_("Failed to load cache manager: " + this->name + ", invalid path",
                 "cache manager loader");
        return false;
    }
    // BUG FIX: dlerror() must be called *before* dlsym() to clear any
    // stale error; the only reliable failure signal is a fresh non-null
    // dlerror() afterwards. The original cleared it after the check,
    // which could misreport a perfectly valid symbol as a failure.
    dlerror();
    *(void**)(&new_fn) = dlsym(this->handle, symbol.c_str());
    if (dlerror() != nullptr) {
        LOG_ERR_("Invalid cache manager package: " + this->name, "cache manager loader");
        return false;
    }
    // Instantiates the cache manager through the plugin's factory.
    this->ptr = (*new_fn)();
    if (this->ptr == nullptr) {
        LOG_ERR_("Invalid cache manager format: " + this->name, "cache manager loader");
        return false;
    }
    return true;
}
// Builds the library path: <path>/lib<name>_cache_manager<PLUGIN_EXT>
// e.g. /usr/lib/libdumb_cache_manager.so
std::string
cache_manager::make_full_path() {
    return this->path + "/lib" + this->name + "_cache_manager" + PLUGIN_EXT;
}
// Arrow access forwards to the loaded plugin instance (may be null
// before make_instance() succeeds).
cache_manager_iface* cache_manager::operator->() {
    return ptr;
}
// Const arrow access forwards to the loaded plugin instance.
const cache_manager_iface* cache_manager::operator->() const {
    return ptr;
}
// A cache_manager compares equal to nullptr while no instance is loaded.
bool
operator==(std::nullptr_t nullp, const cache_manager& p) {
    return nullp == p.ptr;
}
// Symmetric overload of (nullptr == cache_manager).
bool
operator==(const cache_manager& p, std::nullptr_t nullp) {
    return nullp == p.ptr;
}
// True once make_instance() has produced a plugin object.
bool
operator!=(std::nullptr_t nullp, const cache_manager& p) {
    return !(p.ptr == nullp);
}
// Symmetric overload of (cache_manager != nullptr).
bool
operator!=(const cache_manager& p, std::nullptr_t nullp) {
    return !(p.ptr == nullp);
}
// Identity accessor: returns a reference to this base object.
cache_manager_base&
cache_manager_base::get_instance() {
return *this;
}
} // cameradar
} // etix
+238
View File
@@ -0,0 +1,238 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <configuration.h> // configuration
#include <fstream> // std::ifstream
#include <unistd.h> // access, F_OK
namespace etix {
namespace cameradar {
const std::string configuration::name_ = "configuration";
// Reads the file at "path" in one pass.
// On success returns { true, contents } where every line is rejoined
// with a trailing '\n'; on failure returns { false, "" }.
std::pair<bool, std::string>
read_file(const std::string& path) {
    std::ifstream file{ path };
    if (not file.is_open()) { return std::make_pair(false, std::string{}); }
    std::string content;
    for (std::string line; getline(file, line);) { content += line + "\n"; }
    file.close();
    return std::make_pair(true, content);
}
// Loads the credentials (IDs) dictionary into `usernames`/`passwords`.
// Falls back to the default dictionary when no path is configured.
// Returns false when the file is missing, unparsable, or malformed.
bool
configuration::load_ids() {
    std::string content;
    LOG_DEBUG_("Trying to open ids file from " + this->rtsp_ids_file, "configuration");
    if (this->rtsp_ids_file.size()) {
        content = read_file(this->rtsp_ids_file.c_str()).second;
    } else {
        LOG_WARN_(
            "No ids file detected in your configuration, Cameradar will use "
            "the default one "
            "instead.",
            "configuration");
        content = read_file(default_rtsp_ids_file).second;
    }
    if (not content.size()) {
        LOG_ERR_(
            "Could not load ids file. Make sure you provided a valid path in your "
            "configuration file.",
            "configuration");
        return false;
    }
    auto root = Json::Value();
    auto reader = Json::Reader();
    // BUG FIX: the parse result used to be ignored; a syntactically
    // broken ids file silently produced empty dictionaries and a
    // successful return.
    if (not reader.parse(content, root)) {
        LOG_ERR_("Invalid JSON in ids file:\n" + reader.getFormattedErrorMessages(),
                 "configuration");
        return false;
    }
    for (unsigned int i = 0; i < root["username"].size(); i++) {
        if (not root["username"][i].isString()) {
            LOG_ERR_("\"username\" should be of type string", "configuration");
            return false;
        }
        this->usernames.push_back(root["username"][i].asString());
    }
    for (unsigned int i = 0; i < root["password"].size(); i++) {
        if (not root["password"][i].isString()) {
            LOG_ERR_("\"password\" should be of type string", "configuration");
            return false;
        }
        this->passwords.push_back(root["password"][i].asString());
    }
    return true;
}
// Loads the URL (route) dictionary into `paths`.
// Falls back to the default dictionary when no path is configured.
// Returns false when the file is missing, unparsable, or malformed.
bool
configuration::load_url() {
    std::string content;
    LOG_DEBUG_("Trying to open url file from " + this->rtsp_url_file, "configuration");
    if (this->rtsp_url_file.size()) {
        content = read_file(this->rtsp_url_file.c_str()).second;
    } else {
        LOG_WARN_(
            "No url file detected in your configuration, Cameradar will use "
            "the default one "
            "instead.",
            "configuration");
        content = read_file(default_rtsp_url_file).second;
    }
    if (not content.size()) {
        // BUG FIX: this message was copy-pasted from load_ids() and
        // wrongly said "ids file".
        LOG_ERR_(
            "Could not load url file. Make sure you provided a valid path in your "
            "configuration file.",
            "configuration");
        return false;
    }
    auto root = Json::Value();
    auto reader = Json::Reader();
    // BUG FIX: parse failures used to be ignored, silently yielding an
    // empty URL dictionary and a successful return.
    if (not reader.parse(content, root)) {
        LOG_ERR_("Invalid JSON in url file:\n" + reader.getFormattedErrorMessages(),
                 "configuration");
        return false;
    }
    for (unsigned int i = 0; i < root["urls"].size(); i++) {
        if (not root["urls"][i].isString()) {
            LOG_ERR_("\"urls\" should be of type string", "configuration");
            return false;
        }
        this->paths.push_back(root["urls"][i].asString());
    }
    return true;
}
// Builds a configuration struct from parsed JSON, substituting the
// compiled-in default for every missing field.
// NOTE: despite the name this function *deserializes*; the name is kept
// for compatibility with existing callers.
std::pair<bool, configuration>
serialize(const Json::Value& root) {
    std::pair<bool, configuration> ret;
    try {
        // Pick the configured string for `key`, or the fallback default.
        auto field_or = [&root](const char* key, const std::string& fallback) -> std::string {
            return root[key].isNull() ? fallback : root[key].asString();
        };
        ret.second.ports = field_or("ports", default_ports);
        ret.second.subnets = field_or("subnets", default_subnets);
        ret.second.rtsp_ids_file = field_or("rtsp_ids_file", default_rtsp_ids_file);
        ret.second.rtsp_url_file = field_or("rtsp_url_file", default_rtsp_url_file);
        ret.second.thumbnail_storage_path =
            field_or("thumbnail_storage_path", default_thumbnail_storage_path);
        ret.second.cache_manager_path = field_or("cache_manager_path", default_cache_manager_path);
        ret.second.cache_manager_name = field_or("cache_manager_name", default_cache_manager_name);
        ret.first = true;
    } catch (const std::exception& e) {
        LOG_ERR_("Configuration failed : " + std::string(e.what()), "configuration");
        ret.first = false;
    }
    return ret;
}
// Returns (a copy of) the unmodified JSON tree the configuration was
// loaded from.
Json::Value
configuration::get_raw() const {
return this->raw_conf;
}
// Loads the configuration from the path given with -c on the command
// line, or from the compiled-in default path when -c is absent.
// Returns { true, configuration } on success, { false, empty } otherwise.
std::pair<bool, configuration>
load(const std::pair<bool, etix::tool::opt_parse>& args) {
    std::string path = etix::cameradar::default_configuration_path;
    if (args.second.exist("-c")) {
        path = args.second["-c"];
    } else {
        LOG_WARN_("No custom path set, trying to use default path: " + path, "main");
    }
    // The file must exist before we try to read it.
    if (access(path.c_str(), F_OK) == -1) {
        LOG_ERR_("Can't access: " + path, "configuration");
        return std::make_pair(false, configuration{});
    }
    const auto content = read_file(path);
    if (not content.first) {
        LOG_ERR_(
            "Can't open configuration file, you should check your rights to "
            "access the file",
            "configuration");
        return std::make_pair(false, configuration{});
    }
    // Parse and validate the JSON.
    Json::Value root;
    Json::Reader reader;
    if (not reader.parse(content.second, root)) {
        LOG_ERR_("Can't load configuration, invalid json format:\n" +
                     reader.getFormattedErrorMessages(),
                 "configuration");
        return std::make_pair(false, configuration{});
    }
    // Deserialize the JSON into a configuration struct, then load the
    // auxiliary dictionaries.
    std::pair<bool, configuration> conf = serialize(root);
    conf.second.raw_conf = root;
    conf.first &= conf.second.load_url();
    conf.first &= conf.second.load_ids();
    // Command-line overrides take precedence over the file contents.
    if (args.second.exist("-s")) conf.second.subnets = args.second["-s"];
    if (args.second.exist("-p")) conf.second.ports = args.second["-p"];
    return conf;
}
}
}
+104
View File
@@ -0,0 +1,104 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <describe.h>
#include <mutex>
namespace etix {
namespace cameradar {
std::mutex m;
// libcurl write callback that discards the response body: the probe only
// needs the RTSP response code. Reporting the bytes as consumed keeps the
// transfer going; returning 0 (degenerate input) aborts it.
size_t
write_data(void* buffer, size_t size, size_t nmemb, void* userp) {
    (void)userp; // user pointer unused
    if (buffer == nullptr || size == 0 || nmemb == 0) return 0;
    return size * nmemb;
}
// Sends a request to the camera using the OPTION method,
// then a DESCRIBE to check for valid IDs
// then another DESCIBE with IDs if an authentication is needed
bool
curl_describe(const std::string& path, bool logs) {
CURL* csession;
CURLcode res;
struct curl_slist* custom_msg = NULL;
char URL[256];
long rc;
m.lock();
curl_global_init(0);
m.unlock();
csession = curl_easy_init();
if (csession == NULL) return -1;
sprintf(URL, "%s", path.c_str());
// These are the options for all following cURL requests
// Activate verbose if debug is needed
curl_easy_setopt(csession, CURLOPT_NOSIGNAL, 1);
curl_easy_setopt(csession, CURLOPT_TIMEOUT, 1);
curl_easy_setopt(csession, CURLOPT_NOBODY, 1);
curl_easy_setopt(csession, CURLOPT_URL, URL);
curl_easy_setopt(csession, CURLOPT_RTSP_STREAM_URI, URL);
curl_easy_setopt(csession, CURLOPT_FOLLOWLOCATION, 0);
curl_easy_setopt(csession, CURLOPT_HEADER, 0);
curl_easy_setopt(csession, CURLOPT_VERBOSE, 0);
curl_easy_setopt(csession, CURLOPT_RTSP_REQUEST, CURL_RTSPREQ_OPTIONS);
curl_easy_setopt(csession, CURLOPT_WRITEFUNCTION, write_data);
// This request will handshake the stream's server, it should always return 200 OK
curl_easy_perform(csession);
curl_easy_getinfo(csession, CURLINFO_RESPONSE_CODE, &rc);
custom_msg = curl_slist_append(
custom_msg, "Accept: application/x-rtsp-mh, application/rtsl, application/sdp");
curl_easy_setopt(csession, CURLOPT_RTSPHEADER, custom_msg);
curl_easy_setopt(csession, CURLOPT_RTSP_REQUEST, CURL_RTSPREQ_DESCRIBE);
// This request will check if the given path is right without the need of encrypted ids
unsigned long pos = path.find("@");
if (pos != std::string::npos) {
std::string encoded = etix::tool::encode::encode64(path.substr(7, pos - 7));
custom_msg =
curl_slist_append(custom_msg, std::string("Authorization: Basic " + encoded).c_str());
curl_easy_setopt(csession, CURLOPT_RTSPHEADER, custom_msg);
curl_easy_setopt(csession, CURLOPT_RTSP_REQUEST, CURL_RTSPREQ_DESCRIBE);
// curl_easy_setopt(csession, CURLOPT_WRITEDATA, protofile);
// This request will check if the given ids are good
curl_easy_perform(csession); // will return 404 if good ids, 401 if bad ids
res = curl_easy_getinfo(csession, CURLINFO_RESPONSE_CODE, &rc);
} else {
curl_easy_perform(
csession); // will return 404 if no ids and bad route, 401 if ids, 200 is all ok
res = curl_easy_getinfo(csession, CURLINFO_RESPONSE_CODE, &rc);
}
curl_easy_cleanup(csession);
m.lock();
curl_global_cleanup();
m.unlock();
LOG_DEBUG_("[" + path + "] Response code : " + std::to_string(rc), "describe");
if (logs) {
// Some cameras return 400 instead of 401, don't know why.
// Some cameras timeout and then curl considers the status as 0
// GST-RTSP-SERVER returns 404 instead of 401, then 401 instead of 404.
if (rc != 401 && rc != 400 && rc && pos == std::string::npos)
LOG_INFO_("Unprotected camera discovered.", "brutelogs");
return ((res == CURLE_OK) && rc != 401 && rc != 400 && rc);
}
return ((res == CURLE_OK) && rc != 404 && rc != 400 && rc);
}
}
}
+108
View File
@@ -0,0 +1,108 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dispatcher.h"
namespace etix {
namespace cameradar {
using namespace std::chrono_literals;
// The main loop of the binary.
// Hands the configuration to the cache manager, executes the task queue
// on a worker thread, then polls until either every task finished or the
// user interrupted with CTRL+C (a second CTRL+C force-stops).
void
dispatcher::run() {
if (not(*cache)->configure(std::make_shared<configuration>(conf))) {
LOG_ERR_(
"There was a problem with the cache manager, Cameradar can't work properly without "
"cache management",
"dispatcher");
return;
}
// Run the task queue off-thread so this thread can watch for signals.
std::thread worker(&dispatcher::do_stuff, this);
using namespace std::chrono_literals;
// Catch CTRL+C signal
signal_handler::instance();
// Wait for event or end
while (signal_handler::instance().should_stop() not_eq stop_priority::stop &&
current != task::finished) {
std::this_thread::sleep_for(30ms);
}
if (doing_stuff()) {
LOG_INFO_("Waiting for a task to terminate", "dispatcher");
LOG_INFO_("Press CTRL+C again to force stop", "dispatcher");
}
// Waiting for task to cleanup / force stop command
while ((signal_handler::instance().should_stop() not_eq stop_priority::force_stop) and
doing_stuff()) {
std::this_thread::sleep_for(30ms);
}
worker.join();
}
// Builds the task queue from the command-line switches, then runs the
// tasks in order until completion, failure, or a stop request.
void
dispatcher::do_stuff() {
    // --gst-rtsp-server inverts the bruteforce order to match GST RTSP
    // Server's implementation of RTSP (see README). Factored into a
    // lambda: the same push sequence used to be duplicated verbatim.
    auto push_bruteforce = [this] {
        if (opts.second.exist("--gst-rtsp-server")) {
            queue.push_back(new etix::cameradar::brutepath(cache, conf, nmap_output));
            queue.push_back(new etix::cameradar::brutelogs(cache, conf, nmap_output));
        } else {
            queue.push_back(new etix::cameradar::brutelogs(cache, conf, nmap_output));
            queue.push_back(new etix::cameradar::brutepath(cache, conf, nmap_output));
        }
    };
    if (opts.second.exist("-d")) {
        queue.push_back(new etix::cameradar::mapping(cache, conf, nmap_output));
        queue.push_back(new etix::cameradar::parsing(cache, conf, nmap_output));
    }
    if (opts.second.exist("-b")) { push_bruteforce(); }
    if (opts.second.exist("-t")) {
        queue.push_back(new etix::cameradar::thumbnail(cache, conf, nmap_output));
    }
    if (opts.second.exist("-g")) {
        queue.push_back(new etix::cameradar::stream_check(cache, conf, nmap_output));
    }
    // No task switch at all means "run the full pipeline".
    if (!opts.second.exist("-d") && !opts.second.exist("-b") && !opts.second.exist("-t") &&
        !opts.second.exist("-g")) {
        queue.push_back(new etix::cameradar::mapping(cache, conf, nmap_output));
        queue.push_back(new etix::cameradar::parsing(cache, conf, nmap_output));
        push_bruteforce();
        queue.push_back(new etix::cameradar::thumbnail(cache, conf, nmap_output));
        queue.push_back(new etix::cameradar::stream_check(cache, conf, nmap_output));
    }
    queue.push_back(new etix::cameradar::print(cache, conf, nmap_output));
    while (queue.size() > 0 && signal_handler::instance().should_stop() == stop_priority::running) {
        if (queue.front()->run()) {
            // BUG FIX: completed tasks were popped without being deleted,
            // leaking every task object.
            // NOTE(review): assumes the task base class has a virtual
            // destructor — confirm in the task header.
            delete queue.front();
            queue.pop_front();
        } else {
            LOG_ERR_("An error occured in one of the tasks, Cameradar will now stop.",
                     "dispatcher");
            break;
        }
        std::this_thread::sleep_for(30ms);
    }
    this->current = task::finished;
}
}
}
+124
View File
@@ -0,0 +1,124 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "encode.h"
#include <ctype.h>
namespace etix {
namespace tool {
namespace encode {
// Encodes an arbitrary byte string to base64.
std::string
encode64(const std::string& str_to_encode) {
    const auto* raw = reinterpret_cast<const unsigned char*>(str_to_encode.c_str());
    return base64_encode(raw, str_to_encode.length());
}
// Decodes a base64 string back into the original byte string.
std::string
decode64(const std::string& str_to_decode) {
return base64_decode(str_to_decode);
}
// Standard base64 alphabet (RFC 4648, '+' and '/' variant).
static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

// True when `c` belongs to the base64 alphabet (padding '=' excluded).
static inline bool
is_base64(unsigned char c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

/* from external source */
// Encodes in_len bytes: each 3-byte group becomes 4 alphabet characters;
// a trailing partial group is padded with '='.
std::string
base64_encode(unsigned char const* bytes_to_encode, unsigned int in_len) {
    std::string out;
    unsigned char triple[3];
    unsigned char quad[4];
    int filled = 0;
    while (in_len--) {
        triple[filled++] = *(bytes_to_encode++);
        if (filled == 3) {
            quad[0] = (triple[0] & 0xfc) >> 2;
            quad[1] = ((triple[0] & 0x03) << 4) + ((triple[1] & 0xf0) >> 4);
            quad[2] = ((triple[1] & 0x0f) << 2) + ((triple[2] & 0xc0) >> 6);
            quad[3] = triple[2] & 0x3f;
            for (int k = 0; k < 4; ++k) out += base64_chars[quad[k]];
            filled = 0;
        }
    }
    if (filled) {
        // Zero-fill the incomplete group, emit filled+1 characters, then
        // pad with '=' up to 4.
        for (int k = filled; k < 3; ++k) triple[k] = '\0';
        quad[0] = (triple[0] & 0xfc) >> 2;
        quad[1] = ((triple[0] & 0x03) << 4) + ((triple[1] & 0xf0) >> 4);
        quad[2] = ((triple[1] & 0x0f) << 2) + ((triple[2] & 0xc0) >> 6);
        quad[3] = triple[2] & 0x3f;
        for (int k = 0; k < filled + 1; ++k) out += base64_chars[quad[k]];
        while (filled++ < 3) out += '=';
    }
    return out;
}

/* from external source */
// Decodes until the first '=' or non-alphabet character; each 4-character
// group yields 3 bytes, a trailing partial group yields filled-1 bytes.
std::string
base64_decode(std::string const& encoded_string) {
    int remaining = encoded_string.size();
    int pos = 0;
    int filled = 0;
    unsigned char quad[4], triple[3];
    std::string out;
    while (remaining-- && (encoded_string[pos] != '=') && is_base64(encoded_string[pos])) {
        quad[filled++] = encoded_string[pos];
        pos++;
        if (filled == 4) {
            for (int k = 0; k < 4; ++k) quad[k] = base64_chars.find(quad[k]);
            triple[0] = (quad[0] << 2) + ((quad[1] & 0x30) >> 4);
            triple[1] = ((quad[1] & 0xf) << 4) + ((quad[2] & 0x3c) >> 2);
            triple[2] = ((quad[2] & 0x3) << 6) + quad[3];
            for (int k = 0; k < 3; ++k) out += triple[k];
            filled = 0;
        }
    }
    if (filled) {
        for (int k = filled; k < 4; ++k) quad[k] = 0;
        for (int k = 0; k < 4; ++k) quad[k] = base64_chars.find(quad[k]);
        triple[0] = (quad[0] << 2) + ((quad[1] & 0x30) >> 4);
        triple[1] = ((quad[1] & 0xf) << 4) + ((quad[2] & 0x3c) >> 2);
        triple[2] = ((quad[2] & 0x3) << 6) + quad[3];
        for (int k = 0; k < filled - 1; ++k) out += triple[k];
    }
    return out;
}
}
}
}
+124
View File
@@ -0,0 +1,124 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "fs.h"
#include <vector> // for std::vector
#include <sstream> // for std::stringstream
#include <pwd.h> // for getpwuid, passwd
#include <stddef.h> // for size_t
#include <sys/stat.h> // for stat, mkdir, S_ISDIR
#include <unistd.h> // for getuid
#include <fstream> // for std::ifstream
namespace etix {
namespace tool {
// Splits `s` on `delim`. Empty segments are preserved; a trailing
// delimiter yields no trailing empty element (std::getline semantics).
std::vector<std::string>
split(const std::string& s, char delim) {
    std::vector<std::string> parts;
    std::istringstream stream{ s };
    for (std::string piece; std::getline(stream, piece, delim);) { parts.push_back(piece); }
    return parts;
}
namespace fs {
// Classifies `folder`: existing directory, existing non-directory, or
// nonexistent path.
fs_error
is_folder(const std::string& folder) {
    struct stat sb;
    if (stat(folder.c_str(), &sb) != 0) { return fs_error::dont_exist; }
    return S_ISDIR(sb.st_mode) ? fs_error::is_dir : fs_error::is_not_dir;
}
// Ensures `folder` is usable: reuse an existing directory, refuse an
// existing non-directory, create the whole tree otherwise.
bool
get_or_create_folder(const std::string& folder) {
    switch (is_folder(folder)) {
    case fs_error::is_dir: return true;
    case fs_error::is_not_dir: return false;
    case fs_error::dont_exist: return create_recursive_folder(folder);
    }
    return false; // unreachable: all enumerators handled above
}
// Creates a single directory with mode 0755; true on success.
bool
create_folder(const std::string& folder) {
    return mkdir(folder.c_str(), 0755) == 0;
}
// Creates every missing directory on `folder`'s path (like `mkdir -p`).
// BUG FIX: the original unconditionally returned true, hiding mkdir
// failures (permissions, path component being a file, ...). Now returns
// true only when the full path exists as a directory afterwards.
bool
create_recursive_folder(const std::string& folder) {
    auto path_elems = split(folder, '/');
    std::string current_path = folder[0] == '/' ? "/" : "";
    for (const auto& elem : path_elems) {
        current_path += elem;
        if (is_folder(current_path) == fs_error::dont_exist) create_folder(current_path);
        current_path += '/';
    }
    return is_folder(folder) == fs_error::is_dir;
}
// Returns the directory part of `full_file_path` (without trailing
// slash), or "" when the path has no directory component.
std::string
get_file_folder(std::string full_file_path) {
    // BUG FIX: back() on an empty string is undefined behavior.
    if (full_file_path.empty()) return "";
    // remove ending slash
    if (full_file_path.back() == '/') full_file_path.pop_back();
    size_t last_slash_position = full_file_path.find_last_of('/');
    // if there is no slash, there is no folder to return
    if (last_slash_position == std::string::npos) return "";
    return std::string(full_file_path, 0, last_slash_position);
}
// Returns the current user's home directory, or "" when the passwd
// database has no entry for this uid.
// BUG FIX: the original dereferenced getpwuid()'s result without a null
// check — getpwuid() returns NULL when no matching entry exists.
std::string
home(void) {
    struct passwd* passwdEnt = getpwuid(getuid());
    if (passwdEnt == nullptr || passwdEnt->pw_dir == nullptr) { return {}; }
    return { passwdEnt->pw_dir };
}
// Byte-for-byte file copy from `src` to `dst`.
// BUG FIX: the original never checked the destination stream, reporting
// success even when `dst` could not be opened for writing.
bool
copy(const std::string& src, const std::string& dst) {
    std::ifstream src_stream(src, std::ios::binary);
    std::ofstream dst_stream(dst, std::ios::binary);
    if (not src_stream.is_open() or not dst_stream.is_open()) return false;
    dst_stream << src_stream.rdbuf();
    return true;
}
} // fs
} // tool
} // etix
@@ -0,0 +1,39 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <launch_command.h>
namespace etix {
namespace cameradar {
// Runs `cmd` through the shell and reports whether it exited normally.
// Note: returns true for *any* normal exit, including non-zero codes —
// callers relying on this behavior are preserved.
bool
launch_command(const std::string& cmd) {
    const int status = system(cmd.c_str());
    if (status < 0) {
        // system() itself failed (fork/exec error).
        LOG_ERR_("Error: " + std::string(strerror(errno)) + "", "dispatcher");
        return false;
    }
    if (WIFEXITED(status)) {
        LOG_DEBUG_("Program returned normally, exit code " + std::to_string(WEXITSTATUS(status)),
                   "dispatcher");
        return true;
    }
    // Killed by a signal or otherwise terminated abnormally.
    LOG_WARN_("Program exited abnormaly.", "dispatcher");
    return false;
}
}
}
+138
View File
@@ -0,0 +1,138 @@
// Copyright 2016 Etix Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "version.h" // versionning
#include <dispatcher.h> // program loop
#include <fs.h> // fs::home
#include <iostream> // iostream
#include <opt_parse.h> // parsing opt
namespace cmrdr = etix::cameradar;
// Prints the compiled-in version and build identifiers on stdout.
void
print_version() {
std::cout << "Cameradar version " << CAMERADAR_VERSION << std::endl;
std::cout << "Build " << CAMERADAR_VERSION_BUILD << std::endl;
}
// Command line parsing.
// Declares every supported switch, runs the parser, and handles the
// -h / -v early exits. Returns { proceed?, parser }: first is false when
// the program should stop (help/version shown, or a parse error).
std::pair<bool, etix::tool::opt_parse>
parse_cmdline(int argc, char* argv[]) {
auto opt_parse = etix::tool::opt_parse{ argc, argv };
// The trailing boolean marks whether the switch expects a value.
opt_parse.optional("-s", "Set subnets (e.g.: `172.16.0.0/24`)", true);
opt_parse.optional("-p", "Set ports (e.g.: `554,8554`)", true);
opt_parse.optional("-c", "Path to the configuration file (-c /path/to/conf)", true);
opt_parse.optional("-l", "Set log level (-l 4 will only show warnings and errors)", true);
opt_parse.optional("-d", "Launch the discovery tool on the given subnet", false);
opt_parse.optional("-b", "Launch the bruteforce tool on all discovered devices", false);
opt_parse.optional("-t", "Generate thumbnails from detected cameras", false);
opt_parse.optional("-g", "Check if the stream can be opened with GStreamer", false);
opt_parse.optional("-v", "Display Cameradar's version", false);
opt_parse.optional("-h", "Display this help", false);
opt_parse.optional(
"--gst-rtsp-server",
"Change the order of the bruteforce to match GST RTSP Server's implementation of "
"RTSP. Some cameras and RTSP servers will use this standard instead of the more "
"standard one. For more information, see the README.md file.",
false);
opt_parse.execute();
// Early exits: help and version print and stop; errors print usage.
if (opt_parse.exist("-h")) {
opt_parse.print_help();
return std::make_pair(false, opt_parse);
} else if (opt_parse.exist("-v")) {
print_version();
return std::make_pair(false, opt_parse);
} else if (opt_parse.has_error()) {
std::cout << "Usage: ./cameradar [option]\n\toptions:\n" << std::endl;
opt_parse.print_help();
return std::make_pair(false, opt_parse);
}
return std::make_pair(true, opt_parse);
}
// Check that `path` exists, is a directory, and is readable and writable
// by the current user.
bool
check_folder(const std::string& path) {
    struct stat sb;
    const bool usable = (stat(path.c_str(), &sb) == 0) && S_ISDIR(sb.st_mode) &&
                        (sb.st_mode & S_IRUSR) && (sb.st_mode & S_IWUSR);
    if (usable) {
        LOG_INFO_("Folder " + path + " is available and has sufficient rights", "main");
        return true;
    }
    LOG_ERR_("Folder " + path + " has insufficient rights, please check your configuration",
             "main");
    return false;
}
// Check if the storage path is available.
// Thumbnails are written there, so the folder must exist and be R/W.
bool
check_storage_path(const std::string& thumbnail_storage_path) {
LOG_INFO_("Checking if storage path exists and are usable", "main");
return (check_folder(thumbnail_storage_path));
}
// Entry point: parses the command line, loads the configuration, loads
// the cache manager plugin, then runs the dispatcher loop.
int
main(int argc, char* argv[]) {
    etix::tool::logger::get_instance("cameradar").set_level(etix::tool::loglevel::DEBUG);
    auto args = parse_cmdline(argc, argv);
    if (not args.first) return EXIT_FAILURE;
    print_version();
    if (not args.second.exist("-l")) {
        LOG_INFO_("No log level set, using log level 1", "main");
    } else {
        try {
            int level = std::stoi(args.second["-l"]);
            etix::tool::logger::get_instance("cameradar")
                .set_level(static_cast<etix::tool::loglevel>(level));
        } catch (...) {
            LOG_ERR_("Invalid log level format, log level should be 1, 2, 4, 5 or 6", "main");
            return EXIT_FAILURE;
        }
    }
    // Try to load the configuration
    auto conf = cmrdr::load(args);
    if (not conf.first) { return EXIT_FAILURE; }
    LOG_INFO_("Configuration successfully loaded", "main");
    // If one of the paths is invalid, exit
    auto paths_ok = check_storage_path(conf.second.thumbnail_storage_path);
    if (not paths_ok) { return EXIT_FAILURE; }
    // Load the cache manager plugin named in the configuration.
    auto plug = std::make_shared<etix::cameradar::cache_manager>(conf.second.cache_manager_path,
                                                                 conf.second.cache_manager_name);
    if (not plug || not plug->make_instance()) {
        LOG_ERR_(std::string("Invalid cache manager "), "cameradar");
        // BUG FIX: the original `return false;` converted to 0
        // (EXIT_SUCCESS), so a missing/broken cache manager looked like a
        // successful run to the shell and to CI.
        return EXIT_FAILURE;
    }
    LOG_INFO_("Launching Cameradar, press CTRL+C to gracefully stop", "main");
    etix::cameradar::dispatcher disp(conf.second, plug, args);
    disp.run();
    LOG_WARN_("See ya !", "cameradar");
    return EXIT_SUCCESS;
}

Some files were not shown because too many files have changed in this diff Show More