diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f1996d60a..cdd7a74fd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,7 +25,7 @@ jobs: with: driver-opts: image=moby/buildkit:v0.12.0 args: | - --namespace cortexneurons --path analyzers --registry_dockerhub ${{ secrets.REGISTRY_DOCKERHUB }} --registry_harbor ${{ secrets.REGISTRY_HARBOR }} ${{ (startsWith(github.ref, 'refs/tags') || github.event_name == 'schedule') && '--stable' || '' }} + --namespace cortexneurons --path analyzers --registry_dockerhub ${{ secrets.REGISTRY_DOCKERHUB }} ${{ (startsWith(github.ref, 'refs/tags') || github.event_name == 'schedule') && '--stable' || '' }} build_responders: name: Build Responders @@ -40,7 +40,7 @@ jobs: with: driver-opts: image=moby/buildkit:v0.12.0 args: | - --namespace cortexneurons --path responders --registry_dockerhub ${{ secrets.REGISTRY_DOCKERHUB }} --registry_harbor ${{ secrets.REGISTRY_HARBOR }} ${{ (startsWith(github.ref, 'refs/tags') || github.event_name == 'schedule') && '--stable' || '' }} + --namespace cortexneurons --path responders --registry_dockerhub ${{ secrets.REGISTRY_DOCKERHUB }} ${{ (startsWith(github.ref, 'refs/tags') || github.event_name == 'schedule') && '--stable' || '' }} build_catalog: name: Build Catalog diff --git a/.gitignore b/.gitignore index d52f58ee7..d6cfa74d5 100644 --- a/.gitignore +++ b/.gitignore @@ -19,8 +19,9 @@ pyvenv.cfg share test-doc +test_doc analyzers/*/input analyzers/*/output responders/*/input responders/*/output -analyzers/*/cortexutils \ No newline at end of file +analyzers/*/cortexutils diff --git a/CHANGELOG.md b/CHANGELOG.md index ef8ccf9df..b663d4521 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,239 @@ # Changelog -## [3.2.0](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.0) +## [3.3.8](https://github.com/TheHive-Project/Cortex-Analyzers/tree/HEAD) + +[Full 
Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.7...3.3.8) + +**Closed issues:** + +- \[FR\] JAMF Protect Prevent list responder [\#1292](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1292) +- \[FR\] Add AWS Lambda responder [\#1289](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1289) +- \[FR\] Censys Analyzer v2 [\#1287](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1287) +- \[FR\] Fix the version of TheHive4py dependencies in existing responders [\#1281](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1281) +- \[Bug\] OpenCTI Analyzer [\#1280](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1280) +- \[Bug\] Phistank analyzer failing [\#1276](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1276) +- New Analyzer: QrDecode [\#1274](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1274) +- \[FR\] Update Triage Analyzer to Configure Sandbox API [\#1263](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1263) +- \[FR\] mail-subject dataType should be used instead of mail\_subject [\#1260](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1260) +- \[Bug\] Requirements don't get installed for new responder [\#1259](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1259) +- \[FR\] EclecticIQ Responder [\#1257](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1257) +- \[FR\] EclecticIQ Analyser [\#1255](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1255) +- \[FR\] Added capabilities/features for Microsoft Defender for Endpoint responder [\#1229](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1229) +- \[Bug\] Fortiguard parser error [\#1228](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1228) +- \[FR\]Binalyze AIR responder [\#1218](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1218) +- AWX Responder [\#1213](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1213) +- 
\[Bug\]\[URLhaus\_2\_0\] - Empty summary for positive results [\#1210](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1210) +- Add a responder to send case information to Telegram [\#1132](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1132) +- \[FR\] Add Microsoft 365 Defender responder for Tenant Allow/Block List [\#1102](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1102) +- \[FR\] Add EchoTrail analyzer [\#1099](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1099) +- Hybrid Analysis Analyzer not working anymore [\#1090](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1090) +- \[Bug\] KnowBe4 Responder Missing Config Options [\#1086](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1086) +- \[FR\] DNSDumpster analyzer [\#1056](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1056) +- \[FR\] Okta User Lookup Analyzer [\#1047](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1047) +- Abuse\_Finder\_3\_0 \[KeyError: '\\s'\] [\#940](https://github.com/TheHive-Project/Cortex-Analyzers/issues/940) +- TorBlutmagie\_1\_0 doesn't work \[Bug\] [\#829](https://github.com/TheHive-Project/Cortex-Analyzers/issues/829) +- New Analyzer: Fireeye Capa \(WIP\) [\#822](https://github.com/TheHive-Project/Cortex-Analyzers/issues/822) + +## [3.3.7](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.7) (2024-04-11) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.6...3.3.7) + +**Closed issues:** + +- \[Bug\] MISP\_2\_1 analyzer [\#1249](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1249) +- 'Triage' analyzer adapation to fit Recorded Future solution \(based on Triage\) [\#1237](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1237) +- \[Bug\] Proofpoint error: "Unexpected Error: Strings must be encoded before hashing" [\#1250](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1250) + +**Merged pull requests:** + +- \#1250 
fix: use file\_digest to hash file [\#1251](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1251) ([To-om](https://github.com/To-om)) + +## [3.3.6](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.6) (2024-02-16) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.5...3.3.6) + +**Closed issues:** + +- \[Bug\] CrowdSec Analyzer: requests module missing [\#1227](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1227) + +**Merged pull requests:** + +- Update requirements.txt [\#1248](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1248) ([nusantara-self](https://github.com/nusantara-self)) +- Update requirements.txt [\#1247](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1247) ([cyberpescadito](https://github.com/cyberpescadito)) + +## [3.3.5](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.5) (2024-02-05) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.4...3.3.5) + +**Merged pull requests:** + +- Updated ONYPHE documentation. Fixed potential naming conflict with legacy analyzer. [\#1244](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1244) ([jimbobnet](https://github.com/jimbobnet)) +- New ONYPHE Search, ASM and Vulnscan analyzers. Updated Summary Analyzer. 
[\#1242](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1242) ([jimbobnet](https://github.com/jimbobnet)) +- Fix missing requirements.txt in CrowdSec Analyzer [\#1224](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1224) ([AlteredCoder](https://github.com/AlteredCoder)) +- StamusNetworks: fix error on empty network info [\#1220](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1220) ([regit](https://github.com/regit)) + +## [3.3.4](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.4) (2024-01-10) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.3...3.3.4) + +**Closed issues:** + +- New Analyzer: QR Code Parser [\#1238](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1238) +- \[FR\] Include additional intelligence from Recorded Future enrichment [\#1231](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1231) +- \[Bug\] Virustotal Analyzer Docker stuck "In Progress" [\#1239](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1239) + +## [3.3.3](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.3) (2023-12-28) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.2...3.3.3) + +**Closed issues:** + +- \[Bug\] Misp Analyzer [\#1235](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1235) + +**Merged pull requests:** + +- fix build for Autofocus and MalwareClustering [\#1233](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1233) ([vdebergue](https://github.com/vdebergue)) +- Fix build for several analyzers & responders [\#1230](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1230) ([vdebergue](https://github.com/vdebergue)) +- \#1231 Update Recorded Future Analyzer [\#1234](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1234) ([rpitts-recordedfuture](https://github.com/rpitts-recordedfuture)) +- Update Elasticsearch analyzer with unix format 
[\#1023](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1023) ([ajrios33](https://github.com/ajrios33)) + +## [3.3.2](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.2) (2023-08-28) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.1...3.3.2) + +**Closed issues:** + +- \[Bug\] sveral fixes for 3.3.1 [\#1214](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1214) + +**Merged pull requests:** + +- Fix/sekoiaio analyzer [\#1216](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1216) ([TonioRyo](https://github.com/TonioRyo)) + +## [3.3.1](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.1) (2023-08-18) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.3.0...3.3.1) + +## [3.3.0](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.3.0) (2023-08-16) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.9...3.3.0) + +**Closed issues:** + +- \[FR\] Azure Sign In Retriever [\#1211](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1211) +- \[Bug\] Azure Revoke Session Token Responder [\#1202](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1202) +- \[FR\] Add Bypass option for Duo Security responder [\#1200](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1200) +- Missing requirements from analyzers [\#1171](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1171) +- \[Bug\] Checkpoint responder not building [\#1209](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1209) +- \[Bug\] VirusTotal get report ip\_addresses do not return 'resolutions' [\#1204](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1204) +- \[Bug\] VirusTotal get report ip\_addresses do not return report summary [\#1203](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1203) +- \[Bug\] OpenCTI Analyser [\#1182](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1182) +- \[FR\] 
Rename LastInfoSec Analyzer to Gatewatcher and add feature [\#1152](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1152) +- HarfangLab responder contribution [\#1125](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1125) + +**Merged pull requests:** + +- Adding 'Object' key to event filters [\#1185](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1185) ([cyberpescadito](https://github.com/cyberpescadito)) +- Rectified a typo error and added a requirement, Added a requirements.txt file [\#1172](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1172) ([Black-Pearl25](https://github.com/Black-Pearl25)) +- \[NEW\] Jupyter analyzer+responder for Cortex [\#1199](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1199) ([LetMeR00t](https://github.com/LetMeR00t)) +- Fix analyzer and long report templates [\#1196](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1196) ([TonioRyo](https://github.com/TonioRyo)) +- Editing bug related to https://github.com \#1182 [\#1183](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1183) ([FormindMPO](https://github.com/FormindMPO)) +- Rename 'LastInfoSec' analyzer to 'Gatewatcher CTI' and add feature [\#1153](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1153) ([remydewa](https://github.com/remydewa)) +- fix lacking json enclosure [\#1144](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1144) ([topi-chan](https://github.com/topi-chan)) +- HarfangLab EDR responder contribution [\#1126](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1126) ([Pierre-HarfangLab](https://github.com/Pierre-HarfangLab)) + +## [3.2.9](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.9) (2023-05-04) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.8...3.2.9) + +**Closed issues:** + +- \[FR\] in-progress - Analyzer - Crowdstrike API to enrich observables [\#1176](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1176) + 
+## [3.2.8](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.8) (2023-03-09) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.7...3.2.8) + +**Closed issues:** + +- \[Bug\] Eml Parser except Exception as e: error \(Extra Spaces\) in parse.py [\#1168](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1168) + +## [3.2.7](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.7) (2023-03-09) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.6...3.2.7) + +**Merged pull requests:** + +- fix: unexpected identations [\#1167](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1167) ([GDumail](https://github.com/GDumail)) + +## [3.2.6](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.6) (2023-03-02) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.5...3.2.6) + +**Merged pull requests:** + +- CrowdSec: Set user agent of crowdsec analyzer to crowdsec-cortex/v1.0.0 [\#1164](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1164) ([sbs2001](https://github.com/sbs2001)) + +## [3.2.5](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.5) (2023-03-01) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.4...3.2.5) + +## [3.2.4](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.4) (2023-03-01) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.3...3.2.4) + +**Closed issues:** + +- \[Bug\] Analyzer Crt\_sh\_Transparency\_Logs\_1\_0 not working [\#1139](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1139) +- \[Bug\] Analyzer GoogleDNS\_resolve\_1\_0\_0 not working [\#1136](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1136) +- \[Bug\] Container for analyzer FalconSandbox missing dependencies [\#1108](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1108) +- \[FR\] New Analyzer: Palo Alto Wildfire Sandbox 
[\#910](https://github.com/TheHive-Project/Cortex-Analyzers/issues/910) +- \[Bug\] error with emlparser [\#1162](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1162) +- \[Bug\] ProofPoint\_Lookup\_1\_0 fails with "Strings must be encoded before hashing" [\#1160](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1160) +- \[Bug\] Analyzer Maltiverse\_Report\_1\_0 type url not working [\#1140](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1140) +- \[Bug\] Censys analyzer not working [\#1134](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1134) + +**Merged pull requests:** + +- Use github actions for CI [\#1165](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1165) ([vdebergue](https://github.com/vdebergue)) +- \#1160 Encode string before hashing [\#1161](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1161) ([To-om](https://github.com/To-om)) +- \#1086 Fix type in KnowBe4 configuration item [\#1159](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1159) ([To-om](https://github.com/To-om)) + +## [3.2.3](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.3) (2022-11-09) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.2...3.2.3) + +**Closed issues:** + +- \[Bug\] Falcon Responder: update python path [\#1131](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1131) +- \[Bug\] Virustotal not working correctly with proxy settings [\#1130](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1130) +- \[Bug\] MSDefender Responder has no module named cortexutils [\#1107](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1107) + +## [3.2.2](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.2) (2022-10-27) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.1...3.2.2) + +**Closed issues:** + +- update version of Emlparser report template 
[\#1129](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1129) + +## [3.2.1](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.1) (2022-10-25) + +[Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.2.0...3.2.1) + +**Closed issues:** + +- \[Bug\] fix perms on main programs [\#1128](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1128) + +## [3.2.0](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.2.0) (2022-10-21) [Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.1.1...3.2.0) **Closed issues:** +- \[FR\] Upgrade OpenCTI analyzer for v4 compatibility [\#929](https://github.com/TheHive-Project/Cortex-Analyzers/issues/929) +- Updates for documentation website [\#1113](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1113) - Build and manage images of private and custom analyzers/responders [\#1112](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1112) -- \[Bug\] MSDefender Responder has no module named cortexutils [\#1107](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1107) +- little improvements [\#1110](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1110) - \[FR\] Virustotal Analyzer and VT API v3? 
\(v2 will go offline soon\) [\#1012](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1012) - \[FR\] Verifalia analyzer [\#1007](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1007) - \[FR\] ThreatMiner analyzer [\#1005](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1005) @@ -15,9 +241,29 @@ - \[FR\] IP-API analyzer [\#1001](https://github.com/TheHive-Project/Cortex-Analyzers/issues/1001) - \[FR\] CheckPhish Analyzer [\#997](https://github.com/TheHive-Project/Cortex-Analyzers/issues/997) - \[FR\] Bitcoin Abuse Analyzer [\#995](https://github.com/TheHive-Project/Cortex-Analyzers/issues/995) -- \[FR\] Upgrade OpenCTI analyzer for v4 compatibility [\#929](https://github.com/TheHive-Project/Cortex-Analyzers/issues/929) - \[FR\] SentinelOne Hash Blacklister \(Responder\) [\#781](https://github.com/TheHive-Project/Cortex-Analyzers/issues/781) +**Merged pull requests:** + +- KasperskyTIP analyzer [\#1004](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1004) ([pjuhas](https://github.com/pjuhas)) +- Implement Microsoft 365 Defender responder [\#1124](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1124) ([joeslazaro-cdw](https://github.com/joeslazaro-cdw)) +- Implement Palo Alto Cortex XDR responder [\#1123](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1123) ([joeslazaro-cdw](https://github.com/joeslazaro-cdw)) +- Add Crowdsec CTI analyzer [\#1116](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1116) ([CERT-ARKEA](https://github.com/CERT-ARKEA)) +- Virustotal v3 [\#1111](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1111) ([lamachin3](https://github.com/lamachin3)) +- Improved emlParser [\#1109](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1109) ([lamachin3](https://github.com/lamachin3)) +- Implement EchoTrail analyzer [\#1100](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1100) ([joeslazaro-cdw](https://github.com/joeslazaro-cdw)) +- Implement CIS MCAP analyzer 
[\#1098](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1098) ([joeslazaro-cdw](https://github.com/joeslazaro-cdw)) +- Implement Palo Alto WildFire analyzer [\#1094](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1094) ([joeslazaro-cdw](https://github.com/joeslazaro-cdw)) +- feat: Improve templates for SEKOIA analyzers [\#1093](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1093) ([Darkheir](https://github.com/Darkheir)) +- Add authentication option for Maltiverse [\#1087](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1087) ([jlopezzarza](https://github.com/jlopezzarza)) +- Add additional taxonomy for IPinfo [\#1085](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1085) ([dafal](https://github.com/dafal)) +- Verifalia analyzer [\#1008](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1008) ([pjuhas](https://github.com/pjuhas)) +- ThreatMiner analyzer [\#1006](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1006) ([pjuhas](https://github.com/pjuhas)) +- IP-API analyzer [\#1002](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1002) ([pjuhas](https://github.com/pjuhas)) +- CheckPhish analyzer [\#1000](https://github.com/TheHive-Project/Cortex-Analyzers/pull/1000) ([pjuhas](https://github.com/pjuhas)) +- Bitcoin Abuse analyzer [\#999](https://github.com/TheHive-Project/Cortex-Analyzers/pull/999) ([pjuhas](https://github.com/pjuhas)) +- added initial sentinelOne responder [\#782](https://github.com/TheHive-Project/Cortex-Analyzers/pull/782) ([jobscry](https://github.com/jobscry)) + ## [3.1.1](https://github.com/TheHive-Project/Cortex-Analyzers/tree/3.1.1) (2022-06-21) [Full Changelog](https://github.com/TheHive-Project/Cortex-Analyzers/compare/3.1.0...3.1.1) @@ -158,7 +404,7 @@ - Add IVRE Analyzer [\#923](https://github.com/TheHive-Project/Cortex-Analyzers/pull/923) ([p-l-](https://github.com/p-l-)) - Feature/mail incident status 
[\#921](https://github.com/TheHive-Project/Cortex-Analyzers/pull/921) ([mkcorpc](https://github.com/mkcorpc)) - \[OSCD Initiative\] Add Azure Authentication Token Revokation Responder [\#906](https://github.com/TheHive-Project/Cortex-Analyzers/pull/906) ([Dmweiner](https://github.com/Dmweiner)) -- FIX issue \#896 [\#897](https://github.com/TheHive-Project/Cortex-Analyzers/pull/897) ([manwefm](https://github.com/manwefm)) +- FIX issue \#896 [\#897](https://github.com/TheHive-Project/Cortex-Analyzers/pull/897) ([ipfyx](https://github.com/ipfyx)) - \[Bug\] MineMeld responder domain IOC incorrect type \#892 [\#893](https://github.com/TheHive-Project/Cortex-Analyzers/pull/893) ([colin-stubbs](https://github.com/colin-stubbs)) - \[OSCD Initiative\] add Gmail responder [\#891](https://github.com/TheHive-Project/Cortex-Analyzers/pull/891) ([strassi](https://github.com/strassi)) - \[OSCD Initiative\] Add response for PaloAltoNGFW [\#886](https://github.com/TheHive-Project/Cortex-Analyzers/pull/886) ([Konakin](https://github.com/Konakin)) @@ -358,7 +604,7 @@ - change wot analyzer to support new api [\#777](https://github.com/TheHive-Project/Cortex-Analyzers/pull/777) ([dadokkio](https://github.com/dadokkio)) - add requests to requirements [\#775](https://github.com/TheHive-Project/Cortex-Analyzers/pull/775) ([dadokkio](https://github.com/dadokkio)) - \#759 module\_type removed in ThreatResponse [\#768](https://github.com/TheHive-Project/Cortex-Analyzers/pull/768) ([dadokkio](https://github.com/dadokkio)) -- Add new responder VirustotalDownloader \#765 [\#766](https://github.com/TheHive-Project/Cortex-Analyzers/pull/766) ([hariomenkel](https://github.com/hariomenkel)) +- Add new responder VirustotalDownloader \#765 [\#766](https://github.com/TheHive-Project/Cortex-Analyzers/pull/766) ([NexusFuzzy](https://github.com/NexusFuzzy)) - Add auth to Mailer and support for tasks [\#764](https://github.com/TheHive-Project/Cortex-Analyzers/pull/764) 
([dadokkio](https://github.com/dadokkio)) - DomainTools Iris Analyzer Report Updates [\#760](https://github.com/TheHive-Project/Cortex-Analyzers/pull/760) ([ChuckWoodraska](https://github.com/ChuckWoodraska)) - sinkdb: fqdn support [\#756](https://github.com/TheHive-Project/Cortex-Analyzers/pull/756) ([dadokkio](https://github.com/dadokkio)) @@ -573,7 +819,7 @@ - fix when hash not found [\#485](https://github.com/TheHive-Project/Cortex-Analyzers/pull/485) ([garanews](https://github.com/garanews)) - Umbrella analyzer: query\_limit: error if no data provided [\#479](https://github.com/TheHive-Project/Cortex-Analyzers/pull/479) ([siisar](https://github.com/siisar)) - fixed Talos analyzer [\#546](https://github.com/TheHive-Project/Cortex-Analyzers/pull/546) ([0xmilkmix](https://github.com/0xmilkmix)) -- removed python builtins from requirements.txt [\#517](https://github.com/TheHive-Project/Cortex-Analyzers/pull/517) ([github-pba](https://github.com/github-pba)) +- removed python builtins from requirements.txt [\#517](https://github.com/TheHive-Project/Cortex-Analyzers/pull/517) ([ITServ-DE](https://github.com/ITServ-DE)) - Support for Cuckoo 2.0.7 and custom CA [\#514](https://github.com/TheHive-Project/Cortex-Analyzers/pull/514) ([1earch](https://github.com/1earch)) - updated joesandbox analyzer [\#512](https://github.com/TheHive-Project/Cortex-Analyzers/pull/512) ([garanews](https://github.com/garanews)) - Metadefender analyzer [\#510](https://github.com/TheHive-Project/Cortex-Analyzers/pull/510) ([garanews](https://github.com/garanews)) diff --git a/analyzers/AbuseIPDB/abuseipdb.py b/analyzers/AbuseIPDB/abuseipdb.py index cf1fb2851..88b54da78 100755 --- a/analyzers/AbuseIPDB/abuseipdb.py +++ b/analyzers/AbuseIPDB/abuseipdb.py @@ -14,6 +14,8 @@ class AbuseIPDBAnalyzer(Analyzer): def extract_abuse_ipdb_category(category_number): # Reference: https://www.abuseipdb.com/categories mapping = { + "1": "DNS Compromise", + "2": "DNS Poisoning", "3": "Fraud Orders", "4": 
"DDOS Attack", "5": "FTP Brute-Force", @@ -36,7 +38,7 @@ def extract_abuse_ipdb_category(category_number): "22": "SSH", "23": "IoT Targeted", } - return mapping.get(str(category_number), 'unknown category') + return mapping.get(str(category_number), 'Unknown Category') def run(self): @@ -76,11 +78,38 @@ def run(self): except Exception as e: self.unexpectedError(e) + def summary(self, raw): - taxonomies = [] + taxonomies = [] # level, namespace, predicate, value + + is_whitelisted = False + data = {} + if raw and 'values' in raw: + data = raw['values'][0]['data'] + else: + return {'taxonomies': []} + + if data.get('isWhitelisted', False): + is_whitelisted = True + taxonomies.append(self.build_taxonomy('info', 'AbuseIPDB', 'Is Whitelist', 'True')) + + if data.get('isTor', False): + taxonomies.append(self.build_taxonomy('info', 'AbuseIPDB', 'Is Tor', 'True')) - if raw and 'values' in raw and raw['values'][0]['data']['totalReports'] > 0 : - taxonomies.append(self.build_taxonomy('malicious', 'AbuseIPDB', 'Records', raw['values'][0]['data']['totalReports'])) + if 'usageType' in data: + taxonomies.append(self.build_taxonomy('info', 'AbuseIPDB', 'Usage Type', data['usageType'])) + + if 'abuseConfidenceScore' in data: + if data['abuseConfidenceScore'] > 0: + taxonomies.append(self.build_taxonomy('suspicious', 'AbuseIPDB', 'Abuse Confidence Score', data['abuseConfidenceScore'])) + else: + taxonomies.append(self.build_taxonomy('safe', 'AbuseIPDB', 'Abuse Confidence Score', 0)) + + if data['totalReports'] > 0 : + if is_whitelisted: + taxonomies.append(self.build_taxonomy('info', 'AbuseIPDB', 'Records', data['totalReports'])) + else: + taxonomies.append(self.build_taxonomy('malicious', 'AbuseIPDB', 'Records', data['totalReports'])) else: taxonomies.append(self.build_taxonomy('safe', 'AbuseIPDB', 'Records', 0)) diff --git a/analyzers/Abuse_Finder/abusefinder.py b/analyzers/Abuse_Finder/abusefinder.py index 21fc623ca..3db26aff5 100755 --- a/analyzers/Abuse_Finder/abusefinder.py 
+++ b/analyzers/Abuse_Finder/abusefinder.py @@ -15,16 +15,16 @@ class AbuseFinderAnalyzer(Analyzer): def summary(self, raw): - taxonomies = [] - if raw['abuse_finder'] and raw['abuse_finder'].get('abuse'): - for abuse in raw['abuse_finder']['abuse']: - taxonomies.append(self.build_taxonomy("info", "Abuse_Finder", "Address", abuse)) - else: - taxonomies.append(self.build_taxonomy("info", "Abuse_Finder", "Address", "None")) - return {"taxonomies": taxonomies} - - return {} + try: + if raw and raw['abuse_finder'].get('abuse'): + for abuse in raw['abuse_finder']['abuse']: + taxonomies.append(self.build_taxonomy("info", "Abuse_Finder", "Address", abuse)) + else: + taxonomies.append(self.build_taxonomy("info", "Abuse_Finder", "Address", "None")) + except: + pass + return {"taxonomies": taxonomies} def abuse(self): if self.data_type == "ip": diff --git a/analyzers/Abuse_Finder/requirements.txt b/analyzers/Abuse_Finder/requirements.txt index 5f5cf9711..9d21b7487 100644 --- a/analyzers/Abuse_Finder/requirements.txt +++ b/analyzers/Abuse_Finder/requirements.txt @@ -1,3 +1,3 @@ cortexutils -abuse_finder +abuse_finder>=0.3 future diff --git a/analyzers/AnyRun/AnyRun_Sandbox_Analysis.json b/analyzers/AnyRun/AnyRun_Sandbox_Analysis.json index 57bdd9759..f1a2a3e76 100644 --- a/analyzers/AnyRun/AnyRun_Sandbox_Analysis.json +++ b/analyzers/AnyRun/AnyRun_Sandbox_Analysis.json @@ -1,7 +1,7 @@ { "name": "AnyRun_Sandbox_Analysis", - "version": "1.0", - "author": "Andrea Garavaglia, Davide Arcuri, LDO-CERT", + "version": "1.1", + "author": "Andrea Garavaglia, Davide Arcuri, LDO-CERT; Nate Olsen, WSECU", "url": "https://github.com/TheHive-Project/Cortex-Analyzers", "license": "AGPL-V3", "description": "Any.Run Sandbox file analysis", @@ -31,6 +31,102 @@ "multi": false, "required": true, "defaultValue": true + }, + { + "name": "env_bitness", + "description": "default OS bitness; 32 or 64", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 32 + }, + { + "name": 
"env_version", + "description": "Which version of Windows do you want to use by default? allowed values: \"vista\", \"7\", \"8.1\", \"10\"", + "type": "string", + "multi": false, + "required": false, + "defaultValue": "7" + }, + { + "name": "env_type", + "description": "How much do you want pre-installed in the runtime environment? allowed values: \"clean\", \"office\", \"complete\"", + "type": "string", + "multi": false, + "required": false, + "defaultValue": "complete" + }, + { + "name": "opt_network_connect", + "description": "Do you want to disable networking? set false to disable", + "type": "boolean", + "multi": false, + "required": false, + "defaultValue": true + }, + { + "name": "opt_network_fakenet", + "description": "FakeNet feature status; set true to enable.", + "type": "boolean", + "multi": false, + "required": false, + "defaultValue": false + }, + { + "name": "opt_network_tor", + "description": "TOR using.", + "type": "Boolean", + "multi": false, + "required": false, + "defaultValue": false + }, + { + "name": "opt_network_mitm", + "description": "HTTPS MITM proxy option.", + "type": "Boolean", + "multi": false, + "required": false, + "defaultValue": false + }, + { + "name": "opt_network_geo", + "description": "Geo location option. Allowed values: \"fastest\", \"AU\", \"BR\", \"DE\", \"CH\", \"FR\", \"KR\", \"US\", \"RU\", \"GB\", \"IT\"", + "type": "String", + "multi": false, + "required": false, + "defaultValue": "fastest" + }, + { + "name": "opt_kernel_heavyevasion", + "description": "Heavy evasion option. Default value: false", + "type": "Boolean", + "multi": false, + "required": false, + "defaultValue": false + }, + { + "name": "opt_timeout", + "description": "Timeout option. Size range: 10-660", + "type": "Number", + "multi": false, + "required": false, + "defaultValue": "60" + }, + { + "name": "obj_ext_startfolder", + "description": "Start object from. 
Allowed values: \"desktop\", \"home\", \"downloads\", \"appdata\", \"temp\", \"windows\", \"root\"", + "type": "String", + "multi": false, + "required": false, + "defaultValue": "temp" + }, + { + "name": "obj_ext_browser", + "description": "Choose which browser to use. Allowed values: \"Google Chrome\", \"Mozilla Firefox\", \"Opera\", \"Internet Explorer\"", + "type": "String", + "multi": false, + "required": false, + "defaultValue": "Internet Explorer" } ], "registration_required": true, diff --git a/analyzers/AnyRun/README.md b/analyzers/AnyRun/README.md index 999e0e5de..2829e2d40 100644 --- a/analyzers/AnyRun/README.md +++ b/analyzers/AnyRun/README.md @@ -12,4 +12,19 @@ You need a valid AnyRun API integration subscription to use the analyzer. Free p - Provide your API token as a value for the `token` parameter. - Define the privacy setting in `privacy_type` parameter. -- Set `verify_ssl` parameter as false if you connection requires it \ No newline at end of file +- Set `verify_ssl` parameter as false if you connection requires it + +#### Optional Parameters +AnyRun provides a number of parameters that can be modified to do additional/different analysis. +- Set the "bitness" of your runtime environment with the `env_bitness` parameter. +- Select which version of Windows to use by setting `env_version` parameter. +- Select which products to install by default with `env_type` parameter. +- Enable/disable networking with `opt_network_connect` parameter. +- Enable/disable "FakeNet" with `opt_network_fakenet` parameter. +- Enable/disable the TOR network with `opt_network_tor` parameter. +- Enable/disable MITM for https connections with `opt_network_mitm` parameter. +- Need a specific geolocation? use `opt_network_geo` parameter. +- Need to analyze something with evasion tactics? `opt_kernel_heavyevasion` +- Change the timeout settings with `opt_timeout` parameter. +- Select which folder the analysis starts in with `obj_ext_startfolder` parameter. 
+- Select which browser to use for analysis with `obj_ext_browser` parameter. diff --git a/analyzers/AnyRun/anyrun_analyzer.py b/analyzers/AnyRun/anyrun_analyzer.py index 67a5e521f..94e8fdf0a 100755 --- a/analyzers/AnyRun/anyrun_analyzer.py +++ b/analyzers/AnyRun/anyrun_analyzer.py @@ -16,6 +16,18 @@ def __init__(self): self.verify_ssl = self.get_param("config.verify_ssl", True, None) if not self.verify_ssl: requests.packages.urllib3.disable_warnings(InsecureRequestWarning) + self.env_bitness = self.get_param("config.env_bitness", None, None) + self.env_version = self.get_param("config.env_version", None, None) + self.env_type = self.get_param("config.env_type", None, None) + self.opt_network_connect = self.get_param("config.opt_network_connect", None, None) + self.opt_network_fakenet = self.get_param("config.opt_network_fakenet", None, None) + self.opt_network_tor = self.get_param("config.opt_network_tor", None, None) + self.opt_network_mitm = self.get_param("config.opt_network_mitm", None, None) + self.opt_network_geo = self.get_param("config.opt_network_geo", None, None) + self.opt_kernel_heavyevasion = self.get_param("config.opt_kernel_heavyevasion", None, None) + self.opt_timeout = self.get_param("config.opt_timeout", None, None) + self.obj_ext_startfolder = self.get_param("config.obj_ext_startfolder", None, None) + self.obj_ext_browser = self.get_param("config.obj_ext_browser", None, None) def summary(self, raw): taxonomies = [] @@ -50,7 +62,18 @@ def run(self): while status_code in (None, 429) and tries <= 15: with open(filepath, "rb") as sample: files = {"file": (filename, sample)} - data = {"opt_privacy_type": self.privacy_type} + data = {"opt_privacy_type": self.privacy_type, + "env_bitness": self.env_bitness, + "env_version": self.env_version, + "env_type": self.env_type, + "opt_network_connect": self.opt_network_connect, + "opt_network_fakenet": self.opt_network_fakenet, + "opt_network_tor": self.opt_network_tor, + "opt_network_mitm": 
self.opt_network_mitm, + "opt_network_geo": self.opt_network_geo, + "opt_kernel_heavyevasion": self.opt_kernel_heavyevasion, + "opt_timeout": self.opt_timeout, + "obj_ext_startfolder": self.obj_ext_startfolder } response = requests.post( "{0}/analysis".format(self.url), files=files, @@ -71,7 +94,20 @@ def run(self): self.error(response.json()["message"]) elif self.data_type == "url": url = self.get_param("data", None, "Url is missing") - data = {"obj_type": "url", "obj_url": url, "opt_privacy_type": self.privacy_type} + data = {"obj_type": "url", + "obj_url": url, + "opt_privacy_type": self.privacy_type, + "env_bitness": self.env_bitness, + "env_version": self.env_version, + "env_type": self.env_type, + "opt_network_connect": self.opt_network_connect, + "opt_network_fakenet": self.opt_network_fakenet, + "opt_network_tor": self.opt_network_tor, + "opt_network_mitm": self.opt_network_mitm, + "opt_network_geo": self.opt_network_geo, + "opt_kernel_heavyevasion": self.opt_kernel_heavyevasion, + "opt_timeout": self.opt_timeout, + "obj_ext_browser": self.obj_ext_browser } while status_code in (None, 429) and tries <= 15: response = requests.post( "{0}/analysis".format(self.url), @@ -130,4 +166,4 @@ def run(self): if __name__ == "__main__": - AnyRunAnalyzer().run() \ No newline at end of file + AnyRunAnalyzer().run() diff --git a/analyzers/BinalyzeAIR/README.md b/analyzers/BinalyzeAIR/README.md new file mode 100644 index 000000000..55c31ec8b --- /dev/null +++ b/analyzers/BinalyzeAIR/README.md @@ -0,0 +1,34 @@ +### What is Binalyze AIR? 
+ +AIR is an "Automated Incident Response" platform that provides the complete feature set for: + +- Remotely collecting 300+ evidence types in minutes, +- Capturing the "Forensic State" of an endpoint as a well-organized HTML/JSON report, +- Performing triage on thousands of endpoints using YARA, +- Integrating with SIEM/SOAR/EDR products for automating the response phase IR, +- Enriching alerts for eliminating false positives, +- Investigating pre-cursors generated by other security products. + +#### What does this integration do? + +This responder lets you start acquisition and isolation of an endpoint with Binalyze AIR. + +##### Acquisition +One of the core features of AIR is collecting evidence remotely. This feature is made possible by "Acquisition Profiles," a group of different evidence categories. With this integration, you can use following profiles: + +- Full, +- Quick, +- Memory (RAM + PageFile), +- Event Logs, +- Browsing History, +- Compromise Assessment +- And much more! + +##### Isolation + +Endpoint isolation works by terminating all connections of an endpoint and not allowing any new connections. +When an endpoint is isolated, you can still perform tasks such as Acquisition. 
+ +For more information, please refer to [Knowledge Base](https://kb.binalyze.com/) +The program uses [Binalyze AIR API](https://www.binalyze.com) + diff --git a/analyzers/BinalyzeAIR/assets/binalyze-logo.png b/analyzers/BinalyzeAIR/assets/binalyze-logo.png new file mode 100644 index 000000000..2b5ea0189 Binary files /dev/null and b/analyzers/BinalyzeAIR/assets/binalyze-logo.png differ diff --git a/analyzers/BinalyzeAIR/binalyze.py b/analyzers/BinalyzeAIR/binalyze.py new file mode 100644 index 000000000..6b24b6cc4 --- /dev/null +++ b/analyzers/BinalyzeAIR/binalyze.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 + +import requests +from typing import Dict, Any +from cortexutils.responder import Responder + +requests.packages.urllib3.disable_warnings() + + +class BinalyzeAIR(Responder): + def __init__(self): + Responder.__init__(self) + self.air_console_url = self.get_param( + "air_console_url", None, "Binalyze AIR console URL is missing!" + ) + self.air_api_key = self.get_param( + "air_api_key", None, "Binalyze AIR API key is missing!" 
+ ) + self.proxies = self.get_param("config.proxy", None) + + self.headers: Dict[str, Any] = { + 'Authorization': f'Bearer {self.air_api_key}', + 'User-Agent': 'Binalyze AIR', + 'Content-type': 'application/json', + 'Accept-Charset': 'UTF-8' + } + self.service = self.get_param("config.service", None, "Service Missing!") + self.hostname = self.get_param('hostname', '', None, 'Hostname is Missing!') + self.organization_id = self.get_param('organization_id', 0) + + if self.service == 'acquire': + self.profile = self.get_param('profile', '') + self.case_id = self.get_param('case_id', '') + if self.service == 'isolation': + self.hostname = self.get_param('hostname', '') + self.isolation = self.get_param('isolation', '') + + def run(self): + Responder.run(self) + if self.service == "acquire": + if self.hostname is None: + self.error(f'{self.hostname} is Empty!') + return + if self.profile is None: + self.error(f'{self.profile} is Empty!') + return + if self.profile: + try: + profile = requests.get( + f'https://{self.air_console_url}/api/public/acquisitions/profiles?filter[name]={self.profile}&filter[organizationIds]=0', + headers=self.headers, verify=False).json()['result']['entities'][0]['_id'] + self.profile = profile + except Exception as ex: + self.error(f'{self.profile} is wrong!') + return + if self.case_id is None: + self.error(f'{self.case_id} is Empty!') + return + if self.organization_id is None: + self.error(f'{self.organization_id} is Empty!') + return + + payload: Dict[str, Any] = { + "caseId": self.case_id, + "droneConfig": { + "autoPilot": False, + "enabled": False + }, + "taskConfig": { + "choice": "use-policy" + }, + "acquisitionProfileId": self.profile, + "filter": { + "name": self.hostname, + "organizationIds": [self.organization_id] + } + } + response = requests.post( + f'{self.air_console_url}/api/public/acquisitions/acquire', + headers=self.headers, + json_data=payload + ) + if response.status_code == requests.codes.ok: + self.report({'message': 
f'Acquisition task has been started in {self.hostname}'}) + else: + self.error( + f'Error, unable to start acquisition task. I received {response.status_code} status code from Binalyze AIR!' + ) + + if self.service == "isolate": + if self.hostname is None: + self.error(f'{self.hostname} is Empty!') + return + if self.isolation is None: + self.error(f'{self.isolation} is Empty!') + return + if self.organization_id is None: + self.error(f'{self.organization_id} is Empty!') + return + if self.isolation is True: + payload: Dict[str, Any] = { + "enabled": True, + "filter": { + "name": self.hostname, + "organizationIds": [self.organization_id] + } + } + return + if self.isolation is False: + payload: Dict[str, Any] = { + "enabled": False, + "filter": { + "name": self.hostname, + "organizationIds": [self.organization_id] + } + } + return + + response = requests.post( + f'{self.air_console_url}/api/public/endpoints/tasks/isolation', + headers=self.headers, + json_data=payload + ) + if response.status_code == requests.codes.ok: + self.report({'message': f'Isolation task has been started in {self.hostname}'}) + else: + self.error( + f'Error, unable to start isolation task. I received {response.status_code} status code from Binalyze AIR!' 
+ ) + + def operations(self, raw): + return [self.build_operation("AddTagToArtifact", tag=f"BinalyzeAIR:{self.service}d the endpoint.")] + + +if __name__ == "__main__": + BinalyzeAIR().run() \ No newline at end of file diff --git a/analyzers/BinalyzeAIR/binalyze_air_acquisition.json b/analyzers/BinalyzeAIR/binalyze_air_acquisition.json new file mode 100644 index 000000000..68d40487f --- /dev/null +++ b/analyzers/BinalyzeAIR/binalyze_air_acquisition.json @@ -0,0 +1,55 @@ +{ + "name": "Binalyze_AIR_Acquisition", + "version": "1.0", + "author": "Binalyze Integration Team", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Start an acquisition with Binalyze AIR.", + "dataTypeList": [ + "thehive:case_artifact" + ], + "command": "BinalyzeAIR/binalyze.py", + "config": { + "service": "air_acquire" + }, + "service_logo": { + "path": "assets/binalyze-logo.png", + "caption": "logo" + }, + "baseConfig": "BinalyzeAIR", + "configurationItems": [ + { + "name": "air_console_url", + "description": "Console URL", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "air_api_key", + "description": "API Key,", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "endpoint_hostname", + "description": "Endpoint Hostname", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "acquisition_name", + "description": "Acquisition name should match with the AIR console.", + "type": "string", + "multi": false, + "default": "quick", + "required": true + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.binalyze.com/air" +} diff --git a/analyzers/BinalyzeAIR/binalyze_air_isolation.json b/analyzers/BinalyzeAIR/binalyze_air_isolation.json new file mode 100644 index 000000000..a98c00f5a --- /dev/null +++ b/analyzers/BinalyzeAIR/binalyze_air_isolation.json @@ -0,0 +1,55 @@ +{ + "name": 
"Binalyze_AIR_Isolation", + "version": "1.0", + "author": "Binalyze Integration Team", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Isolate your endpoints with Binalyze AIR.", + "dataTypeList": [ + "thehive:case_artifact" + ], + "command": "BinalyzeAIR/air.py", + "config": { + "service": "air_isolate" + }, + "service_logo": { + "path": "assets/binalyze-logo.png", + "caption": "logo" + }, + "baseConfig": "BinalyzeAIR", + "configurationItems": [ + { + "name": "air_console_url", + "description": "Console URL", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "air_api_key", + "description": "API Key,", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "endpoint_hostname", + "description": "Endpoint Hostname", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "isolation", + "description": "Isolation operation", + "type": "boolean", + "multi": false, + "default": "true", + "required": true + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.binalyze.com/air" +} diff --git a/analyzers/BinalyzeAIR/requirements.txt b/analyzers/BinalyzeAIR/requirements.txt new file mode 100644 index 000000000..a5a9cc7df --- /dev/null +++ b/analyzers/BinalyzeAIR/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.31.0 +cortexutils>=2.2.0 diff --git a/analyzers/Capa/Capa.json b/analyzers/Capa/Capa.json new file mode 100755 index 000000000..439128882 --- /dev/null +++ b/analyzers/Capa/Capa.json @@ -0,0 +1,38 @@ +{ + "name": "Capa", + "version": "1.0", + "author": "Wes Lambert", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Analyze files with Capa", + "dataTypeList": ["file"], + "baseConfig": "Capa", + "config": { + "service": "CapaAnalyze" + }, + "command": "Capa/CapaAnalyze.py", + "configurationItems": [ + { + 
"name": "capa_path", + "description": "Path to Capa binary (if installed locally, should be /opt/Cortex-Analyzers/analyzers/Capa/capa)", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "/worker/Capa/capa" + } + ], + "registration_required": false, + "subscription_required": false, + "free_subscription": false, + "service_homepage": "https://github.com/mandiant/capa", + "service_logo": { + "path": "assets/capa.png", + "caption": "CAPA logo" + }, + "screenshots": [ + { + "path": "assets/long_report.png", + "caption": "CAPA: Long report template" + } + ] +} diff --git a/analyzers/Capa/CapaAnalyze.py b/analyzers/Capa/CapaAnalyze.py new file mode 100755 index 000000000..22e5fac96 --- /dev/null +++ b/analyzers/Capa/CapaAnalyze.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# encoding: utf-8 + +from cortexutils.analyzer import Analyzer +import os +import subprocess +import argparse +import json +import re +from collections import defaultdict + +class CapaAnalyzer(Analyzer): + def __init__(self): + Analyzer.__init__(self) + self.capa_path = self.get_param("config.capa_path", "/Cortex-Analyzers/analyzers/Capa/capa") + self.filepath = self.get_param('file', None, 'File parameter is missing.') + + def summary(self, raw): + taxonomies = [] + level = 'info' + namespace = 'Capa' + + predicate = 'CapaAnalyze' + taxonomies.append(self.build_taxonomy(level, namespace, predicate, "analyzed!")) + + return {"taxonomies": taxonomies} + + def run(self): + parser = argparse.ArgumentParser(description='exec capa.') + parser.add_argument('filepath', type=str, help='file path') + args = parser.parse_args() + + if os.path.exists(self.filepath): + f = subprocess.check_output([self.capa_path, '-j', self.filepath]) + process = json.loads(f) + rules = process['rules'] + tactics = [] + techniques = [] + subtechniques = [] + ids = [] + capabilities = {} + + for rule in rules: + try: + # Metadata + meta = process['rules'][rule]['meta'] + + # ATT&CK details + attack = 
meta['att&ck'][0] + + # ID + id = attack['id'] + + # Technique + technique = attack['technique'] + " - " + id + + # Subtechnique + subtechnique = attack['subtechnique'] + + # Tactic + tactic = attack['tactic'] + + # Capability + capability_name = process['rules'][rule]['meta']['name'] + + if tactic not in tactics: + tactics.append(tactic) + + if subtechnique != "": + if subtechnique not in subtechniques: + subtechniques.append(attack['subtechnique']) + + if technique not in techniques: + techniques.append(attack['technique']) + + if id not in ids: + ids.append(id) + + if tactic not in capabilities: + capabilities[tactic] = {} + + if technique not in capabilities[tactic]: + capabilities[tactic][technique] = [] + + if capability_name not in capabilities[tactic][technique]: + capabilities[tactic][technique].append(capability_name) + except: + continue + self.report({ 'capabilities': capabilities, 'tactics': tactics, 'techniques': techniques, 'subtechniques': subtechniques, 'ids': ids, 'rules': rules }) +if __name__ == '__main__': + CapaAnalyzer().run() diff --git a/analyzers/Capa/assets/capa.png b/analyzers/Capa/assets/capa.png new file mode 100644 index 000000000..712544794 Binary files /dev/null and b/analyzers/Capa/assets/capa.png differ diff --git a/analyzers/Capa/assets/long_report.png b/analyzers/Capa/assets/long_report.png new file mode 100644 index 000000000..fa4260e0d Binary files /dev/null and b/analyzers/Capa/assets/long_report.png differ diff --git a/analyzers/Capa/capa b/analyzers/Capa/capa new file mode 100755 index 000000000..599c5e231 Binary files /dev/null and b/analyzers/Capa/capa differ diff --git a/analyzers/Capa/requirements.txt b/analyzers/Capa/requirements.txt new file mode 100644 index 000000000..8ad52a568 --- /dev/null +++ b/analyzers/Capa/requirements.txt @@ -0,0 +1 @@ +cortexutils diff --git a/analyzers/Censys/Censys.json b/analyzers/Censys/Censys.json index 23e15ced3..b695c310f 100644 --- a/analyzers/Censys/Censys.json +++ 
b/analyzers/Censys/Censys.json @@ -1,16 +1,16 @@ { "name": "Censys", - "author": "Nils Kuhnert, CERT-Bund", + "author": "Nils Kuhnert, CERT-Bund; nusantara-self, StrangeBee", "license": "AGPL-V3", "url": "https://github.com/BSI-CERT-Bund/censys-analyzer", - "version": "1.0", + "version": "2.0", "description": "Check IPs, certificate hashes or domains against censys.io.", - "dataTypeList": ["ip", "hash", "domain", "other"], + "dataTypeList": ["ip", "hash", "domain"], "baseConfig": "Censys", "command": "Censys/censys_analyzer.py", "configurationItems": [ { - "name": "uid", + "name": "uid", "description": "UID for Censys", "type": "string", "multi": false, @@ -22,6 +22,14 @@ "type": "string", "multi": false, "required": true + }, + { + "name": "max_records", + "description": "Maximum number of records for domains", + "type": "number", + "multi": false, + "required": true, + "defaultvalue": 10 } ], "registration_required": true, diff --git a/analyzers/Censys/assets/censys.png b/analyzers/Censys/assets/censys.png index 400a38fa8..ac066a21d 100644 Binary files a/analyzers/Censys/assets/censys.png and b/analyzers/Censys/assets/censys.png differ diff --git a/analyzers/Censys/assets/long_report.png b/analyzers/Censys/assets/long_report.png index f612b2f59..8ca5a75c8 100644 Binary files a/analyzers/Censys/assets/long_report.png and b/analyzers/Censys/assets/long_report.png differ diff --git a/analyzers/Censys/censys_analyzer.py b/analyzers/Censys/censys_analyzer.py index 0e506e1ec..3bb21533a 100755 --- a/analyzers/Censys/censys_analyzer.py +++ b/analyzers/Censys/censys_analyzer.py @@ -1,14 +1,15 @@ #!/usr/bin/env python3 from cortexutils.analyzer import Analyzer -from censys.certificates import CensysCertificates -from censys.ipv4 import CensysIPv4 -from censys.websites import CensysWebsites -from censys.exceptions import ( +from censys.search import CensysHosts, CensysCerts + +from censys.common.exceptions import ( CensysNotFoundException, CensysRateLimitExceededException, 
CensysUnauthorizedException, ) +import iocextract + class CensysAnalyzer(Analyzer): def __init__(self): @@ -24,60 +25,41 @@ def __init__(self): None, "No API-Key for Censys given. Please add it to the cortex configuration.", ) - self.__fields = self.get_param( - 'parameters.fields', - ["updated_at", "ip"] - ) - self.__max_records = self.get_param( - 'parameters.max_records', - 1000 - ) - self.__flatten = self.get_param( - 'parameters.flatten', - True - ) + self.__max_records = self.get_param("config.max_records", None, 10) def search_hosts(self, ip): - """ - Searches for a host using its ipv4 address + c = CensysHosts(api_id=self.__uid, api_secret=self.__api_key) + query = c.search("ip: " + ip, per_page=1, pages=1) + for result in query: + return result + return {} - :param ip: ipv4 address as string - :type ip: str - :return: dict - """ - c = CensysIPv4(api_id=self.__uid, api_secret=self.__api_key) - return c.view(ip) def search_certificate(self, hash): - """ - Searches for a specific certificate using its hash + c = CensysCerts(api_id=self.__uid, api_secret=self.__api_key) + + try: + result = c.view(hash) + return result + except Exception as e: + self.error(f"Error fetching certificate: {str(e)}") + return {} + - :param hash: certificate hash - :type hash: str - :return: dict - """ - c = CensysCertificates(api_id=self.__uid, api_secret=self.__api_key) - return c.view(hash) def search_website(self, dom): - """ - Searches for a website using the domainname - :param dom: domain - :type dom: str - :return: dict - """ - c = CensysWebsites(api_id=self.__uid, api_secret=self.__api_key) - return c.view(dom) - - def search_ipv4(self, search): - """ - Searches for hosts in IPv4 base - :param search:search as string - :type search: str - :return: dict - """ - c = CensysIPv4(api_id=self.__uid, api_secret=self.__api_key) - return [x for x in c.search(search, fields=self.__fields, max_records=self.__max_records, flatten=self.__flatten)] + c = CensysHosts(api_id=self.__uid, 
api_secret=self.__api_key) + query = c.search("dns.names: " + dom, per_page=self.__max_records, pages=1) + for result in query: + return result + return {} + + + def search_freetext(self, search): + c = CensysHosts(api_id=self.__uid, api_secret=self.__api_key) + results = c.search(search, fields=self.__fields, max_records=self.__max_records, flatten=self.__flatten) + return [result for result in results] + def run(self): try: @@ -93,10 +75,10 @@ def run(self): self.report({ 'website': self.search_website(self.get_data()) }) - elif self.data_type == 'other': - self.report({ - 'matches': self.search_ipv4(self.get_data()) - }) + # elif self.data_type == 'other': + # self.report({ + # 'matches': self.search_freetext(self.get_data()) + # }) else: self.error( "Data type not supported. Please use this analyzer with data types hash, ip or domain." @@ -110,58 +92,96 @@ def run(self): except CensysRateLimitExceededException: self.error("Rate limit exceeded.") + def artifacts(self, raw): + artifacts = [] + ipv4s = list(iocextract.extract_ipv4s(str(raw))) + # ipv6s = list(iocextract.extract_ipv6s(str(raw))) + domains = list(iocextract.extract_urls(str(raw))) + hashes = list(iocextract.extract_hashes(str(raw))) + + if ipv4s: + ipv4s = list(dict.fromkeys(ipv4s)) + for i in ipv4s: + artifacts.append(self.build_artifact('ip', str(i))) + + # if ipv6s: + # ipv6s = list(dict.fromkeys(ipv6s)) + # for i in ipv6s: + # artifacts.append(self.build_artifact('ip', str(i))) + + if hashes: + hashes = list(dict.fromkeys(hashes)) + for j in hashes: + artifacts.append(self.build_artifact('hash', str(j))) + + if domains: + domains = list(dict.fromkeys(domains)) + for k in domains: + artifacts.append(self.build_artifact('url', str(k))) + return artifacts + def summary(self, raw): taxonomies = [] + if 'ip' in raw: - raw = raw['ip'] - service_count = len(raw.get('protocols', [])) - heartbleed = raw.get('443', {}).get('https', {}).get('heartbleed', {}).get('heartbleed_vulnerable', False) - 
taxonomies.append(self.build_taxonomy('info', 'Censys', 'OpenServices', service_count)) - if heartbleed: - taxonomies.append(self.build_taxonomy('malicious', 'Censys', 'Heartbleed', 'vulnerable')) + for ip_info in raw['ip']: + ip_address = ip_info.get('ip', 'Unknown IP') + asn = ip_info.get('autonomous_system', {}).get('asn', 'Unknown ASN') + country = ip_info.get('location', {}).get('country', 'Unknown Country') + city = ip_info.get('location', {}).get('city', 'Unknown City') + os_product = ip_info.get('operating_system', {}).get('product', 'Unknown OS') + service_count = len(ip_info.get('services', [])) + #taxonomies.append(self.build_taxonomy('info', 'Censys', 'IP', ip_address)) + #taxonomies.append(self.build_taxonomy('info', 'Censys', 'ASN', asn)) + #taxonomies.append(self.build_taxonomy('info', 'Censys', 'Country', country)) + #taxonomies.append(self.build_taxonomy('info', 'Censys', 'City', city)) + #taxonomies.append(self.build_taxonomy('info', 'Censys', 'OperatingSystem', os_product)) + taxonomies.append(self.build_taxonomy('info', 'Censys', 'OpenServices', service_count)) elif 'website' in raw: - raw = raw['website'] - service_count = len(raw.get('tags', [])) - taxonomies.append(self.build_taxonomy('info', 'Censys', 'OpenServices', service_count)) + taxonomies.append(self.build_taxonomy('info', 'Censys', 'recordsFound', len(raw["website"]))) + # for site in raw['website']: + # ip = site.get('ip', 'Unknown IP') + # asn = site.get('autonomous_system', {}).get('asn', 'Unknown ASN') + # country = site.get('location', {}).get('country', 'Unknown Country') + # service_count = len(site.get('services', [])) + # #taxonomies.append(self.build_taxonomy('info', 'Censys', 'IP', ip)) + # #taxonomies.append(self.build_taxonomy('info', 'Censys', 'ASN', asn)) + # taxonomies.append(self.build_taxonomy('info', 'Censys', 'Country', country)) + # taxonomies.append(self.build_taxonomy('info', 'Censys', 'Services', service_count)) elif 'cert' in raw: raw = raw['cert'] - 
trusted_count = len(raw.get('validation', [])) - validator_count = len(raw.get('validation', [])) - - for _, validator in raw.get("validation", []).items(): - if ( - validator.get("blacklisted", False) - or validator.get("in_revocation_set", False) - or ( - not validator.get("whitelisted", False) - and not validator.get("valid", False) - ) - ): - trusted_count -= 1 + validator_keys = ["nss", "microsoft", "apple", "chrome"] + validator_count = 0 + trusted_count = 0 + for key in validator_keys: + validator = raw.get("validation", {}).get(key, {}) + if validator.get("is_valid", False) and validator.get("has_trusted_path", False): + trusted_count += 1 + validator_count += 1 + if trusted_count < validator_count: taxonomies.append( self.build_taxonomy( "suspicious", "Censys", "TrustedCount", - "{}/{}".format(trusted_count, validator_count), + f"{trusted_count}/{validator_count}", ) ) else: - taxonomies.append(self.build_taxonomy('info', 'Censys', 'TrustedCount', '{}/{}'.format( - trusted_count, validator_count - ))) - - elif 'matches' in raw: - result_count = len(raw.get('matches', [])) - taxonomies.append(self.build_taxonomy('info', 'Censys ipv4 search', 'results', result_count)) - + taxonomies.append(self.build_taxonomy('info', 'Censys', 'TrustedCount', f'{trusted_count}/{validator_count}')) + + # elif 'matches' in raw: + # result_count = len(raw.get('matches', [])) + # taxonomies.append(self.build_taxonomy('info', 'Censys ipv4 search', 'results', result_count)) + return { 'taxonomies': taxonomies } + if __name__ == "__main__": - CensysAnalyzer().run() + CensysAnalyzer().run() \ No newline at end of file diff --git a/analyzers/Censys/requirements.txt b/analyzers/Censys/requirements.txt index 97dcd2863..b1c5694ff 100644 --- a/analyzers/Censys/requirements.txt +++ b/analyzers/Censys/requirements.txt @@ -1,2 +1,3 @@ cortexutils -censys==1.1.1 +censys~=2.2 +iocextract \ No newline at end of file diff --git a/analyzers/Crowdsec/Crowdsec_analyzer.json 
b/analyzers/Crowdsec/Crowdsec_analyzer.json index ed30a2028..b63c6db7c 100644 --- a/analyzers/Crowdsec/Crowdsec_analyzer.json +++ b/analyzers/Crowdsec/Crowdsec_analyzer.json @@ -1,6 +1,6 @@ { "name": "Crowdsec_Analyzer", - "version": "1.0", + "version": "1.1", "author": "CERT-ARKEA", "url": "https://github.com/TheHive-Project/Cortex-Analyzers", "license": "AGPL-V3", @@ -17,6 +17,78 @@ "type": "string", "multi": false, "required": true + }, + { + "name": "taxonomy_reputation", + "description": "Create taxonomy for reputation", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": true + }, + { + "name": "taxonomy_as_name", + "description": "Create taxonomy for AS name", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": false + }, + { + "name": "taxonomy_ip_range_score", + "description": "Create taxonomy for IP range score", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": false + }, + { + "name": "taxonomy_last_seen", + "description": "Create taxonomy for last seen date", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": false + }, + { + "name": "taxonomy_attack_details", + "description": "Create taxonomy for attack details", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": false + }, + { + "name": "taxonomy_behaviors", + "description": "Create taxonomy for behaviors", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": true + }, + { + "name": "taxonomy_mitre_techniques", + "description": "Create taxonomy for mitre techniques", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": false + }, + { + "name": "taxonomy_cves", + "description": "Create taxonomy for cves", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": true + }, + { + "name": "taxonomy_not_found", + "description": "Create taxonomy for not found IP", + "type": "boolean", + "multi": false, + 
"required": true, + "defaultValue": true } ], "registration_required": true, @@ -30,11 +102,11 @@ "screenshots": [ { "path": "assets/crowdsec-report-long.png", - "caption": "" + "caption": "CrowdSec analyzer: long report" }, { "path": "assets/crowdsec-analyzer-result-example.png", - "caption": "" + "caption": "CrowdSec analyzer: short report" } ] } diff --git a/analyzers/Crowdsec/README.md b/analyzers/Crowdsec/README.md index f79a28d7e..be4883912 100644 --- a/analyzers/Crowdsec/README.md +++ b/analyzers/Crowdsec/README.md @@ -2,6 +2,8 @@ Check [CrowdSec](https://www.crowdsec.net/) Threat Intelligence about an ip address. +For further information, please consult the [official documentation](https://doc.crowdsec.net/u/cti_api/integration_thehive/). + Running the analyzer will expose the result as taxonomies in the short report displayed in the ip observable. ![short result example](./assets/crowdsec-analyzer-result-example.png) @@ -10,87 +12,371 @@ The raw report contains the whole json response from CrowdSec. e.g.: -```javascript +```json { - "ip_range_score": 0, - "ip": "223.171.256.256", - "ip_range": "223.171.0.0/16", - "as_name": "LGTELECOM", - "as_num": 17853, + "ip": "192.42.116.218", + "reputation": "malicious", + "ip_range": "192.42.116.0/22", + "background_noise": "high", + "confidence": "high", + "background_noise_score": 10, + "ip_range_score": 5, + "as_name": "SURF B.V.", + "as_num": 1101, + "ip_range_24": "192.42.116.0/24", + "ip_range_24_reputation": "malicious", + "ip_range_24_score": 5, "location": { - "country": "KR", + "country": "NL", "city": null, - "latitude": 42, - "longitude": 42 + "latitude": 52.3824, + "longitude": 4.8995 }, - "reverse_dns": null, + "reverse_dns": "44.tor-exit.nothingtohide.nl", "behaviors": [ { - "name": "pop3/imap:bruteforce", - "label": "POP3/IMAP Bruteforce", - "description": "IP has been reported for performing a POP3/IMAP brute force attack." 
+ "name": "tcp:scan", + "label": "TCP Scan", + "description": "IP has been reported for performing TCP port scanning.", + "references": [], + "$$hashKey": "object:984" + }, + { + "name": "http:bruteforce", + "label": "HTTP Bruteforce", + "description": "IP has been reported for performing a HTTP brute force attack (either generic HTTP probing or applicative related brute force).", + "references": [], + "$$hashKey": "object:985" + }, + { + "name": "http:exploit", + "label": "HTTP Exploit", + "description": "IP has been reported for attempting to exploit a vulnerability in a web application.", + "references": [], + "$$hashKey": "object:986" + }, + { + "name": "http:scan", + "label": "HTTP Scan", + "description": "IP has been reported for performing actions related to HTTP vulnerability scanning and discovery.", + "references": [], + "$$hashKey": "object:987" + }, + { + "name": "http:spam", + "label": "Web form spam", + "description": "IP has been reported trying to perform spam via web forms/forums.", + "references": [], + "$$hashKey": "object:988" + }, + { + "name": "generic:exploit", + "label": "Exploitation attempt", + "description": "IP has been reported trying to exploit known vulnerability/CVE on unspecified protocols.", + "references": [], + "$$hashKey": "object:989" + }, + { + "name": "ssh:bruteforce", + "label": "SSH Bruteforce", + "description": "IP has been reported for performing brute force on ssh services.", + "references": [], + "$$hashKey": "object:990" } ], "history": { - "first_seen": "2022-09-26T03:45:00+00:00", - "last_seen": "2022-10-11T08:15:00+00:00", - "full_age": 16, - "days_age": 15 + "first_seen": "2022-12-26T01:15:00+00:00", + "last_seen": "2024-07-31T10:00:00+00:00", + "full_age": 585, + "days_age": 584 }, "classifications": { "false_positives": [], - "classifications": [] + "classifications": [ + { + "name": "proxy:tor", + "label": "TOR exit node", + "description": "IP is being flagged as a TOR exit node.", + "references": [], + 
"$$hashKey": "object:1021" + }, + { + "name": "crowdsec:ai_vpn_proxy", + "label": "VPN or Proxy", + "description": "IP is identified as a VPN or a Proxy by CrowdSec AI Detection Algorithm.", + "references": [], + "$$hashKey": "object:1022" + }, + { + "name": "community-blocklist", + "label": "CrowdSec Community Blocklist", + "description": "IP belongs to the CrowdSec Community Blocklist", + "$$hashKey": "object:1023" + } + ] }, "attack_details": [ { - "name": "crowdsecurity/postfix-spam", - "label": "Postfix Bruteforce", - "description": "Detect spammers/postfix brute force", - "references": [] + "name": "firewallservices/pf-scan-multi_ports", + "label": "PF Scan Multi Ports", + "description": "ban IPs that are scanning us", + "references": [], + "$$hashKey": "object:1027" + }, + { + "name": "crowdsecurity/http-path-traversal-probing", + "label": "HTTP Path Traversal Exploit", + "description": "Detect path traversal attempt", + "references": [], + "$$hashKey": "object:1028" + }, + { + "name": "crowdsecurity/grafana-cve-2021-43798", + "label": "CVE-2021-43798", + "description": "Detect cve-2021-43798 exploitation attemps", + "references": [], + "$$hashKey": "object:1029" + }, + { + "name": "crowdsecurity/http-admin-interface-probing", + "label": "HTTP Admin Interface Probing", + "description": "Detect generic HTTP admin interface probing", + "references": [], + "$$hashKey": "object:1030" + }, + { + "name": "crowdsecurity/http-open-proxy", + "label": "HTTP Open Proxy Probing", + "description": "Detect scan for open proxy", + "references": [], + "$$hashKey": "object:1031" + }, + { + "name": "crowdsecurity/http-cve-probing", + "label": "HTTP CVE Probing", + "description": "Detect generic HTTP cve probing", + "references": [], + "$$hashKey": "object:1032" + }, + { + "name": "LePresidente/http-generic-403-bf", + "label": "HTTP Bruteforce", + "description": "Detect generic 403 Forbidden (Authorization) error brute force", + "references": [], + "$$hashKey": "object:1033" + 
}, + { + "name": "crowdsecurity/http-sqli-probbing-detection", + "label": "SQL Injection Attempt", + "description": "A scenario that detects SQL injection probing with minimal false positives", + "references": [], + "$$hashKey": "object:1034" + }, + { + "name": "crowdsecurity/http-sensitive-files", + "label": "Access to sensitive files over HTTP", + "description": "Detect attempt to access to sensitive files (.log, .db ..) or folders (.git)", + "references": [], + "$$hashKey": "object:1035" + }, + { + "name": "crowdsecurity/http-bad-user-agent", + "label": "Bad User Agent", + "description": "Detect usage of bad User Agent", + "references": [], + "$$hashKey": "object:1036" + }, + { + "name": "crowdsecurity/suricata-major-severity", + "label": "Suricata Severity 1 Event", + "description": "Detect exploit attempts via emerging threat rules", + "references": [], + "$$hashKey": "object:1037" + }, + { + "name": "crowdsecurity/ssh-bf", + "label": "SSH Bruteforce", + "description": "Detect ssh bruteforce", + "references": [], + "$$hashKey": "object:1038" + }, + { + "name": "crowdsecurity/apache_log4j2_cve-2021-44228", + "label": "Log4j CVE-2021-44228", + "description": "Detect cve-2021-44228 exploitation attemps", + "references": [], + "$$hashKey": "object:1039" + }, + { + "name": "crowdsecurity/http-bf-wordpress_bf_xmlrpc", + "label": "WP XMLRPC bruteforce", + "description": "detect wordpress bruteforce on xmlrpc", + "references": [], + "$$hashKey": "object:1040" + }, + { + "name": "crowdsecurity/ssh-slow-bf", + "label": "SSH Slow Bruteforce", + "description": "Detect slow ssh bruteforce", + "references": [], + "$$hashKey": "object:1041" + }, + { + "name": "crowdsecurity/http-bf-wordpress_bf", + "label": "WordPress Bruteforce", + "description": "Detect WordPress bruteforce on admin interface", + "references": [], + "$$hashKey": "object:1042" + }, + { + "name": "crowdsecurity/http-wordpress_wpconfig", + "label": "Access to WordPress wp-config.php", + "description": "Detect 
WordPress probing: variations around wp-config.php by wpscan", + "references": [], + "$$hashKey": "object:1043" + }, + { + "name": "crowdsecurity/http-xss-probbing", + "label": "XSS Attempt", + "description": "A scenario that detects XSS probing with minimal false positives", + "references": [], + "$$hashKey": "object:1044" + }, + { + "name": "crowdsecurity/modsecurity", + "label": "Modsecurity Alert", + "description": "Web exploitation via modsecurity", + "references": [], + "$$hashKey": "object:1045" + }, + { + "name": "crowdsecurity/http-probing", + "label": "HTTP Probing", + "description": "Detect site scanning/probing from a single ip", + "references": [], + "$$hashKey": "object:1046" } ], "target_countries": { - "DE": 25, - "FR": 25, - "PL": 25, - "SK": 25 + "US": 38, + "DE": 20, + "JP": 10, + "FR": 8, + "GB": 7, + "NL": 5, + "PL": 3, + "CA": 2, + "RU": 2, + "DK": 2 }, + "mitre_techniques": [ + { + "name": "T1595", + "label": "Active Scanning", + "description": "Adversaries may execute active reconnaissance scans to gather information that can be used during targeting.", + "references": [], + "$$hashKey": "object:1009" + }, + { + "name": "T1018", + "label": "Remote System Discovery", + "description": "Adversaries may attempt to get a listing of other systems by IP address, hostname, or other logical identifier on a network that may be used for Lateral Movement from the current system.", + "references": [], + "$$hashKey": "object:1010" + }, + { + "name": "T1046", + "label": "Network Service Discovery", + "description": "Adversaries may attempt to get a listing of services running on remote hosts and local network infrastructure devices, including those that may be vulnerable to remote software exploitation.", + "references": [], + "$$hashKey": "object:1011" + }, + { + "name": "T1110", + "label": "Brute Force", + "description": "Adversaries may use brute force techniques to gain access to accounts when passwords are unknown or when password hashes are 
obtained.", + "references": [], + "$$hashKey": "object:1012" + }, + { + "name": "T1190", + "label": "Exploit Public-Facing Application", + "description": "Adversaries may attempt to exploit a weakness in an Internet-facing host or system to initially access a network.", + "references": [], + "$$hashKey": "object:1013" + }, + { + "name": "T1589", + "label": "Gather Victim Identity Information", + "description": "Adversaries may gather information about the victim's identity that can be used during targeting.", + "references": [], + "$$hashKey": "object:1014" + } + ], + "cves": [ + "CVE-2021-43798", + "CVE-2021-44228" + ], "scores": { "overall": { - "aggressiveness": 0, + "aggressiveness": 5, "threat": 4, - "trust": 0, + "trust": 5, "anomaly": 1, - "total": 1 + "total": 5 }, "last_day": { - "aggressiveness": 0, - "threat": 0, - "trust": 0, + "aggressiveness": 5, + "threat": 4, + "trust": 5, "anomaly": 1, - "total": 0 + "total": 5 }, "last_week": { - "aggressiveness": 0, + "aggressiveness": 5, "threat": 4, - "trust": 0, + "trust": 5, "anomaly": 1, - "total": 1 + "total": 5 }, "last_month": { - "aggressiveness": 0, + "aggressiveness": 5, "threat": 4, - "trust": 0, + "trust": 5, "anomaly": 1, - "total": 1 + "total": 5 } }, - "references": [] + "references": [ + { + "name": "list:crowdsec_high_background_noise", + "label": "CrowdSec High Background Noise List", + "description": "Contains all IPs in our database that are considered as background noise. These IPs are not necessarily malicious, but they are considered as a potential threat. Proactively block these IPs if you want to reduce the noise on your systems.", + "references": [], + "$$hashKey": "object:1077" + }, + { + "name": "list:crowdsec_intelligence_blocklist", + "label": "CrowdSec Intelligence List", + "description": "Contains all IPs in our database that have been identified as actively aggressive, performing a wide variety of attacks. 
Proactively block these IPs if you don’t want to take any chances with malicious IPs potentially reaching your systems.", + "references": [], + "$$hashKey": "object:1078" + }, + { + "name": "list:firehol_botscout_7d", + "label": "Firehol BotScout list", + "description": "BotScout helps prevent automated web scripts, known as bots, from registering on forums, polluting databases, spreading spam, and abusing forms on web sites. They do this by tracking the names, IPs, and email addresses that bots use and logging them as unique signatures for future reference. This list is composed of the most recently-caught bots.", + "references": [ + "https://iplists.firehol.org/?ipset=botscout_7d" + ], + "$$hashKey": "object:1079" + } + ] } ``` #### Requirements -Provide a [CrowdSec CTI Api key](https://www.crowdsec.net/product/threat-intelligence#card-four) +Provide a [CrowdSec CTI Api key](https://docs.crowdsec.net/u/cti_api/getting_started/#getting-an-api-key) as a value for the `api_key` parameter. 
diff --git a/analyzers/Crowdsec/assets/crowdsec-analyzer-result-example.png b/analyzers/Crowdsec/assets/crowdsec-analyzer-result-example.png index 664fe322f..e09efea53 100644 Binary files a/analyzers/Crowdsec/assets/crowdsec-analyzer-result-example.png and b/analyzers/Crowdsec/assets/crowdsec-analyzer-result-example.png differ diff --git a/analyzers/Crowdsec/assets/crowdsec-report-long.png b/analyzers/Crowdsec/assets/crowdsec-report-long.png index 0474c1323..3deed2f1f 100644 Binary files a/analyzers/Crowdsec/assets/crowdsec-report-long.png and b/analyzers/Crowdsec/assets/crowdsec-report-long.png differ diff --git a/analyzers/Crowdsec/crowdsec_analyzer.py b/analyzers/Crowdsec/crowdsec_analyzer.py index 38e237283..97a195e27 100755 --- a/analyzers/Crowdsec/crowdsec_analyzer.py +++ b/analyzers/Crowdsec/crowdsec_analyzer.py @@ -2,16 +2,38 @@ from cortexutils.analyzer import Analyzer from crowdsec_api import Crowdsec -from datetime import datetime class CrowdsecAnalyzer(Analyzer): - def __init__(self): - Analyzer.__init__(self) - self.crowdsec_key = self.get_param("config.api_key", None, "Missing Crowdsec API key") + def __init__(self, job_directory=None): + Analyzer.__init__(self, job_directory) + self.crowdsec_key = self.get_param( + "config.api_key", None, "Missing Crowdsec API key" + ) + self.taxonomy_reputation = self.get_param( + "config.taxonomy_reputation", True, None + ) + self.taxonomy_as_name = self.get_param("config.taxonomy_as_name", False, None) + self.taxonomy_ip_range_score = self.get_param( + "config.taxonomy_ip_range_score", False, None + ) + self.taxonomy_last_seen = self.get_param( + "config.taxonomy_last_seen", False, None + ) + self.taxonomy_attack_details = self.get_param( + "config.taxonomy_attack_details", False, None + ) + self.taxonomy_behaviors = self.get_param( + "config.taxonomy_behaviors", True, None + ) + self.taxonomy_mitre_techniques = self.get_param( + "config.taxonomy_mitre_techniques", False, None + ) + self.taxonomy_cves = 
self.get_param("config.taxonomy_cves", True, None) + self.taxonomy_not_found = self.get_param( + "config.taxonomy_not_found", True, None + ) self.crowdsec_client = None - self.verbose_taxonomies = self.get_param("config.verbose_taxonomies", False) - self.polling_interval = self.get_param("config.polling_interval", 60) def summary(self, raw): taxonomies = [] @@ -19,38 +41,87 @@ def summary(self, raw): levelinfo = "info" levelorange = "suspicious" levelgreen = "safe" - - if 'as_name' in raw: - taxonomies.append(self.build_taxonomy(levelinfo, namespace, 'ASN', raw['as_name'])) - - if 'ip_range_score' in raw: - taxonomies.append(self.build_taxonomy(levelinfo, namespace, 'Score', raw['ip_range_score'])) - - if 'history' in raw: - taxonomies.append(self.build_taxonomy(levelinfo, namespace, 'LastSeen', raw['history']['last_seen'])) - - if 'attack_details' in raw: - for attack in raw['attack_details'] : - taxonomies.append(self.build_taxonomy(levelorange, namespace, 'Attack', attack['name'])) - - if len(taxonomies) == 0: - taxonomies.append(self.build_taxonomy(levelgreen, namespace, 'Threat', 'Not found')) - - ### uncomment for full taxonomies report - #if raw['attack_details']: - # for attackdetails in raw['attack_details'] : - # taxonomies.append(self.build_taxonomy(levelorange, namespace, 'Attack_details', attackdetails['name'])) + levelred = "malicious" + + if self.taxonomy_reputation and "reputation" in raw: + level = ( + levelred + if raw["reputation"] == "malicious" + else ( + levelorange + if raw["reputation"] == "suspicious" + else levelgreen if raw["reputation"] == "safe" else levelinfo + ) + ) + taxonomies.append( + self.build_taxonomy(level, namespace, "Reputation", raw["reputation"]) + ) + + if self.taxonomy_as_name and "as_name" in raw: + taxonomies.append( + self.build_taxonomy(levelinfo, namespace, "ASN", raw["as_name"]) + ) + + if self.taxonomy_ip_range_score and "ip_range_score" in raw: + taxonomies.append( + self.build_taxonomy( + levelinfo, namespace, 
"Score", raw["ip_range_score"] + ) + ) + + if self.taxonomy_last_seen and "history" in raw: + taxonomies.append( + self.build_taxonomy( + levelinfo, namespace, "LastSeen", raw["history"]["last_seen"] + ) + ) + + if self.taxonomy_attack_details and "attack_details" in raw: + for attack in raw["attack_details"]: + taxonomies.append( + self.build_taxonomy( + levelorange, namespace, "Attack", attack["name"] + ) + ) + + if self.taxonomy_behaviors and "behaviors" in raw: + for behavior in raw["behaviors"]: + taxonomies.append( + self.build_taxonomy( + levelorange, namespace, "Behavior", behavior["name"] + ) + ) + + if self.taxonomy_mitre_techniques and "mitre_techniques" in raw: + for mitre in raw["mitre_techniques"]: + taxonomies.append( + self.build_taxonomy(levelorange, namespace, "Mitre", mitre["name"]) + ) + + if self.taxonomy_cves and "cves" in raw: + for cve in raw["cves"]: + taxonomies.append( + self.build_taxonomy(levelorange, namespace, "CVE", cve) + ) + + if ( + self.taxonomy_not_found + and "reputation" not in raw + and "attack_details" not in raw + ): + taxonomies.append( + self.build_taxonomy(levelgreen, namespace, "Threat", "Not found") + ) return {"taxonomies": taxonomies} - def run(self): Analyzer.run(self) try: self.crowdsec_client = Crowdsec(self.crowdsec_key) data = self.get_param("data", None, "Data is missing") results = self.crowdsec_client.summary(data, self.data_type) - + self.report(results) except Exception: @@ -59,4 +130,3 @@ def run(self): if __name__ == "__main__": CrowdsecAnalyzer().run() - diff --git a/analyzers/Crowdsec/crowdsec_api.py b/analyzers/Crowdsec/crowdsec_api.py index 2e148a2cd..241da2e6a 100755 --- a/analyzers/Crowdsec/crowdsec_api.py +++ b/analyzers/Crowdsec/crowdsec_api.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 -from requests.compat import urljoin +import json + import requests +from requests.compat import urljoin class Crowdsec: @@ -10,7 +12,7 @@ class Crowdsec: """ def __init__(self, key: str): - """Intializes the API 
object + """Initializes the API object :param key: The Crowdsec API key :type key: str """ @@ -23,10 +25,10 @@ def _request(self, path: str): :type path: str """ headers = { - "x-api-key": self.api_key , - "accept": "application/json", - "User-Agent": "crowdsec-cortex/v1.0.0", - } + "x-api-key": self.api_key, + "accept": "application/json", + "User-Agent": "crowdsec-cortex/v1.1.0", + } url = urljoin(self.base_url, path) response = requests.get(url, headers=headers) @@ -34,15 +36,14 @@ def _request(self, path: str): raise APIRateLimiting(response.text) try: response_data = response.json() - except: + except json.JSONDecodeError: raise APIError("Couldn't parse response JSON") return response_data def summary(self, data: str, datatype: str): - """Return a summary of all information we have for the given IPv{4,6} address. - """ - if datatype == 'ip': + """Return a summary of all information we have for the given IPv{4,6} address.""" + if datatype == "ip": url_path = "/v2/smoke/{ip}".format(ip=data) return self._request(path=url_path) @@ -65,4 +66,3 @@ def __init__(self, value): def __str__(self): return self.value - diff --git a/analyzers/DNSdumpster/DNSdumpster.json b/analyzers/DNSdumpster/DNSdumpster.json new file mode 100644 index 000000000..4718f686b --- /dev/null +++ b/analyzers/DNSdumpster/DNSdumpster.json @@ -0,0 +1,16 @@ +{ + "name": "DNSdumpster_report", + "version": "1.0", + "author": "Keijo Korte - @korteke", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Query domain information from DNSdumpster.com.", + "dataTypeList": ["domain"], + "command": "DNSdumpster/dnsdumpster.py", + "baseConfig": "DNSdumpster", + "configurationItems": [], + "registration_required": false, + "subscription_required": false, + "free_subscription": false, + "service_homepage": "https://dnsdumpster.com" +} diff --git a/analyzers/DNSdumpster/README.md b/analyzers/DNSdumpster/README.md new file mode 100644 index 
000000000..b38f582f9 --- /dev/null +++ b/analyzers/DNSdumpster/README.md @@ -0,0 +1,5 @@ +### DNSdumpster +This analyzer makes a call to the [DNSdumpster](https://dnsdumpster.com) service to enrich the Domain information. + +#### Usage +Nothing special. Doesn't need API-key or credentials. Just enable and use. \ No newline at end of file diff --git a/analyzers/DNSdumpster/dnsdumpster.py b/analyzers/DNSdumpster/dnsdumpster.py new file mode 100755 index 000000000..95e9723df --- /dev/null +++ b/analyzers/DNSdumpster/dnsdumpster.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +from __future__ import print_function +import requests +import re +import iocextract + +from bs4 import BeautifulSoup +from cortexutils.analyzer import Analyzer + + +class DNSdumpsterAnalyzer(Analyzer): + + def __init__(self): + Analyzer.__init__(self) + self.session = requests.Session() + self.baseurl = 'https://dnsdumpster.com' + + def run(self): + Analyzer.run(self) + if self.data_type == 'domain': + try: + domain = self.get_param('data', None, 'Observable is missing') + result = self.dnsdumpster_query(domain) + self.report({'result': result}) + except Exception as e: + self.error("Error: {}".format(e)) + else: + self.error('Invalid data type') + + def dnsdumpster_query(self, domain): + try: + r = self.session.get(self.baseurl) + except requests.ConnectionError as connerr: + self.error("Connection error. 
Error {}".format(connerr)) + + soup = BeautifulSoup(r.content, 'html.parser') + csrf_middleware = soup.findAll('input', attrs={'name': 'csrfmiddlewaretoken'})[0]['value'] + cookies = {'csrftoken': csrf_middleware} + headers = {'Referer': self.baseurl, + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' + 'Chrome/95.0.4638.69 Safari/537.36'} + data = {'csrfmiddlewaretoken': csrf_middleware, 'targetip': domain, 'user': 'free'} + r = self.session.post(self.baseurl, cookies=cookies, data=data, headers=headers) + + if r.status_code != 200: + self.error("Unexpected status code from. Status code: {}".format(r.status_code)) + return [] + + if 'There was an error getting results' in r.content.decode('utf-8'): + self.error("There was an error getting results") + return [] + + soup = BeautifulSoup(r.content, 'html.parser') + tables = soup.findAll('table') + res = {'domain': domain, 'dns_records': {}} + + res['dns_records']['dns'] = self.retrieve_results(tables[0]) + res['dns_records']['mx'] = self.retrieve_results(tables[1]) + res['dns_records']['txt'] = self.retrieve_txt_record(tables[2]) + res['dns_records']['host'] = self.retrieve_results(tables[3]) + res['dns_records']['map_url'] = '{}/static/map/{}.png'.format(self.baseurl, domain) + + return res + + def retrieve_txt_record(self, table): + res = [] + for td in table.findAll('td'): + res.append(td.text) + return res + + def retrieve_results(self, table): + res = [] + trs = table.findAll('tr') + for tr in trs: + tds = tr.findAll('td') + pattern_ip = r'([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})' + try: + ip = re.findall(pattern_ip, tds[1].text)[0] + domain = str(tds[0]).split('
')[0].split('>')[1].split('<')[0] + header = ' '.join(tds[0].text.replace('\n', '').split(' ')[1:]) + reverse_dns = tds[1].find('span', attrs={}).text + + additional_info = tds[2].text + country = tds[2].find('span', attrs={}).text + autonomous_system = additional_info.split(' ')[0] + provider = ' '.join(additional_info.split(' ')[1:]) + provider = provider.replace(country, '') + data = {'domain': domain, + 'ip': ip, + 'reverse_dns': reverse_dns, + 'as': autonomous_system, + 'provider': provider, + 'country': country, + 'header': header} + res.append(data) + except Exception as err: + self.error("Unexpected error when parsing data from DNSdumpster.com. Error {}".format(err)) + return res + + def artifacts(self, raw): + artifacts = [] + ipv4s = list(iocextract.extract_ipv4s(str(raw))) + ipv6s = list(iocextract.extract_ipv6s(str(raw))) + domains = list(iocextract.extract_urls(str(raw))) + + if ipv4s: + ipv4s = list(dict.fromkeys(ipv4s)) + for i in ipv4s: + artifacts.append(self.build_artifact('ip', str(i))) + + if ipv6s: + ipv6s = list(dict.fromkeys(ipv6s)) + for j in ipv6s: + artifacts.append(self.build_artifact('ip', str(j))) + + if domains: + domains = list(dict.fromkeys(domains)) + for k in domains: + artifacts.append(self.build_artifact('url', str(k))) + + return artifacts + + def summary(self, raw): + taxonomies = [] + level = "info" + namespace = "DNSdumpster" + predicate = "Report" + value = "{}".format("OK") + + taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) + + return {"taxonomies": taxonomies} + + +if __name__ == '__main__': + DNSdumpsterAnalyzer().run() diff --git a/analyzers/DNSdumpster/requirements.txt b/analyzers/DNSdumpster/requirements.txt new file mode 100644 index 000000000..03ef6e2c6 --- /dev/null +++ b/analyzers/DNSdumpster/requirements.txt @@ -0,0 +1,4 @@ +cortexutils +requests +iocextract +beautifulsoup4 diff --git a/analyzers/DomainMailSPFDMARC/domainMailSPFDMARC_get_reports.json 
b/analyzers/DomainMailSPFDMARC/domainMailSPFDMARC_get_reports.json index 4b8dbab2c..35702f91f 100644 --- a/analyzers/DomainMailSPFDMARC/domainMailSPFDMARC_get_reports.json +++ b/analyzers/DomainMailSPFDMARC/domainMailSPFDMARC_get_reports.json @@ -1,5 +1,5 @@ { - "name": "DomainMailSPFDMARC_Analyzer", + "name": "DomainMailSPFDMARC", "version": "1.1", "url": "https://thehive-project.org", "author": "torsolaso", diff --git a/analyzers/EclecticIQ/EclecticIQ_SearchObservable.json b/analyzers/EclecticIQ/EclecticIQ_SearchObservable.json new file mode 100644 index 000000000..e8b260312 --- /dev/null +++ b/analyzers/EclecticIQ/EclecticIQ_SearchObservable.json @@ -0,0 +1,65 @@ +{ + "name": "EclecticIQ_SearchObservable", + "author": "BW", + "license": "AGPL-V3", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers/", + "version": "2.0", + "description": "Query EclecticIQ Intelligence Center for a specific observable.", + "dataTypeList": [ + "domain", + "ip", + "url", + "fqdn", + "uri_path", + "user-agent", + "hash", + "mail", + "mail_subject", + "registry", + "regexp", + "other", + "filename" + ], + "config": { + "service": "search_observable" + }, + "baseConfig": "EclecticIQ", + "command": "EclecticIQ/eclecticiq.py", + "configurationItems": [ + { + "name": "name", + "description": "Name of EclecticIQ instance", + "multi": false, + "required": false, + "type": "string" + }, + { + "name": "url", + "description": "URL of EclecticIQ instance", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "key", + "description": "API key for EclecticIQ instance", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "cert_check", + "description": "Verify server certificate", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": true + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.eclecticiq.com", + "service_logo": { "path": 
"assets/logo.png", "caption": "logo" }, + "screenshots": [] +} diff --git a/analyzers/EclecticIQ/README.md b/analyzers/EclecticIQ/README.md new file mode 100644 index 000000000..407eade0a --- /dev/null +++ b/analyzers/EclecticIQ/README.md @@ -0,0 +1,14 @@ +[EclecticIQ](https://www.eclecticiq.com/) is a cyber threat intelligence platform which provides aggregation and analysis capabilities for threat intelligence data and integration with organization assets. + +The analyzer comes in one flavor to look for an observable in the platform and return any parent entities and their context. + +- EclecticIQ\_**SearchObservable**: returns entity data for a specific observable + +#### Requirements + +The EclecticIQ analyzer requires you to have access to an [EclecticIQ Intelligence Center](https://www.eclecticiq.com/) instance. + +Three parameters are required for each instance to make the analyzer work: + +- `url` : URL of the instance, e.g. "https://intel-platform.local" +- `key` : API Key for a user of the EclecticIQ Intelligence Center instance diff --git a/analyzers/EclecticIQ/assets/logo.png b/analyzers/EclecticIQ/assets/logo.png new file mode 100644 index 000000000..d9d4b5ebb Binary files /dev/null and b/analyzers/EclecticIQ/assets/logo.png differ diff --git a/analyzers/EclecticIQ/eclecticiq.py b/analyzers/EclecticIQ/eclecticiq.py new file mode 100755 index 000000000..85b993663 --- /dev/null +++ b/analyzers/EclecticIQ/eclecticiq.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +import typing as tp + +import requests + +from cortexutils.analyzer import Analyzer + + +class EclecticIQAnalyzer(Analyzer): + """Searches for given Observables in configured EclecticIQ instance. + All standard Cortex data types are supported.""" + + def __init__(self): + Analyzer.__init__(self) + + self.service = self.get_param("config.service", default="search_observable") + + self.name = self.get_param( + "config.name", message="No EclecticIQ instance name given." 
+ ) + self.url = self.get_param("config.url", message="No EclecticIQ url given.") + self.key = self.get_param("config.key", message="No EclecticIQ api key given.") + self.data = self.get_param("data", message="Data is missing") + + if self.get_param("config.cert_check", True): + self.ssl = self.get_param("config.cert_path", True) + else: + self.ssl = False + + self.session = requests.Session() + self.session.verify = self.ssl + self.session.proxies = self.get_param("config.proxy") + self.session.headers.update( + {"Accept": "application/json", "Authorization": f"Bearer {self.key}"} + ) + + def summary(self, raw): + level = "info" + namespace = "EIQ" + predicate = "API" + found = len(raw["results"].get("entities", [])) + value = f"Found {found} entities" if found > 0 else "Not found" + taxonomy = self.build_taxonomy(level, namespace, predicate, value) + return {"taxonomies": [taxonomy]} + + def get_source(self, url): + response = self.session.get(url) + return response.json()["data"]["name"] + + @staticmethod + def get_confidence(data): + confidence = data.get("confidence", None) + if isinstance(confidence, dict): + confidence = confidence.get("value") + return confidence + + def run(self): + """ + Query EclecticIQ instance for data by querying observable for + observable id and then querying entities endpoint for parent entities + + Return dict response to cortex + """ + + results = { + "name": self.name, + "url": self.url, + "obs_value": self.data, + } + obs_id = self.add_observable_info(results) + if not obs_id: + # exit early for no data + return self.report({}) + + entities_info = self.get_entities_info(obs_id) + if not entities_info: + # exit early for no data + return self.report({}) + + results["count"] = entities_info["count"] + results["entities"] = [] + for entity in entities_info["data"]: + source_name = self.get_source(entity["sources"][0]) + entity_data = entity.get("data", {}) + results["entities"].append( + { + "id": entity["id"], + "title": 
entity_data.get("title"), + "type": entity_data.get("type"), + "confidence": self.get_confidence(entity_data), + "tags": entity.get("meta", {}).get("tags"), + "timestamp": entity.get("meta", {}).get( + "estimated_threat_start_time" + ), + "source_name": source_name, + } + ) + + self.report({"results": results}) + + def add_observable_info(self, results: dict) -> tp.Optional[str]: + url = self.url + "/api/v2/observables" # set observable url + params = {"filter[value]": self.data} # use data in filter param + response = self.session.get(url, params=params) + if not response.json().get("count"): + return None + + data = response.json()["data"] + results["obs_type"] = data[0]["type"] + results["obs_score"] = data[0].get("meta", {}).get("maliciousness") + return data[0]["id"] + + def get_entities_info(self, obs_id: str) -> tp.Optional[dict]: + url = self.url + "/api/v2/entities" # set entity url + params = {"filter[observables]": obs_id} # use observable id in filter param + + response = self.session.get(url, params=params) + response_json = response.json() + + if not response_json.get("count"): + return None + + return response_json + + +if __name__ == "__main__": + EclecticIQAnalyzer().run() diff --git a/analyzers/EclecticIQ/requirements.txt b/analyzers/EclecticIQ/requirements.txt new file mode 100644 index 000000000..4a21dbf63 --- /dev/null +++ b/analyzers/EclecticIQ/requirements.txt @@ -0,0 +1,2 @@ +cortexutils +requests \ No newline at end of file diff --git a/analyzers/EmlParser/parse.py b/analyzers/EmlParser/parse.py index 59968115c..4576750cb 100755 --- a/analyzers/EmlParser/parse.py +++ b/analyzers/EmlParser/parse.py @@ -159,7 +159,7 @@ def parseEml(filepath, job_directory, wkhtmltoimage, sanitized_rendering): ## ## Extract raw email ## - result["raw_email"] = raw_email.decode("utf-8") + result["raw_email"] = raw_email.decode("utf-8", errors="replace") ## ## Extract SMTP envelope ## diff --git a/analyzers/Fortiguard/urlcategory.py 
b/analyzers/Fortiguard/urlcategory.py index 28148b0ce..e0bdc241a 100755 --- a/analyzers/Fortiguard/urlcategory.py +++ b/analyzers/Fortiguard/urlcategory.py @@ -5,6 +5,7 @@ import re import requests from cortexutils.analyzer import Analyzer +import urllib.parse class URLCategoryAnalyzer(Analyzer): @@ -37,12 +38,15 @@ def run(self): try: pattern = re.compile("(?:Category: )([-\w\s]+)") baseurl = 'https://www.fortiguard.com/webfilter?q=' - url = baseurl + self.get_data() + url = baseurl + urllib.parse.quote_plus(self.get_data()) req = requests.get(url) - category_match = re.search(pattern, req.text, flags=0) - self.report({ - 'category': category_match.group(1) - }) + if not req.status_code == 200: + raise Exception(str(req.status_code)+': '+req.reason+' '+url) + else: + category_match = re.search(pattern, req.text, flags=0) + self.report({ + 'category': category_match.group(1) + }) except ValueError as e: self.unexpectedError(e) else: diff --git a/analyzers/Gatewatcher_CTI/Gatewatcher_CTI.py b/analyzers/Gatewatcher_CTI/Gatewatcher_CTI.py index 1c14e385a..af155a196 100755 --- a/analyzers/Gatewatcher_CTI/Gatewatcher_CTI.py +++ b/analyzers/Gatewatcher_CTI/Gatewatcher_CTI.py @@ -50,8 +50,8 @@ def run(self): has_max = False total_found_relations = 0 for item in info["message"][0]["IOCs"]: - if total_found_relations == len(relations) or \ - (has_max and total_found_relations >= self.max_relations): + if (total_found_relations == len(relations) or + (has_max and total_found_relations >= self.max_relations)): break if item["IocId"] in relations: @@ -65,14 +65,16 @@ def run(self): elif item["Type"] in ["URL", "Host", "MD5", "SHA1", "SHA256"]: records["IOCs"].append(item) - additional = {k : v for k, v in additional.items() if v is not None} + additional = {k: v for k, v in additional.items() if v is not None} main.update(additional) records["IOCs"].insert(0, main) + if len(records["IOCs"]) == 1 and records["IOCs"][0]["Risk"].lower() == "unknown": + records["is_on_gw"] = False 
self.report(records) def check_response(self, response): - if response.status_code not in [200,422]: + if response.status_code not in [200, 422]: try: result = response.json() if ( @@ -102,23 +104,22 @@ def summary(self, raw): level = "info" namespace = "Gatewatcher CTI" predicate = "GetReport" - value = "Not found" + value = "not found" data = next( (ioc for ioc in raw["IOCs"] if ioc["Value"] == self.observable_value), None ) if data is not None: level = data["Risk"].lower() if level == "malicious": - value = 86 + value = 100 elif level == "high suspicious": - value = 71 - level = "suspicious" - else: - value = 31 + value = 75 + elif level == "suspicious": + value = 60 taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) return {"taxonomies": taxonomies} if __name__ == "__main__": - GatewatcherCTI().run() \ No newline at end of file + GatewatcherCTI().run() diff --git a/analyzers/Gatewatcher_CTI/README.md b/analyzers/Gatewatcher_CTI/README.md index 8b30868dd..81dc3896d 100644 --- a/analyzers/Gatewatcher_CTI/README.md +++ b/analyzers/Gatewatcher_CTI/README.md @@ -1,5 +1,18 @@ -Requirement : if you want to use LastInfoSec's intelligence, you need an API key. You could contact LastInfoSec's team here https://www.gatewatcher.com/en/contact/ -LastInfosec has been acquired by Gatewatcher. -LastInfoSec's Threat Feed is a data feed that makes it easier to detect threats within the information system. It contains enriched compromised evidences in order to reduce the time of threat analysis once detected. -https://www.gatewatcher.com/en/nos-produits/last-info-sec +## Gatewatcher +Gatewatcher is a European leader in advanced threat detection, protecting critical networks of large Enterprises and Government organisations since 2015. +## Gatewatcher CTI +The Gatewatcher CTI (Cyber Threat Intelligence) offer is compatible with all cybersecurity solutions.
It immediately enhances your detection with contextual information about internal and external cyber threats specifically targeting your business. + +## Cortex Integration +This cortex analyzer allows you to search for an IOC (url, hash, host/domain) in the Gatewatcher CTI database. + +## How to obtain credentials? +If you want to try our freemium offer, you can obtain your API key: https://info.gatewatcher.com/en/lp-free-ioc-analysis-api-key + +If you want more you can contact us: https://info.gatewatcher.com/fr/speed-meeting-lastinfosec + +## TheHive Integration +With this cortex integration, we also provide you with templates for TheHive available in the [thehive-templates](../../thehive-templates/Gatewatcher_CTI_1_0) directory. + +![](assets/Gatewatcher_CTI_long.png) \ No newline at end of file diff --git a/analyzers/HybridAnalysis/HybridAnalysis_GetReport.json b/analyzers/HybridAnalysis/HybridAnalysis_GetReport.json index 2f1bac8bc..060921da0 100644 --- a/analyzers/HybridAnalysis/HybridAnalysis_GetReport.json +++ b/analyzers/HybridAnalysis/HybridAnalysis_GetReport.json @@ -4,18 +4,11 @@ "author": "Daniil Yugoslavskiy, Tieto", "url": "https://github.com/TheHive-Project/Cortex-Analyzers", "license": "AGPL-V3", - "dataTypeList": ["hash", "file", "filename"], + "dataTypeList": ["hash", "file", "filename", "url", "domain"], "description": "Fetch Hybrid Analysis reports associated with hashes and filenames.", "command": "HybridAnalysis/HybridAnalysis_analyzer.py", "baseConfig": "HybridAnalysis", "configurationItems": [ - { - "name": "secret", - "description": "HybridAnalysis secret", - "type": "string", - "multi": false, - "required": true - }, { "name": "key", "description": "API key", diff --git a/analyzers/HybridAnalysis/HybridAnalysis_analyzer.py b/analyzers/HybridAnalysis/HybridAnalysis_analyzer.py index 5f6b35d6f..28ca04c09 100755 --- a/analyzers/HybridAnalysis/HybridAnalysis_analyzer.py +++ b/analyzers/HybridAnalysis/HybridAnalysis_analyzer.py @@ -14,6 +14,7 @@
import hashlib import requests import time +from datetime import datetime from requests.auth import HTTPBasicAuth from cortexutils.analyzer import Analyzer @@ -22,11 +23,10 @@ class VxStreamSandboxAnalyzer(Analyzer): def __init__(self): Analyzer.__init__(self) - self.basic_url = 'https://www.hybrid-analysis.com/api/' - self.headers = {'User-Agent': 'VxStream'} + self.basic_url = 'https://www.hybrid-analysis.com/api/v2/' - self.secret = self.get_param('config.secret', None, 'VxStream Sandbox secret key is missing') self.api_key = self.get_param('config.key', None, 'VxStream Sandbox API key is missing') + self.headers = {'User-Agent': 'VxStream', 'api-key': self.api_key, 'accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'} def summary(self, raw_report): taxonomies = [] @@ -35,20 +35,52 @@ def summary(self, raw_report): level = "info" namespace = "HybridAnalysis" predicate = "Threat level" - value = "No verdict" + value = "Unknown" + + verdicts = { + "no specific threat": 0, + "whitelisted": 1, + "suspicious": 2, + "malicious": 3, + } # define json keys to loop if self.data_type in ['hash', 'file']: - minireports = raw_report.get('results').get('response') - elif self.data_type in ['filename']: - minireports = raw_report.get('results').get('response').get('result') + minireports = raw_report["results"] + elif self.data_type in ['filename', 'url', 'domain']: + minireports = raw_report["results"]["result"] if len(minireports) != 0: - # get first report with not Null verdict + # Previous solution was looping through the report and taking the first one that was not empty + # Better solution: loop through all the last verdicts (less than an hour from last one) and take the worst verdict + # In some cases, HA returns a verdict with "No specific threat" but the one just before (few seconds) from the same scan and different tool was tagged malicious + last_verdict_time = None + last_verdict = None + highest_threat_score = None + for minireport in
minireports: - if minireport.get('verdict') is not None: - report_verdict = minireport.get('verdict') - break + if minireport["verdict"] is not None: + if last_verdict_time is None: + last_verdict_time = int(datetime.timestamp(datetime.strptime(minireport["analysis_start_time"][:10] + minireport["analysis_start_time"][11:19], "%Y-%m-%d%H:%M:%S"))) + last_verdict = minireport["verdict"] + # Set the initial threat score + highest_threat_score = minireport.get("threat_score") + else: + new_verdict_time = int(datetime.timestamp(datetime.strptime(minireport["analysis_start_time"][:10] + minireport["analysis_start_time"][11:19], "%Y-%m-%d%H:%M:%S"))) + if abs(last_verdict_time - new_verdict_time) <= 3600: + last_verdict_time = new_verdict_time + try: + if verdicts[minireport["verdict"]] > verdicts[last_verdict]: + last_verdict = minireport["verdict"] + except KeyError: + continue + # Update the highest threat score if the current one is greater + current_threat_score = minireport.get("threat_score") + if current_threat_score is not None: + if highest_threat_score is None or current_threat_score > highest_threat_score: + highest_threat_score = current_threat_score + + report_verdict = last_verdict # create shield badge for short.html if report_verdict == 'malicious': @@ -63,6 +95,11 @@ def summary(self, raw_report): elif report_verdict == 'no specific threat': level = 'info' value = "No Specific Threat" + + # Add the highest threat score if available + if highest_threat_score is not None: + value = f"{value} (Threat Score: {highest_threat_score})" + else: level = 'info' value = "Unknown" @@ -71,39 +108,48 @@ def summary(self, raw_report): return {"taxonomies": taxonomies} def run(self): - try: if self.data_type == 'hash': - query_url = 'scan/' - query_data = self.get_param('data', None, 'Hash is missing') + query_url = 'search/hash' + query_data = {'hash': self.get_param('data', None, 'Hash is missing')} elif self.data_type == 'file': - query_url = 'scan/' + query_url = 
'search/hash' hashes = self.get_param('attachment.hashes', None) if hashes is None: filepath = self.get_param('file', None, 'File is missing') - query_data = hashlib.sha256(open(filepath, 'rb').read()).hexdigest() + query_data = {'hash': hashlib.sha256(open(filepath, 'rb').read()).hexdigest()} else: # find SHA256 hash - query_data = next(h for h in hashes if len(h) == 64) + query_data = {'hash': next(h for h in hashes if len(h) == 64)} elif self.data_type == 'filename': - query_url = 'search?query=filename:' - query_data = '"{}"'.format(self.get_param('data', None, 'Filename is missing')) + query_url = 'search/terms' + query_data = {'filename': self.get_param('data', None, 'Filename is missing')} + + elif self.data_type == 'url': + query_url = 'search/terms' + query_data = {'url': self.get_param('data', None, 'URL is missing')} + + elif self.data_type == 'domain': + query_url = 'search/terms' + query_data = {'domain': self.get_param('data', None, 'Domain is missing')} + else: self.notSupported() - url = str(self.basic_url) + str(query_url) + str(query_data) + url = str(self.basic_url) + str(query_url) error = True while error: - r = requests.get(url, headers=self.headers, auth=HTTPBasicAuth(self.api_key, self.secret), verify=True) - if "error" in r.json().get('response'): - if "Exceeded maximum API requests per minute(5)" in r.json().get('response').get('error'): + r = requests.post(url, headers=self.headers, data=query_data, verify=True) + + if "validation_errors" in r.json(): + if "Exceeded maximum API requests per minute(5)" in r.json()["validation_errors"][0]["errors"]: time.sleep(60) else: - self.error(r.json().get('response').get('error')) + self.error(r.json()["validation_errors"][0]["errors"][0]) else: error = False @@ -115,3 +161,4 @@ def run(self): if __name__ == '__main__': VxStreamSandboxAnalyzer().run() + diff --git a/analyzers/Jupyter_Analyzer/Jupyter_Run_Notebook_Analyzer.json b/analyzers/Jupyter_Analyzer/Jupyter_Run_Notebook_Analyzer.json index 
4223f074d..b2fc90e9e 100644 --- a/analyzers/Jupyter_Analyzer/Jupyter_Run_Notebook_Analyzer.json +++ b/analyzers/Jupyter_Analyzer/Jupyter_Run_Notebook_Analyzer.json @@ -18,7 +18,8 @@ "registry", "regexp", "other", - "filename" + "filename", + "mail-subject" ], "description": "Execute a parameterized notebook in Jupyter", "baseConfig": "Jupyter", diff --git a/analyzers/KasperskyTIP/KasperskyTIP.py b/analyzers/KasperskyTIP/KasperskyTIP.py index 0783ed9c9..140b88f96 100755 --- a/analyzers/KasperskyTIP/KasperskyTIP.py +++ b/analyzers/KasperskyTIP/KasperskyTIP.py @@ -20,7 +20,7 @@ def summary(self, raw): level = "safe" elif value == "Yellow": level = "suspicious" - elif value == "Red": + elif value in ["Orange", "Red"]: level = "malicious" taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) return {'taxonomies': taxonomies} diff --git a/analyzers/LdapQuery/ldapQuery.py b/analyzers/LdapQuery/ldapQuery.py index dc5d217c5..153f43eda 100755 --- a/analyzers/LdapQuery/ldapQuery.py +++ b/analyzers/LdapQuery/ldapQuery.py @@ -4,7 +4,7 @@ from cortexutils.analyzer import Analyzer import ldap3 from ldap3 import Server, Connection, SIMPLE, SYNC, SUBTREE, ALL - +import datetime class LdapQuery(Analyzer): def __init__(self): @@ -90,6 +90,11 @@ def run(self): users.append(user) self.connection.unbind() + + for user in users: + for key, value in user.items(): + if isinstance(value, datetime.datetime): + user[key] = str(value) self.report({"results": users}) except Exception as e: diff --git a/analyzers/MISP/MISP.json b/analyzers/MISP/MISP.json index 9b85bc52f..1d5b341c3 100644 --- a/analyzers/MISP/MISP.json +++ b/analyzers/MISP/MISP.json @@ -18,7 +18,8 @@ "registry", "regexp", "other", - "filename" + "filename", + "mail-subject" ], "baseConfig": "MISP", "command": "MISP/misp.py", diff --git a/analyzers/MISPWarningLists/mispwarninglists.py b/analyzers/MISPWarningLists/mispwarninglists.py index dc8c7163d..b90c6d2f7 100755 --- 
a/analyzers/MISPWarningLists/mispwarninglists.py +++ b/analyzers/MISPWarningLists/mispwarninglists.py @@ -161,18 +161,19 @@ def run(self): "SELECT list_name, list_version, concat(subdomain, '.', domain, '.', tld) as value FROM warninglists WHERE (subdomain = '%s' or subdomain = '*') and domain = '%s' and tld = '%s'" % (subdomain, domain, tld) ) - values = self.engine.execute(sql) + with self.engine.connect() as conn: + values = conn.execute(db.text(sql)) + if values.rowcount > 0: + for row in values: + results.append( + { + key: value + for (key, value) in zip( + ["list_name", "list_version", "value"], row + ) + } + ) self.engine.dispose() - if values.rowcount > 0: - for row in values: - results.append( - { - key: value - for (key, value) in zip( - ["list_name", "list_version", "value"], row - ) - } - ) self.report({"results": results, "mode": "db", "is_uptodate": "N/A"}) def summary(self, raw): diff --git a/analyzers/MISPWarningLists/warninglists_create_db.py b/analyzers/MISPWarningLists/warninglists_create_db.py index 663b19f0b..675764a16 100755 --- a/analyzers/MISPWarningLists/warninglists_create_db.py +++ b/analyzers/MISPWarningLists/warninglists_create_db.py @@ -13,14 +13,15 @@ import psycopg2.extras -from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, Index, create_engine +from sqlalchemy.exc import ArgumentError +from sqlalchemy import Table, Column, Integer, String, MetaData, Index, create_engine from sqlalchemy.sql import select from sqlalchemy.dialects.postgresql import CIDR conn_string = "" warninglists_path = "misp-warninglists/**/list.json" -engine = create_engine(conn_string, use_batch_mode=True) +engine = create_engine(conn_string) conn = engine.connect() # UPDATE TLD FROM MOZILLA @@ -148,7 +149,10 @@ # CHECK IF OLD RELEASE ARE IN DB -s = select([warninglists.c.list_name, warninglists.c.list_version]).distinct() +try: + s = select([warninglists.c.list_name, warninglists.c.list_version]).distinct() +except ArgumentError: + s = 
select(warninglists.c.list_name, warninglists.c.list_version).distinct() last_versions = [x for x in conn.execute(s)] print(f"{len(last_versions)} list already available in db") @@ -189,13 +193,13 @@ try: warninglists_address_idx.create(engine) except: - logging.error(f"warninglists_address_idx already exists") + logging.error("warninglists_address_idx already exists") try: warninglists_hash_idx.create(engine) except: - logging.error(f"warninglists_hash_idx already exists") + logging.error("warninglists_hash_idx already exists") try: warninglists_domain_idx.create(engine) except: - logging.error(f"warninglists_domain_idx already exists") + logging.error("warninglists_domain_idx already exists") engine.dispose() diff --git a/analyzers/MSEntraID/MSEntraID.py b/analyzers/MSEntraID/MSEntraID.py new file mode 100755 index 000000000..93096f351 --- /dev/null +++ b/analyzers/MSEntraID/MSEntraID.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +# Author: @jahamilto +import requests +import traceback +from datetime import datetime, timedelta +from cortexutils.analyzer import Analyzer + +# Initialize Azure Class +class MSEntraID(Analyzer): + def __init__(self): + Analyzer.__init__(self) + self.client_id = self.get_param('config.client_id', None, 'Microsoft Entra ID Application ID/Client ID Missing') + self.client_secret = self.get_param('config.client_secret', None, 'Microsoft Entra ID Registered Application Client Secret Missing') + self.tenant_id = self.get_param('config.tenant_id', None, 'Microsoft Entra ID Tenant ID Mising') + self.time_range = self.get_param('config.lookup_range', 7) + self.lookup_limit = self.get_param('config.lookup_limit', 12) + self.state = self.get_param('config.state', None) + self.country = self.get_param('config.country', None) + + + def run(self): + Analyzer.run(self) + + if self.data_type == 'mail': + try: + self.user = self.get_data() + if not self.user: + self.error("No user supplied") + + + token_data = { + "grant_type": 
"client_credentials", + 'client_id': self.client_id, + 'client_secret': self.client_secret, + 'resource': 'https://graph.microsoft.com', + 'scope': 'https://graph.microsoft.com' + } + + filter_time = datetime.utcnow() - timedelta(days=self.time_range) + format_time = str("{}T00:00:00Z".format(filter_time.strftime("%Y-%m-%d"))) + + + + #Authenticate to the graph api + + redirect_uri = "https://login.microsoftonline.com/{}/oauth2/token".format(self.tenant_id) + token_r = requests.post(redirect_uri, data=token_data) + token = token_r.json().get('access_token') + + if token_r.status_code != 200: + self.error('Failure to obtain azure access token: {}'.format(token_r.content)) + + # Set headers for future requests + headers = { + 'Authorization': 'Bearer {}'.format(token) + } + + base_url = 'https://graph.microsoft.com/v1.0/' + + r = requests.get(base_url + "auditLogs/signIns?$filter=startsWith(userPrincipalName,'{}') and createdDateTime ge {}&$top={}".format(self.user, format_time, self.lookup_limit), headers=headers) + + # Check API results + if r.status_code != 200: + self.error('Failure to pull sign ins of user {}: {}'.format(self.user, r.content)) + else: + full_json = r.json()['value'] + + new_json = { + "filterParameters": None, + "signIns": [] + } + + # Summary statistics + risks = ex_state = ex_country = 0 + + for signin in full_json: + + success = False + + details = {} + details["signInTime"] = signin["createdDateTime"] + details["ip"] = signin["ipAddress"] + details["appName"] = signin["appDisplayName"] + details["clientApp"] = signin["clientAppUsed"] + details["resourceName"] = signin["resourceDisplayName"] + # Check how to format status result + if signin["status"]["errorCode"] == 0: + details["result"] = "Success" + success = True + else: + details["result"] = "Failure: " + signin["status"]["failureReason"] + details["riskLevel"] = signin["riskLevelDuringSignIn"] + #Increase risk counter + if details["riskLevel"] != 'none' and success: risks += 1 + + 
device = {} + device_info = signin["deviceDetail"] + device["id"] = "Not Available" if device_info["deviceId"] == "" else device_info["deviceId"] + device["deviceName"] = "Not Available" if device_info["displayName"] == "" else device_info["displayName"] + device["operatingSystem"] = device_info["operatingSystem"] + + location = {} + location_info = signin["location"] + location["city"] = location_info["city"] + location["state"] = location_info["state"] + if self.state and location["state"] != self.state and success: ex_state += 1 + location["countryOrRegion"] = location_info["countryOrRegion"] + if self.country and location["countryOrRegion"] != self.country and success: ex_country += 1 + + + cAC = "None" + for policies in signin["appliedConditionalAccessPolicies"]: + if policies["result"] == "success": + if cAC == 'None': + cAC = policies["displayName"] + else: + cAC += (", " + policies["displayName"]) + + + new_json["signIns"].append({ + "id": signin["id"], + "basicDetails": dict(details), + "deviceDetails": dict(device), + "locationDetails": dict(location), + "appliedConditionalAccessPolicies": cAC + }) + + new_json["sum_stats"] = {"riskySignIns": risks, "externalStateSignIns": ex_state, "foreignSignIns": ex_country} + new_json["filterParameters"] = "Top {} signins from the last {} days. Displaying {} signins.".format(self.lookup_limit, self.time_range, len(new_json["signIns"])) + + # Build report to return to Cortex + self.report(new_json) + + except Exception as ex: + self.error(traceback.format_exc()) + + else: + self.error('Incorrect dataType. 
"mail" expected.') + + + def summary(self, raw): + taxonomies = [] + + if len(raw.get('signIns', [])) == 0: + taxonomies.append(self.build_taxonomy('info', 'MSEntraIDSignins', 'SignIns', 'None')) + else: + taxonomies.append(self.build_taxonomy('safe', 'MSEntraIDSignins', 'Count', len(raw['signIns']))) + + stats = raw.get("sum_stats", {}) + if stats.get("riskySignIns", 0) != 0: + taxonomies.append(self.build_taxonomy('suspicious', 'MSEntraIDSignins', 'Risky', stats["riskySignIns"])) + if stats.get("externalStateSignIns", 0) != 0: + taxonomies.append(self.build_taxonomy('suspicious', 'MSEntraIDSignins', 'OutOfState', stats["externalStateSignIns"])) + if stats.get("foreignSignIns", 0) != 0: + taxonomies.append(self.build_taxonomy('malicious', 'MSEntraIDSignins', 'ForeignSignIns', stats["foreignSignIns"])) + + return {'taxonomies': taxonomies} + + +if __name__ == '__main__': + MSEntraID().run() diff --git a/analyzers/MSEntraID/MSEntraID_GetSignIns.json b/analyzers/MSEntraID/MSEntraID_GetSignIns.json new file mode 100644 index 000000000..1e11e2751 --- /dev/null +++ b/analyzers/MSEntraID/MSEntraID_GetSignIns.json @@ -0,0 +1,61 @@ +{ + "name": "MSEntraID_GetSignIns", + "version": "1.0", + "author": "@jahamilto", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Pull all Microsoft Entra ID sign ins for a user within the specified amount of time.", + "dataTypeList": ["mail"], + "command": "MSEntraID/MSEntraID.py", + "baseConfig": "MSEntraID", + "configurationItems": [ + {"name": "tenant_id", + "description": "Microsoft Entra ID Tenant ID", + "type": "string", + "multi": false, + "required": true + }, + {"name": "client_id", + "description": "Client ID/Application ID of Microsoft Entra ID Registered App", + "type": "string", + "multi": false, + "required": true + }, + {"name": "client_secret", + "description": "Secret for Microsoft Entra ID Registered Application", + "type": "string", + "multi": false, + "required": 
true + }, + {"name": "lookup_range", + "description": "Check for sign ins in the last X days. Should be between 1 and 31 days.", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 7 + }, + {"name": "lookup_limit", + "description": "Display no more than this many sign ins.", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 12 + }, + {"name": "state", + "description": "Expected sign in state (used as a taxonomy when sign ins appear outside of this area).", + "type": "number", + "multi": false, + "required": false + }, + {"name": "country", + "description": "Expected sign in country or region (used as a taxonomy when sign ins appear outside of this area).", + "type": "number", + "multi": false, + "required": false + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.microsoft.com/security/business/identity-access/microsoft-entra-id" +} diff --git a/analyzers/MSEntraID/README.md b/analyzers/MSEntraID/README.md new file mode 100644 index 000000000..55ab06fee --- /dev/null +++ b/analyzers/MSEntraID/README.md @@ -0,0 +1,49 @@ +## Microsoft Entra ID Sign In Retriever + +This analyzer retrieves the recent Microsoft Entra ID sign ins for a user. Requires the UPN of the account in question, which should be entered as a "mail" observable in TheHive. + +### Config + +To enable the analyzer, you *need* three values: +1. Microsoft Entra ID Tenant ID +2. Application ID +3. Application Secret + +The first two values can be found at any time in the application's Overview page in the Microsoft Entra ID portal. The secret must be generated and then stored in a safe place, as it is only fully visible when you first make it. + +You can also specify the limits for how far back the analyzer requests sign ins. You can specify time and count for how many sign ins get returned. + +Finally, you can specify a state and country/region.
These are used as taxonomies. If you run a query on a particular user and they return a few out-of-state sign ins, a taxonomy label will be added to the observable to reflect that. Likewise for the country/region. By default, this analyzer does not support selecting multiple states or countries, so if you have more than one that users will be signing in to, feel free to leave them blank. If the value is not configured, then the analyzer will simply not use the taxonomies. + +## Setup + +### Prereqs +User account with the Cloud Application Administrator role. +User account with the Global Administrator Role (most of the steps can be done with only the Cloud App Administrator role, but the final authorization for its API permissions requires GA). + +### Steps + +#### Creation +1. Navigate to the [Microsoft Entra ID Portal](https://entra.microsoft.com/) and sign in with the relevant administrator account. +2. Navigate to App Registrations, and create a new registration. +3. Provide a display name (this can be anything, and can be changed later). Click Register. + +#### Secret +4. Navigate to Certificates and Secrets. +5. Create a new client secret. Enter a relevant description and set a security-conscious expiration date. +6. Copy the Value. **This will only be fully visible for a short time, so you should immediately copy it and store it in a safe place**. + +#### API Permissions +7. Navigate to API permissions. +8. Add the Directory.Read.All, AuditLog.Read.All, and Policy.Read.ConditionalAccess permissions (Microsoft Graph API, application permissions). +9. Using a GA account, select the "Grant admin consent for *TENANTNAME*" button. + +10. Place the relevant values into the config within Cortex. + +## Customization + +It is possible to add a color coding system to the long report as viewed from TheHive. Specifically, you can color code the Sign Ins table so that certain ones stand out. 
+ +### Example + +Let's say you are in an organization where almost all of your users will be signing in from a single state. You could color code the table so that out-of-state sign ins are highlighted yellow, and out-of-country sign ins are highlighted in red. To enable customization like this, you must modify this analyzer's long.html to check for values within the full JSON report using the ng-style tag in the *table body > table row* element. An example exists as a comment in the long.html file at line 34. \ No newline at end of file diff --git a/responders/AzureTokenRevoker/requirements.txt b/analyzers/MSEntraID/requirements.txt similarity index 100% rename from responders/AzureTokenRevoker/requirements.txt rename to analyzers/MSEntraID/requirements.txt diff --git a/analyzers/MalwareClustering/requirements.txt b/analyzers/MalwareClustering/requirements.txt index 4c62cdeaf..07ce5ae10 100644 --- a/analyzers/MalwareClustering/requirements.txt +++ b/analyzers/MalwareClustering/requirements.txt @@ -3,5 +3,5 @@ requests pyimpfuzzy==0.5 # py2neo is EOL and older versions were deleted from pipy https://github.com/neo4j-contrib/py2neo py2neo==2021.2.4 -apiscout==1.1.5 -python-magic==0.4.22 \ No newline at end of file +apiscout +python-magic==0.4.27 diff --git a/analyzers/Malwares/malwares_api.py b/analyzers/Malwares/malwares_api.py index 896ccca35..3a8018a01 100755 --- a/analyzers/Malwares/malwares_api.py +++ b/analyzers/Malwares/malwares_api.py @@ -9,7 +9,7 @@ class Api(): def __init__(self, api_key=None): self.api_key = api_key - self.base = 'https://public.api.malwares.com/v3/' + self.base = 'https://public.api.ctx.io/api/v22/' self.version = 2 if api_key is None: raise ApiError("You must supply a valid Malwares API key.") diff --git a/analyzers/NERD/README.md b/analyzers/NERD/README.md index da4c7a327..308855738 100644 --- a/analyzers/NERD/README.md +++ b/analyzers/NERD/README.md @@ -1,10 +1,16 @@ -### Nerd -Project [Nerd](https://nerd.cesnet.cz/) aims to build an 
extensive reputation database of known sources of cyber threats. That is, a list of known malicious IP addresses or other network entities (e.g. ASNs or domain names) together with all security-relevant information about each of them. +### NERD -The analyzer comes in a single flavour that will return additional information categorization for provided ip. + +[NERD](https://nerd.cesnet.cz/) is a service provided by CESNET which collects information about malicious IP addresses +from CESNET's own detection systems as well as several public sources. +It keeps a profile of each known malicious IP address, containing all security-relevant information about the +address, and it summarizes it into a *reputation score* - a number from 0.0 (good) to 1.0 (bad) representing the amount +and confidence of recently received reports about that address. + +The analyzer comes in a single flavour that will return the reputation score and various tags for provided IP. #### Requirements -You need a valid Nerd API integration subscription to use the analyzer. +You need a valid NERD API integration subscription to use the analyzer. - Provide your API key as values for the `key` parameter. -- Default url of NERD instance is provided for `url` parameter but you could override it. \ No newline at end of file +- Default url of NERD instance is provided for `url` parameter, but you could override it. 
\ No newline at end of file diff --git a/analyzers/NERD/nerd.json b/analyzers/NERD/nerd.json index 75c30a7cf..44d4f03a0 100644 --- a/analyzers/NERD/nerd.json +++ b/analyzers/NERD/nerd.json @@ -1,6 +1,6 @@ { "name": "NERD", - "version": "1.0", + "version": "1.1", "author": "Vaclav Bartos, CESNET", "url": "https://github.com/TheHive-Project/Cortex-Analyzers", "license": "AGPL-V3", diff --git a/analyzers/NERD/nerd_analyzer.py b/analyzers/NERD/nerd_analyzer.py index 73f744533..e4a2a03ca 100755 --- a/analyzers/NERD/nerd_analyzer.py +++ b/analyzers/NERD/nerd_analyzer.py @@ -19,6 +19,7 @@ 'tor': ('Tor exit node', 'info'), 'spam': ('Spam', 'malicious'), 'reserved_ip': ('Reserved IP', 'info'), + 'whitelist': ("Whitelisted", 'safe'), } @@ -41,6 +42,9 @@ def summary(self, raw): # Reputation score (set level/color according to the score) rep = round(raw['rep'], 3) rep_level = 'safe' if rep < 0.02 else ('suspicious' if rep <= 0.5 else 'malicious') + # if the IP is on whitelist, keep the "rep" number as is, but override level to "safe", so it shows as green + if any(t[0] == "Whitelisted" for t in raw['translated_tags']): + rep_level = 'safe' taxonomies.append(self.build_taxonomy(rep_level, 'NERD', 'Rep', rep)) # Number of blacklists @@ -82,7 +86,7 @@ def run(self): self.error("Unexpected or invalid response received from server (can't parse as JSON). A possible reason can be wrong URL.") return - if resp.status_code == 404: + if resp.status_code == 404 and data.get("error") == "IP address not found": # IP not found in NERD's DB (i.e. 
it wasn't reported as malicious) self.report({ 'rep': 0.0, diff --git a/analyzers/OktaUserLookup/OktaUserLookup.json b/analyzers/OktaUserLookup/OktaUserLookup.json new file mode 100644 index 000000000..d9b37d5ef --- /dev/null +++ b/analyzers/OktaUserLookup/OktaUserLookup.json @@ -0,0 +1,31 @@ +{ + "name": "OktaUserLookup", + "author": "Martin Jaan Leesment", + "license": "AGPL-V3", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "version": "1.0", + "description": "Okta User Lookup is an analyzer for TheHive to enrich mail observables from data through the Okta users API", + "dataTypeList": ["mail"], + "baseConfig": "OktaUserLookup", + "configurationItems": [ + { + "name": "OktaOrgUrl", + "description": "Must contain your okta organisation URL. Eg: https://.okta.com", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "OktaToken", + "description": "Must contain the Okta access token.", + "type": "string", + "multi": false, + "required": true + } + ], + "command": "OktaUserLookup/oktauserlookup_analyzer.py", + "registration_required": true, + "subscription_required": false, + "free_subscription": false, + "service_homepage": "https://developer.okta.com/docs/reference/api/users/" +} diff --git a/analyzers/OktaUserLookup/oktauserlookup_analyzer.py b/analyzers/OktaUserLookup/oktauserlookup_analyzer.py new file mode 100644 index 000000000..fe159319c --- /dev/null +++ b/analyzers/OktaUserLookup/oktauserlookup_analyzer.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# encoding: utf-8 + +import asyncio +from cortexutils.analyzer import Analyzer +from okta.client import Client as OktaClient + +class OktaUserlookupAnalyzer(Analyzer): + def __init__(self): + Analyzer.__init__(self) + self.url = self.get_param('config.OktaOrgUrl', None, 'Missing Okta Organisation URL') + self.okta_token = self.get_param('config.OktaToken', None, 'Missing Okta Token') + + def summary(self, raw): + taxonomies = [] + level = "info" + namespace = "Okta" + 
predicate = "Query" + + for key, value in raw["results"].items(): + if key in ["Country Code", "Supervisory Org", "Company"]: + taxonomies.append(self.build_taxonomy(level, namespace, predicate, value)) + return {"taxonomies": taxonomies} + + def run(self): + if self.data_type == 'mail': + try: + data = self.get_param("data", None, "Data is missing") + query_parameters = {'q':f'{data}'} + okta_client = OktaClient({'orgUrl':self.url, 'token':self.okta_token}) + async_couroutine = okta_client.list_users(query_parameters) + + response = asyncio.run(async_couroutine) + + userData = dict() + if response[0]: + udt = response[0][0] + userData['Activated'] = udt.activated + userData['City'] = udt.profile.city + userData['Country Code'] = udt.profile.countryCode + userData['Department'] = udt.profile.department + userData['First Name'] = udt.profile.firstName + userData['Last Name'] = udt.profile.lastName + userData['Organization'] = udt.profile.organization + userData['Street Address'] = udt.profile.streetAddress + userData['Title'] = udt.profile.title + if 'workerStatus' in udt.profile.as_dict().keys(): + userData['Worker Status'] = udt.profile.workerStatus + userData['Identity Type'] = udt.profile.identityType + userData['Company'] = udt.profile.company + if 'on_long_leave' in udt.profile.as_dict().keys(): + userData['On Long Leave'] = udt.profile.on_long_leave + if 'supervisoryOrg' in udt.profile.as_dict().keys(): + userData['Supervisory Org'] = udt.profile.supervisoryOrg + userData['Status'] = udt.status.value + userData['Transitioning to Status'] = udt.transitioning_to_status + + self.report({"results": userData}) + except Exception as e: + self.error(str(e)) + +if __name__ == '__main__': + OktaUserlookupAnalyzer().run() diff --git a/analyzers/OktaUserLookup/requirements.txt b/analyzers/OktaUserLookup/requirements.txt new file mode 100644 index 000000000..fdaf0b263 --- /dev/null +++ b/analyzers/OktaUserLookup/requirements.txt @@ -0,0 +1,3 @@ +asyncio +cortexutils 
+okta diff --git a/analyzers/OpenCTI/OpenCTI_SearchExactObservable.json b/analyzers/OpenCTI/OpenCTI_SearchExactObservable.json index 0b059946a..a66a80201 100644 --- a/analyzers/OpenCTI/OpenCTI_SearchExactObservable.json +++ b/analyzers/OpenCTI/OpenCTI_SearchExactObservable.json @@ -18,7 +18,8 @@ "registry", "regexp", "other", - "filename" + "filename", + "mail-subject" ], "config": { "service": "search_exact" diff --git a/analyzers/OpenCTI/OpenCTI_SearchObservables.json b/analyzers/OpenCTI/OpenCTI_SearchObservables.json index 345fcf11f..290a2b008 100644 --- a/analyzers/OpenCTI/OpenCTI_SearchObservables.json +++ b/analyzers/OpenCTI/OpenCTI_SearchObservables.json @@ -18,7 +18,8 @@ "registry", "regexp", "other", - "filename" + "filename", + "mail-subject" ], "config": { "service": "search_observables" diff --git a/analyzers/OpenCTI/opencti.py b/analyzers/OpenCTI/opencti.py index a9341749c..08a25bf82 100755 --- a/analyzers/OpenCTI/opencti.py +++ b/analyzers/OpenCTI/opencti.py @@ -78,12 +78,16 @@ def run(self): # Get a list of reports containing this observable reports = opencti["api_client"].report.list( - filters=[ - { - "key": "objectContains", + filters={ + "mode": "and", + "filters": [{ + "key": "objects", "values": [observable["id"]], - } - ] + "operator": "eq", + "mode": "or", + },], + "filterGroups": [], + } ) # Strip reports data for lighter output. 
diff --git a/analyzers/PhishTank/phishtank_checkurl.py b/analyzers/PhishTank/phishtank_checkurl.py index 63946408b..bf1d2f37f 100755 --- a/analyzers/PhishTank/phishtank_checkurl.py +++ b/analyzers/PhishTank/phishtank_checkurl.py @@ -13,8 +13,9 @@ def __init__(self): def phishtank_checkurl(self, data): url = 'https://checkurl.phishtank.com/checkurl/' + postheaders = {"User-Agent": "phishtank/cortex"} postdata = {'url': data, 'format': 'json', 'app_key': self.phishtank_key} - r = requests.post(url, data=postdata) + r = requests.post(url, headers=postheaders, data=postdata) return r.json() def summary(self, raw): diff --git a/analyzers/ProofPoint/proofpoint_lookup.py b/analyzers/ProofPoint/proofpoint_lookup.py index 04f476eae..d4c87122c 100755 --- a/analyzers/ProofPoint/proofpoint_lookup.py +++ b/analyzers/ProofPoint/proofpoint_lookup.py @@ -64,7 +64,11 @@ def run(self): filename = self.get_param('attachment.name', 'noname.ext') filepath = self.get_param('file', None, 'File is missing') with open(filepath, "rb") as f: - digest = hashlib.file_digest(f, "sha256") + try: + digest = hashlib.file_digest(f, "sha256") + except AttributeError: + # python 3.9 + digest = hashlib.sha256(open(filepath, 'r').read()) sha256 = digest.hexdigest() elif self.data_type == 'hash' and len(self.get_data()) == 64: sha256 = self.get_data() diff --git a/analyzers/QrDecode/Dockerfile b/analyzers/QrDecode/Dockerfile new file mode 100644 index 000000000..b30d080a2 --- /dev/null +++ b/analyzers/QrDecode/Dockerfile @@ -0,0 +1,7 @@ +FROM python:3 +WORKDIR /worker +COPY . QrDecode +RUN test ! 
-e QrDecode/requirements.txt || pip install --no-cache-dir -r QrDecode/requirements.txt +RUN apt-get update && \ + apt-get install -y libzbar0 poppler-utils +ENTRYPOINT ["QrDecode/qrdecode.py"] diff --git a/analyzers/QrDecode/QrDecode.json b/analyzers/QrDecode/QrDecode.json new file mode 100755 index 000000000..a01c691b8 --- /dev/null +++ b/analyzers/QrDecode/QrDecode.json @@ -0,0 +1,11 @@ +{ + "name": "QrDecode", + "version": "1.0", + "author": "THA-CERT", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Extracts data from one or more QR codes.", + "dataTypeList": ["file"], + "command": "QrDecode/qrdecode.py", + "baseConfig": "QrDecode" +} diff --git a/analyzers/QrDecode/README.md b/analyzers/QrDecode/README.md new file mode 100755 index 000000000..be6d30c3f --- /dev/null +++ b/analyzers/QrDecode/README.md @@ -0,0 +1,84 @@ + +# QrDecode + +## Overview + +QrDecode is a QR code analyzer used to extracts and categorizes data from QR codes embedded in various file formats. It supports images in JPEG, PNG, GIF formats, and PDF documents. + +## Features + +- **Multi-format support:** Handles JPEG, PNG, GIF, and PDF files. +- **Data extraction:** Decodes QR codes and identifies data types such as URLs, email addresses, IP addresses, and cryptocurrency addresses. +- **Categorization:** Categorizes extracted data into predefined types and categories. +- **Report Templates:** Report templates available for readability. +- **Error handling:** Detects and reports errors in QR code reading and file format issues. 
+ +## Requirements + +The following dependencies are required for QrDecode: + +### System Libraries + +```bash +sudo apt-get install libzbar0 +sudo apt-get install poppler-utils +``` + +### Python Libraries + +```plaintext +cortexutils +pyzbar +pdf2image +pillow +``` + +To install the Python libraries, run: + +```bash +pip install -r requirements.txt +``` + +## Usage + +Once installed and configured, QrDecode analyzes files containing QR codes. The analyzer extracts data from QR codes, categorizes it, and returns the results in a structured format. For PDF files, the analyzer automatically converts each page to an image format for comprehensive analysis. It also efficiently processes multiple QR codes within a single image or PDF. + +### Running the Analyzer + +To run the analyzer, submit a file through The Hive or Cortex interface, selecting QrDecode as the analyzer. The analyzer will process the file and return results including: + +- Decoded data from QR codes +- Data types and categories + +## Results Details + +When the analyze is finished, the report can display: +* A Summary: with qualitative information about the detection + +![](assets/qrdecode-summary-report.png) + +* Stats: with information like : File Name, File Extension, Total number of QR Codes + +![](assets/qrdecode-stats.png) + +## Extracted Observables + +Moreover, these domains, IP addresses, URLs, bitcoin addresses, email addresses are added to the extracted Observables, ready to be imported and actioned in TheHive. + +![](assets/qrdecode-extracted-observables.png) + +### Error Handling + +The analyzer includes a set of predefined errors to handle cases such as unsupported file formats, failed PDF conversion, and QR code reading issues. These errors are reported back in the analysis results. + +## License + +QrDecode is licensed under the AGPL-V3 license. 
+ +## Version + +**1.0** + +## Author + +- **THA-CERT** \ No newline at end of file diff --git a/analyzers/QrDecode/assets/qrdecode-extracted-observables.png b/analyzers/QrDecode/assets/qrdecode-extracted-observables.png new file mode 100755 index 000000000..4787594b4 Binary files /dev/null and b/analyzers/QrDecode/assets/qrdecode-extracted-observables.png differ diff --git a/analyzers/QrDecode/assets/qrdecode-stats.png b/analyzers/QrDecode/assets/qrdecode-stats.png new file mode 100755 index 000000000..44e57dc78 Binary files /dev/null and b/analyzers/QrDecode/assets/qrdecode-stats.png differ diff --git a/analyzers/QrDecode/assets/qrdecode-summary-report.png b/analyzers/QrDecode/assets/qrdecode-summary-report.png new file mode 100755 index 000000000..100f25fc1 Binary files /dev/null and b/analyzers/QrDecode/assets/qrdecode-summary-report.png differ diff --git a/analyzers/QrDecode/qrdecode.py b/analyzers/QrDecode/qrdecode.py new file mode 100755 index 000000000..1e8a922c7 --- /dev/null +++ b/analyzers/QrDecode/qrdecode.py @@ -0,0 +1,286 @@ +#!/usr/bin/env python3 +# Author : THA-CERT + +import sys +import os +import re +from cortexutils.analyzer import Analyzer +from pyzbar.pyzbar import decode, ZBarSymbol +from PIL import Image +from pdf2image import convert_from_path + +class QrDecode(Analyzer): + def __init__(self): + Analyzer.__init__(self) + self.filename = self.get_param("filename", None, "Filename is missing.") + self.num_page = None + self.nb_page = None + self.file_format = None + self.message = "" + self.total_qr_codes = 0 + self.nb_qrcode = 0 + self.num_qrcode = 0 + self.results_list = [] + self.regex_ipv4 = r'\b((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b' + self.regex_domain = r'^(?!-)[A-Za-z0-9-]{1,63}(? 
0: self.cache = Cache(cache_root) - self.url = 'http://torstatus.blutmagie.de/query_export.php/Tor_query_EXPORT.csv' + self.url = 'https://torstatus.rueckgr.at/query_export.php/Tor_query_EXPORT.csv' __cache_key = __name__ + ':raw_data' diff --git a/analyzers/Triage/README.md b/analyzers/Triage/README.md index c2bb95316..f9a59a469 100644 --- a/analyzers/Triage/README.md +++ b/analyzers/Triage/README.md @@ -4,10 +4,4 @@ Triage Sandbox is a commercial malware sandbox that let's you run malware in a s You can read more about the underlying solutions at: https://hatching.io/ -Thus this analyzer requires you to have a commercial license. - -# FAQ - -### Q: There is a free tier as well, why is that not part of this analyzer? - -#### This was done because Triage is an affordable solutions that you should support! +This analyzer requires you to have a commercial license for the Recorded Future sandbox and Private sandbox. diff --git a/analyzers/Triage/Triage.json b/analyzers/Triage/Triage.json index c7b1492bc..551568273 100644 --- a/analyzers/Triage/Triage.json +++ b/analyzers/Triage/Triage.json @@ -3,8 +3,8 @@ "author": "Mikael Keri", "license": "AGPL-V3", "url": "https://github.com/TheHive-Project/Cortex-Analyzers", - "version": "1.0", - "description": "Submit artifacts to the Triage sandbox service. This analyzer requires a paid subscription", + "version": "2.0", + "description": "Submit artifacts to the Recorded Future Triage sandbox service. 
This analyzer requires a paid subscription for the Private and Recorded Future sandboxes.", "dataTypeList": ["ip", "url", "file"], "baseConfig": "Triage", "config": { @@ -22,6 +22,13 @@ "multi": false, "required": true }, + { + "name": "api_url", + "description": "Sandbox API URL: public sandbox (https://tria.ge/api), private sandbox (https://private.tria.ge/api), or Recorded Future sandbox (https://sandbox.recordedfuture.com/api)", + "type": "string", + "multi": false, + "required": true + }, { "name": "timeout", "description": "Sandbox run timeout in seconds (default: 200)", @@ -39,9 +46,9 @@ ], "registration_required": true, "subscription_required": true, - "free_subscription": false, + "free_subscription": true, "service_homepage": "https://tria.ge", - "service_logo": {"path":"assets/triage_logo.png", "caption": "logo"}, + "service_logo": {"path":"assets/recorded_future_triage_logo.png", "caption": "logo"}, "screenshots": [ {"path":"assets/triage_cortex_settings.png", "caption":"Triage analyzer cortex setting" diff --git a/analyzers/Triage/assets/recorded_future_triage_logo.png b/analyzers/Triage/assets/recorded_future_triage_logo.png new file mode 100644 index 000000000..cd57b0a50 Binary files /dev/null and b/analyzers/Triage/assets/recorded_future_triage_logo.png differ diff --git a/analyzers/Triage/assets/triage_logo.png b/analyzers/Triage/assets/triage_logo.png deleted file mode 100644 index 808813e5f..000000000 Binary files a/analyzers/Triage/assets/triage_logo.png and /dev/null differ diff --git a/analyzers/Triage/triage_analyzer.py b/analyzers/Triage/triage_analyzer.py index 20b389e55..0bbdc264f 100755 --- a/analyzers/Triage/triage_analyzer.py +++ b/analyzers/Triage/triage_analyzer.py @@ -22,7 +22,7 @@ def __init__(self): else: self.timeout = 200 - self.url = 'https://private.tria.ge/api' + self.url = self.get_param('config.api_url', 'https://private.tria.ge/api', 'Triage API URL is missing') def summary(self, raw): taxonomies = [] @@ -87,25 +87,32 @@ 
def url_submit(self, data): return retrive def run(self): + # strip api from the base URL + base_url = self.url.rstrip('api') + if self.data_type == 'ip' or self.data_type == 'url': + data = self.get_param('data', None, 'Data is missing') + + if ':' in data: + result = self.url_submit(data) + self.report({ + 'result': result, + 'url': base_url + }) + else: + self.error('Schema is missing') - if self.data_type == 'ip' or self.data_type == 'url': - data = self.get_param('data', None, 'Data is missing') - - if ':' in data: - result = self.url_submit(data) - self.report({'result': result}) - else: - self.error('Schema is missing') - - elif self.data_type == 'file': - filepath = self.get_param('file', None, 'File is missing') - filename = self.get_param('filename', basename(filepath)) + elif self.data_type == 'file': + filepath = self.get_param('file', None, 'File is missing') + filename = self.get_param('filename', basename(filepath)) - result = self.file_submit(filename, filepath) + result = self.file_submit(filename, filepath) - self.report({'result': result}) - else: - data = self.get_param('data', None, 'Data is missing') + self.report({ + 'result': result, + 'url': base_url + }) + else: + data = self.get_param('data', None, 'Data is missing') if __name__ == '__main__': TriageAnalyzer().run() diff --git a/analyzers/Virusshare/getHashes.sh b/analyzers/Virusshare/getHashes.sh index 444f5ce90..2e9b38b09 100755 --- a/analyzers/Virusshare/getHashes.sh +++ b/analyzers/Virusshare/getHashes.sh @@ -3,7 +3,7 @@ display_usage() { - echo "getHashes v0.2" + echo "getHashes v0.3" echo " Fetch all Virusshare.com hashes" echo -e "\n Usage: $0 \n" } @@ -20,17 +20,19 @@ if [ ! 
-d $1 ]; then fi -cd $1 -for u in `curl https://virusshare.com/hashes.4n6 | grep -E "VirusShare_[0-9]{5}\.md5" | c\ -ut -d\" -f2 | cut -d\/ -f2` +WD=$1 +declare -a base_urls=($(printf 'url=https://virusshare.com/hashfiles/%0.s\n' {1..1})) +declare -a base_outs=($(printf 'output=./%0.s\n' {1..1})) + +pushd $WD +while mapfile -t -n 8 ary && ((${#ary[@]})); do - echo $u - if [ -e $1/$u ]; then - echo "File already downloaded" - else - wget https://virusshare.com/hashes/$u - sleep 3 - fi - -done | tee -a ../$0.log -cd .. + rm -f ../config + IFS=, + eval echo "${base_urls[*]}"{"${ary[*]}"} | tr " " "\n" >> ../config + eval echo "${base_outs[*]}"{"${ary[*]}"} | tr " " "\n" >> ../config + curl -s -N --parallel --parallel-immediate --parallel-max 8 --config config | tee -a ../$0.log + sleep 3 +done <<< `curl -s -L https://virusshare.com/hashes.4n6 | grep -E "VirusShare_[0-9]{5}\.md5" | cut -d\" -f2 | cut -d\/ -f2` +popd + diff --git a/docs/admin_guides/how-to-upgrade-analyzers-responders.md b/docs/admin_guides/how-to-upgrade-analyzers-responders.md new file mode 100644 index 000000000..d7a48ca1f --- /dev/null +++ b/docs/admin_guides/how-to-upgrade-analyzers-responders.md @@ -0,0 +1,66 @@ +# How to upgrade analyzers & responders to the latest version + +This guide outlines the steps to take when there is a new release of Cortex-Analyzers so that you can benefit from the new or updated analyzers and responders. + +There are three steps to perform, two of which require user action: + +1. **Catalog Update** (automatic) +2. **Configure Analyzers & Responders in Cortex** (user action required) +3. **Update Analyzers' Report Templates** (user action required) + + +## Step 1: Catalog Update + +With **TheHive version 5.0.14 and above** and **Cortex version 3.1.7 and above**, Cortex automatically fetches and updates the catalog. 
As a result, you may receive a notification in TheHive indicating that action is required if there is any new version of an analyzer or responder you are already using. + +This notification can be seen in the *bottom left* corner of your TheHive interface. + +![TheHive Notification for new analyzers/responders](../images/cortex-thehive-analyzers-upgrade-notification.png) + +Clicking on it will open a drawer indicating if there are any obsolete analyzers or responders. + +![TheHive Obsolete Analyzers](<../images/thehive-cortex-obsolete-drawer.png>) + +## Step 2: Configure Analyzers & Responders in Cortex + +### 2a. Setting Up Newly Available Analyzers or Responders + +When new analyzers or responders are available, please refer to the [changelog](https://thehive-project.github.io/Cortex-Analyzers/CHANGELOG/) to review the new additions so you don't miss anything. + +Then, perform the following steps: + +- **Log in to Cortex** as an Org Administrator +- **Refresh Analyzers and Responders** by navigating to the ***Organization*** section, selecting the ***Analyzers*** and ***Responders*** tab and pressing the ***Refresh*** button. +- **Enable new analyzers and responders** you wish to use. +- **Configure the settings and authentication parameters** as needed. + +![refresh responders](../images/refresh-responders.png) + +### 2b. Updating Obsolete Analyzers or Responders + +Analyzers or responders become obsolete when a new version is available. + +#### Check for Updates in Cortex + +- **Log in to Cortex** as an Org Administrator to review available updates. +- Look out for any **red badge notifications**, as they indicate actions that need your attention. +- **Refresh Analyzers and Responders** by navigating to the ***Organization*** section, selecting the ***Analyzers*** and ***Responders*** tab and pressing the ***Refresh*** button. 
+ +![obsolete analyzer refresh](../images/obsolete-analyzer-refresh.png) + +#### Update Your Configuration + +- If there is a version increment, **disable older versions** that are no longer needed, and enable the new versions by pressing the "Enable" button on the newer one. +- **Configure the settings and authentication parameters** as needed. + + +![enable analyzer](../images/enable-analyzer.png) + + +## Step 3: Update the Analyzers' Report Templates + +If you're using **TheHive 5**, remember to always **import the new report templates** into your instance. This step is essential for an optimal experience with the updated analyzers and responders. Otherwise, you may encounter issues with the report templates for the new analyzers. + +Refer to the [official documentation on how to update Analyzers templates](https://docs.strangebee.com/thehive/administration/analyzers-templates/) in your TheHive tenant. + +![update-analyzers-template](../images/update-analyzers-template.png) diff --git a/docs/images/cortex-thehive-analyzers-upgrade-notification.png b/docs/images/cortex-thehive-analyzers-upgrade-notification.png new file mode 100644 index 000000000..bea591916 Binary files /dev/null and b/docs/images/cortex-thehive-analyzers-upgrade-notification.png differ diff --git a/docs/images/enable-analyzer.png b/docs/images/enable-analyzer.png new file mode 100644 index 000000000..1542d1db6 Binary files /dev/null and b/docs/images/enable-analyzer.png differ diff --git a/docs/images/obsolete-analyzer-refresh.png b/docs/images/obsolete-analyzer-refresh.png new file mode 100644 index 000000000..e42d8ee33 Binary files /dev/null and b/docs/images/obsolete-analyzer-refresh.png differ diff --git a/docs/images/refresh-responders.png b/docs/images/refresh-responders.png new file mode 100644 index 000000000..8496780ff Binary files /dev/null and b/docs/images/refresh-responders.png differ diff --git a/docs/images/thehive-cortex-obsolete-drawer.png 
b/docs/images/thehive-cortex-obsolete-drawer.png new file mode 100644 index 000000000..6586993e3 Binary files /dev/null and b/docs/images/thehive-cortex-obsolete-drawer.png differ diff --git a/docs/images/update-analyzers-template.png b/docs/images/update-analyzers-template.png new file mode 100644 index 000000000..a6bde2749 Binary files /dev/null and b/docs/images/update-analyzers-template.png differ diff --git a/responders/AWSLambda/AWSInvokeLambda.json b/responders/AWSLambda/AWSInvokeLambda.json new file mode 100755 index 000000000..291b7ce63 --- /dev/null +++ b/responders/AWSLambda/AWSInvokeLambda.json @@ -0,0 +1,70 @@ +{ + "name": "AWSLambda_InvokeFunction", + "version": "1.0", + "author": "nusantara-self,StrangeBee", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Invokes the configured AWS Lambda function", + "dataTypeList": ["thehive:case", "thehive:alert", "thehive:case_artifact", "thehive:task"], + "command": "AWSLambda/AWSInvokeLambda.py", + "baseConfig": "AWSLambda", + "configurationItems": [ + { + "name": "aws_access_key_id", + "description": "AWS Access Key ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "" + }, + { + "name": "aws_secret_access_key", + "description": "AWS Secret Access Key", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "" + }, + { + "name": "aws_region", + "description": "AWS Region", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "us-east-1" + }, + { + "name": "lambda_function_name", + "description": "Name of the AWS Lambda function to invoke", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "" + }, + { + "name": "invocation_type", + "description": "Invocation type for the lambda function. Default is 'RequestResponse'. 
Change to 'Event' for asynchronous invocation.", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "RequestResponse" + }, + { + "name": "add_tag_to_case", + "description": "Add a tag to case mentioning the AWS Lambda function that was invoked", + "type": "boolean", + "multi": false, + "required": true, + "defaultValue": true + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://aws.amazon.com/lambda/", + "service_logo": { + "path": "assets/awslambda.png", + "caption": "AWS Lambda logo" + } + } + diff --git a/responders/AWSLambda/AWSInvokeLambda.py b/responders/AWSLambda/AWSInvokeLambda.py new file mode 100755 index 000000000..150e15a3b --- /dev/null +++ b/responders/AWSLambda/AWSInvokeLambda.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +from cortexutils.responder import Responder +import boto3 +import json +from botocore.exceptions import BotoCoreError, ClientError + +class AWSLambda(Responder): + def __init__(self): + Responder.__init__(self) + self.aws_access_key_id = self.get_param('config.aws_access_key_id', None, 'AWS Access Key ID missing') + self.aws_secret_access_key = self.get_param('config.aws_secret_access_key', None, 'AWS Secret Access Key missing') + self.aws_region = self.get_param('config.aws_region', None, 'AWS Region missing') + self.lambda_function_name = self.get_param('config.lambda_function_name', None, 'Lambda Function Name missing') + self.invocation_type = self.get_param('config.invocation_type', None, 'RequestResponse') + self.add_tag_to_case = self.get_param('config.add_tag_to_case', True) + + def run(self): + Responder.run(self) + + payload_data = self.get_param("data", None, "No data was passed from TheHive") + + # Initialize a session using boto3 + session = boto3.Session( + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.aws_secret_access_key, + region_name=self.aws_region + ) + + # Initialize the Lambda 
client + lambda_client = session.client('lambda') + + try: + # Invoke the Lambda function + response = lambda_client.invoke( + FunctionName=self.lambda_function_name, + InvocationType=self.invocation_type, + Payload=json.dumps(payload_data) + ) + + + if self.invocation_type == 'Event': + # In case of async invocations (Event) , there is no response payload + message = f'Lambda function {self.lambda_function_name} invoked asynchronously (Event mode). Invocation acknowledged, no response payload.' + self.report({"message": message}) + return + + if 'FunctionError' in response: + self._handle_error( + message="Error from Lambda function", + error_type='LambdaFunctionError', + details=response.get('FunctionError', 'Unknown function error'), + additional_info=None + ) + return + + # Extract and decode response payload + response_payload = json.loads(response['Payload'].read()) + message=f'Lambda function {self.lambda_function_name} invoked successfully: {response_payload}' + self.report({"message": message}) + + except BotoCoreError as e: + self._handle_error( + message="BotoCoreError occurred", + error_type='BotoCoreError', + details=str(e) + ) + + except ClientError as e: + error_message = e.response['Error']['Message'] + self._handle_error( + message="ClientError occurred", + error_type='ClientError', + details=error_message, + additional_info=e.response + ) + + except Exception as e: + self._handle_error( + message="An unexpected exception occurred", + error_type='GeneralException', + details=str(e) + ) + + def _handle_error(self, message, error_type, details, additional_info=None): + """Helper function to handle errors and return a string message.""" + error_message = f"[{error_type}] {message}: {details} \n\nAdditional info: {additional_info}" + self.error(error_message) + + def operations(self, raw): + operations = [] + if self.add_tag_to_case: + tag = f"AWSLambdaInvoked-{self.lambda_function_name}" + operations.append(self.build_operation('AddTagToCase', 
tag=tag)) + return operations + +if __name__ == '__main__': + AWSLambda().run() diff --git a/responders/AWSLambda/Dockerfile b/responders/AWSLambda/Dockerfile new file mode 100755 index 000000000..0a745d7f0 --- /dev/null +++ b/responders/AWSLambda/Dockerfile @@ -0,0 +1,6 @@ +FROM python:3 + +WORKDIR /worker +COPY . AWSInvokeLambda +RUN test ! -e AWSInvokeLambda/requirements.txt || pip install --no-cache-dir -r AWSInvokeLambda/requirements.txt +ENTRYPOINT AWSInvokeLambda/AWSInvokeLambda.py diff --git a/responders/AWSLambda/README.md b/responders/AWSLambda/README.md new file mode 100644 index 000000000..47e4a9493 --- /dev/null +++ b/responders/AWSLambda/README.md @@ -0,0 +1,42 @@ +### AWS Lambda Responder + +This responder triggers an AWS Lambda function using the provided credentials and configuration, directly from TheHive. By default, it can be triggered from an alert, case, observable, task and sends the data of the object as input to the AWS Lambda Function for its execution. +Make sure to manage these different objects appropriately if needed. + +#### Setup example +- Log in to your [AWS Management Console](https://aws.amazon.com/console/) go to **IAM** +- Create a **new IAM user** (e.g. 
CortexAWSlambda-invoke-responder) with AWS Credentials type : Access key - Programmatic +- Choose **attach policies directly** and attach a policy you created with least privilege, for example: +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "lambda:InvokeFunction" + ], + "Resource": [ + "arn:aws:lambda:::function:" + ] + } + ] +} +``` +- Go to your newly created user, to **Security tab** and create **access key** for an **Application running outside AWS** +- Configure properly the responder with the right credentials & aws region + +#### Successful Execution + +When an execution is successful in `RequestResponse` mode, the responder will be marked as "Success" with a report message in the following format: + +``` +{ "message": "Lambda function '' invoked successfully.", "response": "" } +``` + +#### Failed Execution + +When an execution fails in `RequestResponse` mode, the responder will be marked as "Failure" with a report message in the following format: +``` +"[{error_type}] {message}: {details}\n\nAdditional info: {additional_info}" +``` diff --git a/responders/AWSLambda/assets/awslambda.png b/responders/AWSLambda/assets/awslambda.png new file mode 100644 index 000000000..01a84ca17 Binary files /dev/null and b/responders/AWSLambda/assets/awslambda.png differ diff --git a/responders/AWSLambda/requirements.txt b/responders/AWSLambda/requirements.txt new file mode 100755 index 000000000..e996eb0fb --- /dev/null +++ b/responders/AWSLambda/requirements.txt @@ -0,0 +1,2 @@ +cortexutils +boto3 \ No newline at end of file diff --git a/responders/AWX/awx.json b/responders/AWX/awx.json new file mode 100644 index 000000000..e70cde651 --- /dev/null +++ b/responders/AWX/awx.json @@ -0,0 +1,52 @@ +{ + "name": "AWX_StartJob", + "version": "1.0", + "author": "Tim Muehlhausen", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Start a job on AWX", + "dataTypeList": 
["thehive:case_artifact"], + "command": "AWX/awx.py", + "baseConfig": "AWX", + "configurationItems": [ + { + "name": "url", + "description": "The URL to your AWX instance, expl. https://awx.intern.foo.de", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "username", + "description": "The AWX user", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "password", + "description": "The AWX user password", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "cert_path", + "description": "If you need a certificate to authentificate to your AWX, expl. /etc/ssl/certs/foo.de.pem", + "type": "string", + "multi": false, + "required": false + }, + { + "name": "workflow_id", + "description": "The ID of the workflow to execute", + "type": "string", + "multi": false, + "required": true + } + ], + "registration_required": false, + "subscription_required": false, + "free_subscription": false, + "service_homepage": "https://www.ansible.com/awx/" +} diff --git a/responders/AWX/awx.py b/responders/AWX/awx.py new file mode 100644 index 000000000..3f92216cf --- /dev/null +++ b/responders/AWX/awx.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +from cortexutils.responder import Responder +import requests +import json + +class AWX(Responder): + def __init__(self): + Responder.__init__(self) + self.url = self.get_param("config.url", "") + self.username = self.get_param("config.username","") + self.password = self.get_param("config.password","") + self.workflow_id = self.get_param("config.workflow_id", "") + self.observable_all = self.get_param('data', None, 'Data missing!') + self.cert_path = self.get_param('config.cert_path') + + def run(self): + Responder.run(self) + headers = { + 'Content-Type': 'application/json' + } + payload = { + 'extra_vars': json.dumps(self.observable_all) + } + # Start the job + job_start_endpoint = self.url + '/api/v2/job_templates/' + self.workflow_id + '/launch/' + + try: + response = 
requests.post( + job_start_endpoint, + headers=headers, + auth=(self.username, self.password), + data=json.dumps(payload), + verify=self.cert_path if self.cert_path else False + ) + + response.raise_for_status() + + if response.status_code == 201: + self.report({"Message": "Executed AWX job successfully"}) + else: + error_msg = response.json().get('detail', 'Unknown error') + self.error(f"AWX job execution returned unexpected status {response.status_code}: {error_msg}") + except requests.exceptions.SSLError as ssl_err: + self.error(f"SSL Error: {str(ssl_err)}") + except requests.exceptions.ConnectionError as conn_err: + self.error(f"Connection Error: {str(conn_err)}") + except requests.exceptions.Timeout as timeout_err: + self.error(f"Request Timeout: {str(timeout_err)}") + except requests.exceptions.RequestException as req_err: + try: + # Try to get additional details from the JSON response + error_details = response.json().get('detail', 'No additional error details available.') + except json.JSONDecodeError: + error_details = 'Failed to parse error details from response.' 
+ + self.error(f"Request Error: {str(req_err)} - Details: {error_details}") + except Exception as unexpected_err: + self.error(f"Unexpected Error: {str(unexpected_err)}") + + +if __name__ == '__main__': + AWX().run() diff --git a/responders/AWX/requirements.txt b/responders/AWX/requirements.txt new file mode 100644 index 000000000..6aabc3cfa --- /dev/null +++ b/responders/AWX/requirements.txt @@ -0,0 +1,2 @@ +cortexutils +requests diff --git a/responders/AzureTokenRevoker/AzureTokenRevoker.json b/responders/AzureTokenRevoker/AzureTokenRevoker.json deleted file mode 100644 index 41e983e8e..000000000 --- a/responders/AzureTokenRevoker/AzureTokenRevoker.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "name": "AzureTokenRevoker", - "version": "1.0", - "author": "Daniel Weiner @dmweiner", - "url": "https://github.com/TheHive-Project/Cortex-Analyzers", - "license": "AGPL-V3", - "description": "Revoke all Microsoft Azure authentication session tokens for a list of User Principal Names", - "dataTypeList": ["thehive:case"], - "command": "AzureTokenRevoker.py", - "baseConfig": "AzureTokenRevoker", - "configurationItems": [ - {"name": "redirect_uri", - "description": "Azure AD Application URI (Example: https://login.microsoftonline.com/TENANTIDHERE/oauth2/token)", - "type": "string", - "multi": false, - "required": true - }, - {"name": "client_id", - "description": "Client ID/Application ID of Azure AD Registered App", - "type": "string", - "multi": false, - "required": true - }, - {"name": "client_secret", - "description": "Secret for Azure AD Registered Application", - "type": "string", - "multi": false, - "required": true - } - ] - -} diff --git a/responders/AzureTokenRevoker/AzureTokenRevoker.py b/responders/AzureTokenRevoker/AzureTokenRevoker.py deleted file mode 100755 index 7fee2ea90..000000000 --- a/responders/AzureTokenRevoker/AzureTokenRevoker.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -# encoding: utf-8 -# Author: Daniel Weiner @dmweiner -import requests 
-import traceback -import datetime -from cortexutils.responder import Responder - -# Initialize Azure Class -class AzureTokenRevoker(Responder): - def __init__(self): - Responder.__init__(self) - self.client_id = self.get_params('config.client_id', None, 'Azure AD Application ID/Client ID Missing') - self.client_secret = self.get_params('config.client_secret', None, 'Azure AD Registered Application Client Secret Missing') - self.redirect_uri = self.get_params('config.redirect_uri', None, 'Set a redirect URI in Azure AD Registered Application. (ex. https://logon.microsoftonline./oauth2/token)') - self.time = '' - def run(self): - try: - self.user = self.get_params('data.data', None, 'No UPN supplied to revoke credentials for') - if not self.user: - self.error("No user supplied") - base_resource = "https://graph.microsoft.com" - - token_data = { - "grant_type": "client_credentials", - 'client_id': self.client_id, - 'client_secret': self.client_secret, - 'resource': 'https://graph.microsoft.com', - 'scope': 'https://graph.microsoft.com' - } - - - #Authenticate to the graph api - - token_r = requests.post(self.redirect_uri, data=token_data) - token = token_r.json().get('access_token') - - if token_r.status_code != 200: - self.error('Failure to obtain azure access token: {}'.format(token_r.content)) - - # Set headers for future requests - headers = { - 'Authorization': 'Bearer {}'.format(token) - } - - base_url = 'https://graph.microsoft.com/v1.0/' - - r = requests.post(base_url + 'users/{}/revokeSignInSessions'.format(self.user), headers=headers) - - if r.status_code != 200: - self.error('Failure to revoke access tokens of user {}: {}'.format(self.user, r.content)) - - else: - #record time of successful auth token revokation - self.time = datetime.datetime.utcnow() - - except Exception as ex: - self.error(traceback.format_exc()) - # Build report to return to Cortex - full_report = {"message": "User {} authentication tokens successfully revoked at {}".format(self.user, 
self.time)} - self.report(full_report) - - -if __name__ == '__main__': - AzureTokenRevoker().run() diff --git a/responders/Cloudflare_IP_Blocker/CloudflareIPBlocker.json b/responders/Cloudflare_IP_Blocker/CloudflareIPBlocker.json new file mode 100644 index 000000000..73e82c714 --- /dev/null +++ b/responders/Cloudflare_IP_Blocker/CloudflareIPBlocker.json @@ -0,0 +1,38 @@ +{ + "name": "Cloudflare_IP_Blocker", + "version": "1.0", + "author": "Nick Babkin @nickbabkin", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Block IP Address on Account level in Cloudflare", + "dataTypeList": ["thehive:case_artifact"], + "command": "Cloudflare_IP_Blocker/CloudflareIPBlocker.py", + "baseConfig": "CloudflareIPBlocker", + "configurationItems": [ + { + "name": "cloudflare_api_key", + "description": "Cloudflare API Key", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "cloudflare_account_ids", + "description": "Cloudflare Account IDs to block IP address in", + "type": "string", + "multi": true, + "required": true + }, + { + "name": "cloudflare_action", + "description": "Cloudflare Action: block, challenge, whitelist, js_challenge or managed_challenge", + "type": "string", + "multi": false, + "required": true + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.cloudflare.com" +} diff --git a/responders/Cloudflare_IP_Blocker/CloudflareIPBlocker.py b/responders/Cloudflare_IP_Blocker/CloudflareIPBlocker.py new file mode 100755 index 000000000..da1ef7df5 --- /dev/null +++ b/responders/Cloudflare_IP_Blocker/CloudflareIPBlocker.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +# Author: Nick Babkin @nickbabkin +import requests +import traceback +import datetime +import os +from cortexutils.responder import Responder + +# Initialize Cloudflare Responder Class +class CloudflareIPBlocker(Responder): + + def 
__init__(self): + Responder.__init__(self) + self.cloudflare_api_key = self.get_param('config.cloudflare_api_key', None, 'Cloudflare API Key') + self.cloudflare_account_ids = self.get_param('config.cloudflare_account_ids', [], 'Cloudflare Account ID') + self.cloudflare_action = self.get_param('config.cloudflare_action', [], 'Cloudflare Action') + self.time = '' + self.dataType = self.get_param('data.dataType') + + def run(self): + try: + if self.dataType== "ip": + self.ip_address = self.get_param('data.data', None, 'No IP Address supplied') + else: + self.error("No IP Address supplied") + + # Build the request payload to block the IP address + payload = { + "mode": self.cloudflare_action, + "configuration": { + "target": "ip", + "value": self.ip_address, + }, + "notes": "Blocked by Hive responder. Case number {}".format(self.get_param('data.case.caseId', None, 'No CaseID Fetched')) + } + + # Make the API request to Cloudflare + for account_id in self.cloudflare_account_ids: + url = "https://api.cloudflare.com/client/v4/accounts/{}/firewall/access_rules/rules".format(account_id) + headers = { + "Authorization": "Bearer {}".format(self.cloudflare_api_key), + "Content-Type": "application/json" + } + response = requests.post(url, headers=headers, json=payload) + + # Checking for errors + if response.status_code != 200: + self.error('Request failed with the following status: {}'.format(response.status_code)) + + else: + #record time + self.time = datetime.datetime.utcnow() + + except Exception as ex: + self.error(traceback.format_exc()) + # Build report to return to Cortex + full_report = {"message": "IP Address {} successfully blocked at {}".format(self.ip_address, self.time)} + self.report(full_report) + + def operations(self, raw): + return [self.build_operation('AddTagToArtifact', tag='Cloudflare:Blocked')] + + +if __name__ == '__main__': + CloudflareIPBlocker().run() diff --git a/responders/Cloudflare_IP_Blocker/requirements.txt 
b/responders/Cloudflare_IP_Blocker/requirements.txt new file mode 100644 index 000000000..98df81c2f --- /dev/null +++ b/responders/Cloudflare_IP_Blocker/requirements.txt @@ -0,0 +1,3 @@ +cortexutils +requests +datetime \ No newline at end of file diff --git a/responders/Duo_Security/DuoBypassUserAccount.json b/responders/Duo_Security/DuoBypassUserAccount.json new file mode 100644 index 000000000..8bafe7758 --- /dev/null +++ b/responders/Duo_Security/DuoBypassUserAccount.json @@ -0,0 +1,34 @@ +{ + "name": "DuoBypassUserAccount", + "version": "1.0", + "author": "jahamilto", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Put User Account into Bypass mode in Duo Security via AdminAPI (The user will not be prompted when logging in.)", + "dataTypeList": ["thehive:case_artifact"], + "command": "Duo_Security/duoBypassUserAccount.py", + "baseConfig": "Duo_Security_main", + "configurationItems": [ + { + "name": "API_hostname", + "description": "Duo Admin API hostname, api-XXXXXXXX.duosecurity.com", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "Integration_Key", + "description": "Integration Key", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "Secret_Key", + "description": "Secret Key", + "type": "string", + "multi": false, + "required": true + } + ] +} diff --git a/responders/Duo_Security/duoBypassUserAccount.py b/responders/Duo_Security/duoBypassUserAccount.py new file mode 100644 index 000000000..dc16e3e31 --- /dev/null +++ b/responders/Duo_Security/duoBypassUserAccount.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# encoding: utf-8 + +from cortexutils.responder import Responder +import requests +import duo_client +from datetime import datetime + +class DuoBypassUserAccount(Responder): + def __init__(self): + Responder.__init__(self) + self.API_hostname = self.get_param('config.API_hostname', None, "API hostname is missing") + self.iKey = 
self.get_param('config.Integration_Key', None, "Integration Key is missing") + self.sKey = self.get_param('config.Secret_Key', None, "Secret Key is missing") + + def run(self): + Responder.run(self) + + if self.get_param('data.dataType') == 'username': + + str_username = self.get_param('data.data', None, 'No artifacts available') + + admin_api = duo_client.Admin(self.iKey, self.sKey, self.API_hostname) + + response = admin_api.get_users_by_name(username=str_username) + +# print(response) + + user_id=response[0]["user_id"] + +# print("user_id:",user_id) + + r = admin_api.update_user(user_id=user_id,status='bypass') + +# print("response:",r) + + if r.get('status') == 'bypass': + self.report({'message': 'User is in bypass mode in Duo Security.'}) + else: + self.error('Failed to put User Account in bypass mode in Duo.') + else: + self.error('Incorrect dataType. "username" expected.') + + def operations(self, raw): + return [self.build_operation('AddTagToArtifact', tag='Duo User: bypass')] + +if __name__ == '__main__': + DuoBypassUserAccount().run() diff --git a/responders/EclecticIQIndicator/EclecticIQIndicator.json b/responders/EclecticIQIndicator/EclecticIQIndicator.json new file mode 100644 index 000000000..6e3eefbae --- /dev/null +++ b/responders/EclecticIQIndicator/EclecticIQIndicator.json @@ -0,0 +1,35 @@ +{ + "name": "EclecticIQ_Indicator_API", + "version": "1.0", + "author": "EclecticIQ", + "url": "https://eclecticiq.com", + "license": "MIT", + "description": "Submit indicators to the EclecticIQ Intelligence Center api", + "dataTypeList": ["thehive:case_artifact", "thehive:case"], + "command": "EclecticIQIndicator/EclecticIQIndicator.py", + "baseConfig": "EclecticIQIndicator", + "configurationItems": [ + { + "name": "eiq_host_url", + "description": "EclecticIQ Intelligence Center host url", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "eiq_api_key", + "description": "EclecticIQ Intelligence Center API key", + "type": "string", + 
"multi": false, + "required": true + }, + { + "name": "group_name", + "description": "EclecticIQ Intelligence Center Group Name", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "Testing Group" + } + ] +} diff --git a/responders/EclecticIQIndicator/EclecticIQIndicator.py b/responders/EclecticIQIndicator/EclecticIQIndicator.py new file mode 100755 index 000000000..ba95d4cdc --- /dev/null +++ b/responders/EclecticIQIndicator/EclecticIQIndicator.py @@ -0,0 +1,361 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +from datetime import datetime +from typing import Optional + +import requests +import traceback +import uuid + +from cortexutils.responder import Responder + + +SEVERITY_MAP = { + 1: "LOW", + 2: "MEDIUM", + 3: "HIGH", + 4: "CRITICAL", +} +CONFIDENCE_MAP = {1: "Low", 2: "Medium", 3: "High", 4: "High"} +DEFAULT_TAGS = ["Hive", "Cortex", "Responder"] +TLP_PAP_MAP = { + 0: "WHITE", + 1: "GREEN", + 2: "AMBER", + 3: "RED", +} + + +class EclecticIQIndicator(Responder): + def __init__(self): + Responder.__init__(self) + self.eiq_host_url = self.get_param( + "config.eiq_host_url", + None, + "EclecticIQ Intelligence Center host URL (e.g.:https://demo.eclecticiq.com)", + ) + self.apikey = self.get_param( + "config.eiq_api_key", None, "EclecticIQ Intelligence Center API key missing" + ) + self.headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {self.apikey}", + } + self.group_name = self.get_param( + "config.group_name", + "Testing Group", + "EclecticIQ Intelligence Center Group Name (e.g.:Testing Group)", + ) + + @staticmethod + def convert_eiq_observable_type(value): + ioc_types = { + "address": "address", + "asn": "asn", + "cve": "cve", + "domain": "domain", + "email": "email", + "file": "file", + "filename": "file", + "fqdn": "host", + "hash": "hash-sha256", + "host": "host", + "imphash": "hash-imphash", + "ip": "ipv4", + "ipv4": "ipv4", + "ipv4-addr": "ipv4", + "ipv4-net": "ipv4-cidr", + "ipv6": "ipv6", + 
"ipv6-addr": "ipv6", + "ipv6-net": "ipv6-cidr", + "mac": "mac-48", + "mail": "email", + "mail_subject": "email-subject", + "md5": "hash-md5", + "mutex": "mutex", + "organization": "organization", + "phone_number": "telephone", + "registry": "registrar", + "sha256": "hash-sha256", + "sha384": "hash-sha384", + "sha512": "hash-sha512", + "uri": "uri", + "uri_path": "uri", + "url": "uri", + "user-agent": "user-agent", + } + return ioc_types.get(value.lower()) + + @staticmethod + def format_time(value): + if value: + return datetime.fromtimestamp(value // 1000).isoformat() + return None + + @staticmethod + def get_max(value1: Optional[int], value2: Optional[int]) -> Optional[int]: + if value1 and value2: + return max(value2, value1) + return value1 or value2 + + def make_report(self, case_data, source_id): + desc_fields = [ + ("title", "Case Title"), + ("description", "Case Description"), + ("summary", "Case Summary"), + ] + description = "" + for field, title in desc_fields: + if case_data.get(field): + description += f"

{title}: {case_data[field]}

" + + tags = DEFAULT_TAGS.copy() + case_data.get("tags", []) + confidence = CONFIDENCE_MAP.get(case_data.get("severity")) + case_data["severity"] = SEVERITY_MAP.get(case_data.get("severity")) + + case_tag_fields = [ + ("caseId", "Case ID"), + ("severity", "Severity"), + ("impactStatus", "Impact Status"), + ("resolutionStatus", "Resolution Status"), + ("status", "Status"), + ("stage", "Stage"), + ("owner", "Owner"), + ] + for tag_field, title in case_tag_fields: + value = case_data.get(tag_field) + if value: + tags.append(f"{title}: {value}") + description += f"

{title}: {value}

" + + # PROCESS TLP + case_tlp = case_data.get("tlp", None) + if case_tlp and TLP_PAP_MAP.get(case_tlp): + case_tlp = TLP_PAP_MAP[case_tlp] + + # PROCESS PAP + case_pap = case_data.get("pap", None) + if case_pap and TLP_PAP_MAP.get(case_pap): + tags.append(f"PAP: {TLP_PAP_MAP[case_pap]}") + + # deduplicate tags + tags = list(set(tags)) + + _id = "{{https://thehive-project.org}}report-{}".format( + str(uuid.uuid5(uuid.NAMESPACE_X500, case_data.get("id"))) + ) + + report = { + "data": { + "id": _id, + "title": f"{case_data.get('title')} - {case_data.get('caseId')}", + "description": description, + "type": "report", + }, + "meta": { + "estimated_observed_time": self.format_time( + case_data.get("updatedAt", None) + ), + "estimated_threat_start_time": self.format_time( + case_data.get("startDate", None) + ), + "tags": tags, + "tlp_color": case_tlp, + }, + "sources": [{"source_id": source_id}], + } + + if confidence: + report["data"]["confidence"] = dict(type="confidence", value=confidence) + return report + + def make_indicator(self, hive_data, source_id): + if not self.convert_eiq_observable_type(hive_data.get("dataType")): + self.error("Unsupported IOC type") + return None + + ioc_value = hive_data.get("data", None) + description = "" + tags = DEFAULT_TAGS.copy() + hive_data.get("tags", []) + + observable_type = hive_data.get("_type", None) + if observable_type is not None: + tags.append(observable_type) + description += f"

Type: {observable_type}

" + + observable_id = hive_data.get("id", None) + if observable_id is not None: + tags.append("Observable ID: {}".format(observable_id)) + description += f"

Observable ID: {observable_id}

" + + sighted = hive_data.get("sighted", None) + if sighted is True: + tags.append("Sighted") + description += f"

Sighted: True

" + + # PROCESS TLP + tlp = hive_data.get("tlp", None) + tlp_color = TLP_PAP_MAP.get(tlp, None) if tlp else None + + # PROCESS PAP + pap = hive_data.get("pap", None) + if pap and TLP_PAP_MAP.get(pap): + tags.append(f"PAP: {TLP_PAP_MAP[pap]}") + + # deduplicate tags + tags = list(set(tags)) + + _id = "{{https://thehive-project.org}}indicator-{}".format( + str(uuid.uuid5(uuid.NAMESPACE_X500, hive_data["id"])) + ) + + indicator = { + "data": { + "id": _id, + "title": ioc_value, # use the main value as the title + "description": description, # use hive description fields combined + "type": "indicator", + "extracts": [ + { + "kind": self.convert_eiq_observable_type( + hive_data.get("dataType") + ), + "value": ioc_value, + } + ], + }, + "meta": { + "estimated_observed_time": self.format_time( + hive_data.get("updatedAt", None) + ), + "estimated_threat_start_time": self.format_time( + hive_data.get("startDate", None) + ), + "tags": tags, + "tlp_color": tlp_color, + }, + "sources": [{"source_id": source_id}], + } + return indicator + + def get_group_source_id(self): + response = requests.get( + self.eiq_host_url + "/private/groups/", + params=f"filter[name]={self.group_name}", + headers=self.headers, + ) + if response.status_code != 200: + return None + return response.json()["data"][0]["source"] + + def create_relation(self, entity_dict, source_id): + report_id = entity_dict.get("report") + indicator_id = entity_dict.get("indicator") + if not report_id or not indicator_id: + return None + + relation_id = str(uuid.uuid5(uuid.NAMESPACE_X500, f"{report_id}-{indicator_id}")) + relationship = { + "data": [ + { + "id": relation_id, + "data": { + "source": report_id, + "key": "reports", + "target": indicator_id, + }, + "sources": [source_id], + } + ] + } + + response = requests.put( + self.eiq_host_url + "/api/v2/relationships", + json=relationship, + headers=self.headers, + ) + return response + + def run(self): + try: + Responder.run(self) + + hive_data = 
self.get_param("data") + _type = hive_data.get("_type") + if _type not in ["case", "case_artifact"]: + self.error("Responder not supported") + # FIXME: should we return None here? + case_data = hive_data if _type == "case" else hive_data.get("case") + + source_id = self.get_group_source_id() + if not source_id: + self.error("Invalid Group name") + return + + report = self.make_report(case_data, source_id) + + indicator = None + if _type == "case_artifact": + indicator = self.make_indicator(hive_data, source_id) + if not indicator: + self.error("Unsupported IOC type") + return + + entities = self.submit_entities(report, indicator) + if not entities: + return + entity_ids = { + data["data"]["type"]: data["id"] for data in entities.get("data", []) + } + + relation_response = self.create_relation(entity_ids, source_id) + if relation_response and relation_response.status_code not in [200, 201]: + self.error( + f"While making the relationship, " + f"receiving status: {relation_response.status_code}" + ) + return + + self.report_result(entity_ids) + except Exception as ex: + self.error("Error: {}: ex: {}".format(traceback.format_exc(), ex)) + + def submit_entities(self, report: dict, indicator: dict) -> Optional[dict]: + data = [] + report and data.append(report) + indicator and data.append(indicator) + # case data contains parent case information + json_data = dict(data=data) + response = requests.put( + self.eiq_host_url + "/api/v2/entities", + json=json_data, + headers=self.headers, + ) + if response.status_code not in [200, 201]: + self.error(f"While making the call, receiving {response.status_code}") + return None + + return response.json() + + def report_result(self, entity_ids: dict) -> None: + result = {"message": "Submitted to EclecticIQ Intelligence Center"} + if entity_ids.get("report"): + result["report_platform_link"] = ( + f"{self.eiq_host_url}/entity/{entity_ids.get('report')}" + ) + + if entity_ids.get("indicator"): + result["indicator_platform_link"] = ( 
+ f"{self.eiq_host_url}/entity/{entity_ids.get('indicator')}" + ) + self.report(result) + + def operations(self, raw): + return [ + self.build_operation("AddTagToArtifact", tag="EclecticIQ:Indicator Created") + ] + + +if __name__ == "__main__": + EclecticIQIndicator().run() diff --git a/responders/EclecticIQIndicator/requirements.txt b/responders/EclecticIQIndicator/requirements.txt new file mode 100644 index 000000000..6aabc3cfa --- /dev/null +++ b/responders/EclecticIQIndicator/requirements.txt @@ -0,0 +1,2 @@ +cortexutils +requests diff --git a/responders/FalconCustomIOC/Dockerfile b/responders/FalconCustomIOC/Dockerfile index be26deef1..0069e51d6 100644 --- a/responders/FalconCustomIOC/Dockerfile +++ b/responders/FalconCustomIOC/Dockerfile @@ -1,6 +1,6 @@ -FROM python:2 +FROM python:3 WORKDIR /worker COPY . FalconCustomIOC RUN pip install --no-cache-dir -r FalconCustomIOC/requirements.txt -ENTRYPOINT FalconCustomIOC/FalconCustomIOC.py +ENTRYPOINT FalconCustomIOC/FalconCustomIOCv2.py \ No newline at end of file diff --git a/responders/FalconCustomIOC/FalconCustomIOCv2.json b/responders/FalconCustomIOC/FalconCustomIOCv2.json new file mode 100644 index 000000000..1073f29cf --- /dev/null +++ b/responders/FalconCustomIOC/FalconCustomIOCv2.json @@ -0,0 +1,90 @@ +{ + "name": "Crowdstrike_Falcon_Custom_IOC", + "version": "2.0", + "author": "Nicolas Criton", + "url": "https://www.crowdstrike.com/blog/tech-center/consume-ioc-and-threat-feeds/", + "license": "AGPL-v3", + "description": "Submit observables to the Crowdstrike Falcon Custom IOC API", + "dataTypeList": ["thehive:alert","thehive:case_artifact"], + "command": "FalconCustomIOC/FalconCustomIOCv2.py", + "baseConfig": "FalconCustomIOCv2", + "configurationItems": [ + { + "name": "falconapi_endpoint", + "description": "CrowdStrike API endpoints: US-1 | US-2 | US-GOV-1 | EU-1", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "falconapi_clientid", + "description": "Crowdstrike Falcon Client 
ID Oauth2 API client", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "falconapi_key", + "description": "Crowdstrike Falcon Oauth2 API Key", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "domain_block_expiration_days", + "description": "How many days should we block the domain IOCs sent? Default: 30", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 30 + }, + { + "name": "ip_block_expiration_days", + "description": "How many days should we block the ip IOCs sent? Default: 30", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 30 + }, + { + "name": "hash_block_expiration_days", + "description": "How many days should we block the hash IOCs sent? Default: 30", + "type": "number", + "multi": false, + "required": false, + "defaultValue": 30 + }, + { + "name": "action_to_take", + "description": "How the IOCs should be handled by Falcon ? Choose between 'no_action' or 'detect' -> no_action: Save the indicator for future use, but take no action / detect: Enable detections for the indicator at the selected severity (Default: detect)", + "type": "string", + "multi": false, + "required": false, + "defaultValue": "detect" + }, + { + "name": "severity_level", + "description": "Severity level when IOCs are ingested by Falcon CustomIOC: informational / low / medium / high / critical - Default: high", + "type": "string", + "multi": false, + "required": false, + "defaultValue": "high" + }, + { + "name": "tag_added_to_cs", + "description": "Tag added to the IOC in Falcon platform - Default: Cortex Incident - FalconCustomIOC", + "type": "string", + "multi": false, + "required": false, + "defaultValue": "Cortex Incident - FalconCustomIOC" + }, + { + "name": "tag_added_to_thehive", + "description": "Tag added to the IOC in TheHive platform - Default: Falcon:Custom IOC Uploaded", + "type": "string", + "multi": false, + "required": false, + "defaultValue": "Falcon:Custom IOC 
Uploaded" + } + ] +} \ No newline at end of file diff --git a/responders/FalconCustomIOC/FalconCustomIOCv2.py b/responders/FalconCustomIOC/FalconCustomIOCv2.py new file mode 100644 index 000000000..6653ed148 --- /dev/null +++ b/responders/FalconCustomIOC/FalconCustomIOCv2.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +import requests +import re +import json +import ipaddress + +from cortexutils.responder import Responder +from cortexutils.extractor import Extractor +from falconpy import OAuth2, IOC +from dateutil.relativedelta import relativedelta +from datetime import datetime + + +class FalconCustomIOC(Responder): + def __init__(self): + Responder.__init__(self) + self.falconapi_endpoint = self.get_param( + "config.falconapi_endpoint", None, "Falcon API Endpoint: US-1 | US-2 | US-GOV-1 | EU-1", + ) + self.falconapi_clientid = self.get_param( + "config.falconapi_clientid", None, "Falcon clientid missing" + ) + self.falconapi_key = self.get_param( + "config.falconapi_key", None, "Falcon api key missing" + ) + self.domain_block_expiration_days = self.get_param( + "config.domain_block_expiration_days", 30 + ) + self.ip_block_expiration_days = self.get_param( + "config.ip_block_expiration_days", 30 + ) + self.hash_block_expiration_days = self.get_param( + "config.hash_block_expiration_days", 30 + ) + self.action_to_take = self.get_param( + "config.action_to_take", "detect" + ) + self.severity_level = self.get_param( + "config.severity_level", "high" + ) + self.tag_added_to_cs = self.get_param( + "config.tag_added_to_cs", "Cortex Incident - FalconCustomIOC" + ) + self.tag_added_to_thehive = self.get_param( + "config.tag_added_to_thehive", "CrowdStrike:Custom IOC Uploaded" + ) + + def run(self): + try: + Responder.run(self) + ioctypes = { + "hash": "sha256", + "sha256": "sha256", + "md5": "md5", + "ip": "ipv4", + "ipv4": "ipv4", + "ip6": "ipv6", + "ipv6": "ipv6", + "domain": "domain", + "url": "domain", + } + + data_type = 
self.get_param("data.dataType") + if not data_type in ioctypes: + self.error("Unsupported IOC type") + return False + ioc = self.get_param("data.data", None, "No IOC provided") + + if data_type == "url": + match = re.match(r"(http:\/\/|https:\/\/)?([\w\-\.]{0,256}).*", ioc) + if match is None or match.group(2) is None: + self.error("Could not parse iocs from URL") + return False + else: + ioc = match.group(2) + data_type = Extractor().check_string(ioc) + + if data_type == "ip": + try: + ip_check = ipaddress.ip_address(ioc) + except Exception as e: + self.error(f"Could not check IP type from IOC : {e}") + return False + if isinstance(ip_check, ipaddress.IPv6Address): + data_type = "ipv6" + elif isinstance(ip_check, ipaddress.IPv4Address): + data_type = "ipv4" + else: + self.error("Could not determine IP type from IOC") + return False + + if data_type == "hash": + if len(ioc) == 32: + data_type = "md5" + elif len(ioc) == 40: + self.error("Unsupported IOC type") + return False + elif len(ioc) == 64: + data_type = "sha256" + + if data_type in ("fqdn", "domain"): + expiration_date = datetime.today() + relativedelta(days=self.domain_block_expiration_days) + elif data_type in ("ip", "ipv4", "ipv6", "ip6"): + expiration_date = datetime.today() + relativedelta(days=self.ip_block_expiration_days) + elif data_type in ("hash", "sha256", "md5"): + expiration_date = datetime.today() + relativedelta(days=self.hash_block_expiration_days) + expiration = expiration_date.strftime("%Y-%m-%dT%H:%M:%SZ") + + incident_title = self.get_param("data.case.title", None, "Can't get case title").encode("utf-8")[:128] + + auth = OAuth2( + client_id=self.falconapi_clientid, + client_secret=self.falconapi_key, + base_url=self.falconapi_endpoint + ) + + falcon_api = IOC(auth_object=auth) + response = falcon_api.indicator_create(action=self.action_to_take, + applied_globally=True, + comment="TheHive IOC incident", + description=incident_title.decode("utf-8"), + expiration=expiration, + filename="", 
+ ignore_warnings=False, + platforms='mac,windows,linux', + severity=self.severity_level, + source="Cortex - FalconCustomIOC [" + incident_title.decode("utf-8") + "]", + tags=self.tag_added_to_cs, + type=ioctypes[data_type], + value=ioc.strip() + ) + + response_error = str(response['body']['errors']) + response_ressources = str(response['body']['resources']) + + if response['body']['errors'] is None: + self.report( + {"message": f"{ioc} successuflly submitted to Crowdstrike Falcon custom IOC api - status code: {response['status_code']}"} + ) + elif 'Duplicate type' in response_ressources: + self.error(f"Not submitted because of duplicated entry - {ioc} already found on your Falcon CustomIOC database") + return False + else: + self.error(f"Error: unable to complete action - received {response['status_code']} status code from FalconIOC API with the following message: {response_error}") + return False + + except Exception as ex: + self.error(f"Unable to send IOC to FalconCustomIOC API: {ex}") + return False + return True + + def operations(self, raw): + return [ + self.build_operation( + "AddTagToArtifact", tag=self.tag_added_to_thehive + ) + ] + +if __name__ == "__main__": + FalconCustomIOC().run() \ No newline at end of file diff --git a/responders/FalconCustomIOC/requirements.txt b/responders/FalconCustomIOC/requirements.txt index 6aabc3cfa..ff452c900 100644 --- a/responders/FalconCustomIOC/requirements.txt +++ b/responders/FalconCustomIOC/requirements.txt @@ -1,2 +1,4 @@ cortexutils -requests +crowdstrike-falconpy +datetime +python-dateutil \ No newline at end of file diff --git a/responders/JAMFProtect/JAMFProtect_IOC.py b/responders/JAMFProtect/JAMFProtect_IOC.py new file mode 100755 index 000000000..250d9ff16 --- /dev/null +++ b/responders/JAMFProtect/JAMFProtect_IOC.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python3 + +from cortexutils.responder import Responder +import re +from urllib.parse import urlparse +import requests +import json + +class 
JAMFProtect_IOC(Responder): + def __init__(self): + Responder.__init__(self) + self.base_url = self.get_param("config.base_url") + self.client_id = self.get_param("config.client_id") + self.password = self.get_param("config.password") + self.service = self.get_param("config.service", None) + + def identify_and_extract(self, input_string): + # regular expressions for different types + patterns = { + "sha256": re.compile(r"^[a-fA-F0-9]{64}$"), + "md5": re.compile(r"^[a-fA-F0-9]{32}$"), + "sha1": re.compile(r"^[a-fA-F0-9]{40}$"), + "ipv4": re.compile(r"^(\d{1,3}\.){3}\d{1,3}$"), + "ipv6": re.compile(r"^([0-9a-fA-F]{1,4}:){7}([0-9a-fA-F]{1,4}|:)|(([0-9a-fA-F]{1,4}:){1,7}|:)(:([0-9a-fA-F]{1,4}|:)){1,7}$"), + "domain": re.compile(r"^(?!:\/\/)([a-zA-Z0-9-_]+\.)*([a-zA-Z0-9-_]{2,})(\.[a-zA-Z]{2,11})$") + } + + # check if the input_string matches any of the patterns + for key, pattern in patterns.items(): + if pattern.match(input_string): + return key, input_string + + # check if the input_string is a URL and extract the domain + try: + parsed_url = urlparse(input_string) + if parsed_url.scheme and parsed_url.netloc: + domain = parsed_url.netloc + # handle URLs with "www." + if domain.startswith("www."): + domain = domain[4:] + return "domain", domain + except Exception as e: + self.error(f"Error parsing URL: {e}") + + return None + + def get_jamf_token(self, base_url: str, client_id: str, password: str) -> str: + """ + Function to obtain a token from the Jamf Protect API. + + Parameters: + - base_url (str): The base URL of your Jamf Protect instance (e.g., "https://mycompany.protect.jamfcloud.com"). + - client_id (str): The client ID for authentication. + - password (str): The password for authentication. + + Returns: + - str: The access token if successful, raises an exception if it fails. 
+ """ + token_url = f"{base_url}/token" + headers = {'content-type': 'application/json'} + data = { + "client_id": client_id, + "password": password + } + + try: + response = requests.post(token_url, headers=headers, data=json.dumps(data)) + response.raise_for_status() + access_token = response.json().get('access_token') + if access_token: + return access_token + else: + raise ValueError("Failed to retrieve access token.") + except requests.exceptions.RequestException as e: + raise RuntimeError(f"Failed to obtain token: {e}") + + def add_hash_to_prevention_list(self, base_url: str, token: str, list_name: str, description: str, hash_value: str, tags: list): + """ + Function to add a hash to a custom prevention list in Jamf Protect using GraphQL. + """ + graphql_url = f"{base_url}/graphql" + headers = { + "Authorization": f"{token}", + "Content-Type": "application/json" + } + + # Construct the GraphQL mutation payload + payload = { + "operationName": "createPreventList", + "variables": { + "name": list_name, + "description": description, + "type": "FILEHASH", + "list": [hash_value], + "tags": tags + }, + "query": """ + mutation createPreventList($name: String!, $tags: [String]!, $type: PREVENT_LIST_TYPE!, $list: [String]!, $description: String) { + createPreventList( + input: {name: $name, tags: $tags, type: $type, list: $list, description: $description} + ) { + ...PreventListFields + __typename + } + } + + fragment PreventListFields on PreventList { + id + name + type + count + list + created + description + __typename + } + """ + } + # Make the GraphQL request + response = requests.post(graphql_url, headers=headers, json=payload) + response.raise_for_status() + + result = response.json() + if 'errors' in result: + return f"Failed to add hash to prevention list: {result['errors']}" + else: + return f"Hash {hash_value} successfully added to prevention list {list_name}." 
+ + def get_prevention_list_id(self, base_url: str, token: str, list_name: str) -> str: + """ + Function to get the ID of a prevention list by its name. + """ + graphql_url = f"{base_url}/graphql" + headers = { + "Authorization": f"{token}", + "Content-Type": "application/json" + } + + payload = { + "operationName": "listPreventLists", + "variables": { + "nextToken": None, + "direction": "ASC", + "field": "created", + "filter": None + }, + "query": """ + query listPreventLists($nextToken: String, $direction: OrderDirection!, $field: PreventListOrderField!, $filter: PreventListFilterInput) { + listPreventLists( + input: {next: $nextToken, order: {direction: $direction, field: $field}, pageSize: 100, filter: $filter} + ) { + items { + ...PreventListFields + __typename + } + pageInfo { + next + total + __typename + } + __typename + } + } + + fragment PreventListFields on PreventList { + id + name + type + count + list + created + description + __typename + } + """ + } + + + response = requests.post(graphql_url, headers=headers, json=payload) + response.raise_for_status() + + # check if the response contains valid json data + try: + result = response.json() + except ValueError as e: + raise RuntimeError(f"Failed to decode JSON response: {e}") + + prevention_lists = result['data']['listPreventLists']['items'] + + prevention_lists_ids = [] + # Search for the list with the specified name + for prevention_list in prevention_lists: + if prevention_list['name'] == list_name: + prevention_lists_ids.append(prevention_list['id']) + + if prevention_lists_ids == []: + raise ValueError(f"No prevention list found with name: {list_name}") + + return prevention_lists_ids + + + + def delete_prevention_list(self, base_url: str, token: str, prevent_list_ids: list): + """ + Function to delete a prevention list in Jamf Protect using GraphQL. 
+ """ + graphql_url = f"{base_url}/graphql" + headers = { + "Authorization": f"{token}", + "Content-Type": "application/json" + } + + failed_deletions = [] + + for prevent_list_id in prevent_list_ids: + # Construct the GraphQL mutation payload + payload = { + "operationName": "deletePreventList", + "variables": { + "id": prevent_list_id + }, + "query": """ + mutation deletePreventList($id: ID!) { + deletePreventList(id: $id) { + id + __typename + } + } + """ + } + + # Make the GraphQL request + response = requests.post(graphql_url, headers=headers, json=payload) + response.raise_for_status() + + result = response.json() + if 'errors' in result: + failed_deletions.append(prevent_list_id) + + if failed_deletions: + return f"Failed to delete prevention list(s): {', '.join(failed_deletions)}" + + return f"Prevention list with ID(s) {', '.join(prevent_list_ids)} successfully deleted." + + + def run(self): + result = "" + observable_value = self.get_param("data.data", None) + ioc_type, ioc_value = self.identify_and_extract(observable_value) + if ioc_type not in ["sha256", "sha1"]: + self.error("error -- Not a hash or a valid hash : sha1 or sha256") + + case_title = self.get_param("data.case.title", None, "Can't get case title") + case_id = self.get_param("data.case.id", None, "Can't get case ID") + description = f"Pushed from TheHive - {case_title} - {case_id}" + + if self.service == "addIOC": + + token = self.get_jamf_token(self.base_url, self.client_id, self.password) + + result = self.add_hash_to_prevention_list(self.base_url,token, description, description, ioc_value, ["TheHive", f"{case_id}"]) + elif self.service == "removeIOC": + token = self.get_jamf_token(self.base_url, self.client_id, self.password) + + prevention_list_ids = self.get_prevention_list_id(self.base_url, token, description) + result = self.delete_prevention_list(self.base_url, token, prevention_list_ids) + + if 'error' in result: + self.error(result) + + self.report({"message": result}) + + + + +if 
__name__ == '__main__': + JAMFProtect_IOC().run() \ No newline at end of file diff --git a/responders/JAMFProtect/JAMFProtect_addHashtoPreventList.json b/responders/JAMFProtect/JAMFProtect_addHashtoPreventList.json new file mode 100644 index 000000000..162521b72 --- /dev/null +++ b/responders/JAMFProtect/JAMFProtect_addHashtoPreventList.json @@ -0,0 +1,50 @@ +{ + "name": "JAMFProtect_addHashtoPreventList", + "version": "1.0", + "author": "nusantara-self, StrangeBee", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Add IOC to JAMF Protect - creates a custom prevent list for a hash", + "dataTypeList": [ + "thehive:case_artifact" + ], + "command": "JAMFProtect/JAMFProtect_IOC.py", + "baseConfig": "JAMFProtect", + "config": { + "service": "addIOC" + }, + "configurationItems": [ + { + "name": "base_url", + "description": "JAMF Protect base url", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://mycompany.protect.jamfcloud.com" + }, + { + "name": "client_id", + "description": "JAMF Protect client ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "" + }, + { + "name": "password", + "description": "JAMF Protect password", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "" + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.jamf.com/products/jamf-protect/", + "service_logo": { + "path": "assets/jamfprotect.png", + "caption": "JAMF Protect logo" + } +} diff --git a/responders/JAMFProtect/JAMFProtect_removeHashfromPreventList.json b/responders/JAMFProtect/JAMFProtect_removeHashfromPreventList.json new file mode 100644 index 000000000..d152136c0 --- /dev/null +++ b/responders/JAMFProtect/JAMFProtect_removeHashfromPreventList.json @@ -0,0 +1,50 @@ +{ + "name": "JAMFProtect_removeHashfromPreventList", + "version": "1.0", + "author": 
"nusantara-self, StrangeBee", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Remove IOC on JAMF Protect - removes associated custom prevent list(s) containing the hash", + "dataTypeList": [ + "thehive:case_artifact" + ], + "command": "JAMFProtect/JAMFProtect_IOC.py", + "baseConfig": "JAMFProtect", + "config": { + "service": "removeIOC" + }, + "configurationItems": [ + { + "name": "base_url", + "description": "JAMF Protect base url", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://mycompany.protect.jamfcloud.com" + }, + { + "name": "client_id", + "description": "JAMF Protect client ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "" + }, + { + "name": "password", + "description": "JAMF Protect password", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "" + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.jamf.com/products/jamf-protect/", + "service_logo": { + "path": "assets/jamfprotect.png", + "caption": "JAMF Protect logo" + } +} diff --git a/responders/JAMFProtect/README.md b/responders/JAMFProtect/README.md new file mode 100644 index 000000000..e61dc54d6 --- /dev/null +++ b/responders/JAMFProtect/README.md @@ -0,0 +1,10 @@ +### JAMF Protect Prevent List + +This responder manages [JAMF Protect prevent lists](https://docs.jamf.com/jamf-protect/administrator-guide/Prevent_Lists.html) by adding or removing hashes as needed. 
+ +#### Setup +- Navigate to **Administrative** > **Account** +- Create a role **PreventList-Write** with permissions **Prevent Lists: Read & Write** +- Create an API client and assign the above role +- Use these API credentials in your responders + diff --git a/responders/JAMFProtect/assets/jamfprotect.png b/responders/JAMFProtect/assets/jamfprotect.png new file mode 100644 index 000000000..b3e566f23 Binary files /dev/null and b/responders/JAMFProtect/assets/jamfprotect.png differ diff --git a/responders/JAMFProtect/requirements.txt b/responders/JAMFProtect/requirements.txt new file mode 100644 index 000000000..4a21dbf63 --- /dev/null +++ b/responders/JAMFProtect/requirements.txt @@ -0,0 +1,2 @@ +cortexutils +requests \ No newline at end of file diff --git a/responders/MSDefenderEndpoints/Dockerfile b/responders/MSDefenderEndpoints/Dockerfile index 6f153365f..6d3fed6a2 100644 --- a/responders/MSDefenderEndpoints/Dockerfile +++ b/responders/MSDefenderEndpoints/Dockerfile @@ -17,5 +17,5 @@ FROM python:3 WORKDIR /worker COPY . MSDefenderEndpoints -RUN test ! -e MSDefenderEndpoints/requirements.txt || pip install --no-cache-dir -rMSDefenderEndpoints/requirements.txt -ENTRYPOINT MSDefenderEndpoints/MSDefenderEndpoints.py \ No newline at end of file +RUN test ! 
-e MSDefenderEndpoints/requirements.txt || pip install --no-cache-dir -r MSDefenderEndpoints/requirements.txt +ENTRYPOINT MSDefenderEndpoints/MSDefenderEndpoints.py diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints.py b/responders/MSDefenderEndpoints/MSDefenderEndpoints.py index 8775fd5f9..c7baaff02 100755 --- a/responders/MSDefenderEndpoints/MSDefenderEndpoints.py +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints.py @@ -7,7 +7,7 @@ import datetime class MSDefenderEndpoints(Responder): - def __init__(self): + def __init__(self): Responder.__init__(self) self.msdefenderTenantId = self.get_param('config.tenantId', None, 'TenantId missing!') self.msdefenderAppId = self.get_param('config.appId', None, 'AppId missing!') @@ -29,9 +29,9 @@ def __init__(self): } ) - def run(self): + def run(self): Responder.run(self) - url = "{}{}/oauth2/token".format( + url = "{}/{}/oauth2/token".format( self.msdefenderOAuthUri,self.msdefenderTenantId ) @@ -77,6 +77,8 @@ def getMachineId(id): if response.status_code == 200: jsonResponse = response.json() if len(response.content) > 100: + if jsonResponse["value"][0]["aadDeviceId"] is None: + return jsonResponse["value"][0]["id"] return jsonResponse["value"][0]["aadDeviceId"] else: self.error({'message': "Can't get hostname from Microsoft API"}) @@ -153,16 +155,102 @@ def unisolateMachine(machineId): except requests.exceptions.RequestException as e: self.error({'message': e}) - def pushCustomIocAlert(ipAddress): - action="Alert" + + def restrictAppExecution(machineId): + ''' + example + POST https://api.securitycenter.windows.com/api/machines/{id}/restrictCodeExecution + ''' + url = 'https://api.securitycenter.windows.com/api/machines/{}/restrictCodeExecution'.format(machineId) + body = { + 'Comment': 'Restrict code execution due to TheHive case {}'.format(self.caseId) + } + + try: + response = self.msdefenderSession.post(url=url, json=body) + if response.status_code == 201: + self.report({'message': "Restricted app 
execution on machine: " + self.observable }) + elif response.status_code == 400 and "ActiveRequestAlreadyExists" in response.content.decode("utf-8"): + self.report({'message': "Error restricting app execution on machine: ActiveRequestAlreadyExists"}) + else: + self.error({'message': "Can't restrict app execution"}) + except requests.exceptions.RequestException as e: + self.error({'message': e}) + + + def unrestrictAppExecution(machineId): + ''' + example + POST https://api.securitycenter.windows.com/api/machines/{id}/unrestrictCodeExecution + ''' + url = 'https://api.securitycenter.windows.com/api/machines/{}/unrestrictCodeExecution'.format(machineId) + body = { + 'Comment': '"Remove code execution restriction since machine was cleaned and validated due to TheHive case {}'.format(self.caseId) + } + + try: + response = self.msdefenderSession.post(url=url, json=body) + if response.status_code == 201: + self.report({'message': "Removed app execution restriction on machine: " + self.observable }) + elif response.status_code == 400 and "ActiveRequestAlreadyExists" in response.content.decode("utf-8"): + self.report({'message': "Error removing app execution restriction on machine: ActiveRequestAlreadyExists"}) + else: + self.error({'message': "Can't unrestrict app execution"}) + except requests.exceptions.RequestException as e: + self.error({'message': e}) + + + def startAutoInvestigation(machineId): + ''' + example + POST https://api.securitycenter.windows.com/api/machines/{id}/startInvestigation + ''' + url = 'https://api.securitycenter.windows.com/api/machines/{}/startInvestigation'.format(machineId) + + body = { + 'Comment': 'Start investigation due to TheHive case {}'.format(self.caseId) + } + + try: + response = self.msdefenderSession.post(url=url, json=body) + if response.status_code == 201: + self.report({'message': "Started Auto Investigation on : " + self.observable }) + elif response.status_code == 400 and "ActiveRequestAlreadyExists" in 
response.content.decode("utf-8"): + self.report({'message': "Error lauching auto investigation on machine: ActiveRequestAlreadyExists"}) + else: + self.error({'message': "Error auto investigation on machine"}) + except requests.exceptions.RequestException as e: + self.error({'message': e}) + + + def pushCustomIocAlert(observable): + + if self.observableType == 'ip': + indicatorType = 'IpAddress' + elif self.observableType == 'url': + indicatorType = 'Url' + elif self.observableType == 'domain': + indicatorType = 'DomainName' + elif self.observableType == 'hash': + if len(observable) == 32: + indicatorType = 'FileMd5' + elif len(observable) == 40: + indicatorType = 'FileSha1' + elif len(observable) == 64: + indicatorType = 'FileSha256' + else: + self.report({'message':"Observable is not a valid hash"}) + else: + self.error({'message':"Observable type must be ip, url, domain or hash"}) + url = 'https://api.securitycenter.windows.com/api/indicators' body = { - 'indicatorValue': ipAddress, - 'indicatorType': 'IpAddress', - 'action': action, - 'title': self.caseTitle, + 'indicatorValue': observable, + 'indicatorType': indicatorType, + 'action': 'Alert', + 'title': "TheHive IOC: {}".format(self.caseTitle), 'severity': 'High', - 'description': self.caseTitle, + 'description': "TheHive case: {} - caseId {}".format(self.caseTitle,self.caseId), 'recommendedActions': 'N/A' } @@ -173,13 +261,31 @@ def pushCustomIocAlert(ipAddress): except requests.exceptions.RequestException as e: self.error({'message': e}) - def pushCustomIocBlock(ipAddress): - action="AlertAndBlock" + def pushCustomIocBlock(observable): + + if self.observableType == 'ip': + indicatorType = 'IpAddress' + elif self.observableType == 'url': + indicatorType = 'Url' + elif self.observableType == 'domain': + indicatorType = 'DomainName' + elif self.observableType == 'hash': + if len(observable) == 32: + indicatorType = 'FileMd5' + elif len(observable) == 40: + indicatorType = 'FileSha1' + elif len(observable) == 
64: + indicatorType = 'FileSha256' + else: + self.report({'message':"Observable is not a valid hash"}) + else: + self.error({'message':"Observable type must be ip, url, domain or hash"}) + url = 'https://api.securitycenter.windows.com/api/indicators' body = { - 'indicatorValue' : ipAddress, - 'indicatorType' : 'IpAddress', - 'action' : action, + 'indicatorValue' : observable, + 'indicatorType' : indicatorType, + 'action' : 'AlertAndBlock', 'title' : "TheHive IOC: {}".format(self.caseTitle), 'severity' : 'High', 'description' : "TheHive case: {} - caseId {}".format(self.caseTitle,self.caseId), @@ -193,13 +299,19 @@ def pushCustomIocBlock(ipAddress): except requests.exceptions.RequestException as e: self.error({'message': e}) - # print("blop") + if self.service == "isolateMachine": isolateMachine(getMachineId(self.observable)) elif self.service == "unisolateMachine": unisolateMachine(getMachineId(self.observable)) elif self.service == "runFullVirusScan": runFullVirusScan(getMachineId(self.observable)) + elif self.service == "restrictAppExecution": + restrictAppExecution(getMachineId(self.observable)) + elif self.service == "unrestrictAppExecution": + unrestrictAppExecution(getMachineId(self.observable)) + elif self.service == "startAutoInvestigation": + startAutoInvestigation(getMachineId(self.observable)) elif self.service == "pushIOCBlock": pushCustomIocBlock(self.observable) elif self.service == "pushIOCAlert": @@ -207,7 +319,7 @@ def pushCustomIocBlock(ipAddress): else: self.error({'message': "Unidentified service"}) - def operations(self, raw): + def operations(self, raw): self.build_operation('AddTagToCase', tag='MSDefenderResponder:run') if self.service == "isolateMachine": return [self.build_operation("AddTagToArtifact", tag="MsDefender:isolated")] @@ -215,6 +327,10 @@ def operations(self, raw): return [self.build_operation("AddTagToArtifact", tag="MsDefender:fullVirusScan")] elif self.service == "unisolateMachine": return 
[self.build_operation("AddTagToArtifact", tag="MsDefender:unIsolated")] + elif self.service == "restrictAppExecution": + return [self.build_operation("AddTagToArtifact", tag="MsDefender:restrictedAppExec")] + elif self.service == "unrestrictAppExecution": + return [self.build_operation("AddTagToArtifact", tag="MsDefender:unrestrictedAppExec")] if __name__ == '__main__': diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_AutoInvestigation.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_AutoInvestigation.json new file mode 100644 index 000000000..ac4ece72c --- /dev/null +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_AutoInvestigation.json @@ -0,0 +1,61 @@ +{ + "name": "MSDefender-AutoInvestigation", + "version": "1.0", + "author": "Keijo Korte, Louis-Maximilien Dupouy", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Start an automated investigation on a device", + "dataTypeList": ["thehive:case_artifact"], + "command": "MSDefenderEndpoints/MSDefenderEndpoints.py", + "baseConfig": "MSDefenderforEndpoints", + "config": { + "service": "startAutoInvestigation" + }, + "configurationItems": [ + { + "name": "tenantId", + "description": "Azure tenant ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "abcdef12-ab12-abc12-ab12-abcdef123456" + }, + { + "name": "appId", + "description": "Azure app ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "abcdef12-ab12-abc12-ab12-abcdef123456" + }, + { + "name": "appSecret", + "description": "Azure app secret", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890=" + }, + { + "name": "resourceAppIdUri", + "description": "Security Center URI, usually doens't need to change", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://api.securitycenter.windows.com" + }, + { + "name": "oAuthUri", + 
"description": "Azure oAuth2 authentication endpoint", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://login.microsoftonline.com" + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://securitycenter.windows.com" + } + \ No newline at end of file diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_Isolate.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_Isolate.json index 9dcc99547..e78dac91e 100644 --- a/responders/MSDefenderEndpoints/MSDefenderEndpoints_Isolate.json +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_Isolate.json @@ -50,7 +50,7 @@ "type": "string", "multi": false, "required": true, - "defaultValue": "https://login.windows.net/" + "defaultValue": "https://login.microsoftonline.com" } ], "registration_required": true, diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCAlert.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCAlert.json index fe9c10a2f..255fa328c 100644 --- a/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCAlert.json +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCAlert.json @@ -1,7 +1,7 @@ { "name": "MSDefender-PushIOC-Alert", - "version": "1.0", - "author": "Keijo Korte", + "version": "2.0", + "author": "Keijo Korte, Louis-Maximilien Dupouy", "url": "https://github.com/TheHive-Project/Cortex-Analyzers", "license": "AGPL-V3", "description": "Push IOC to Defender client. 
Alert mode", @@ -50,7 +50,7 @@ "type": "string", "multi": false, "required": true, - "defaultValue": "https://login.windows.net/" + "defaultValue": "https://login.microsoftonline.com" } ], "registration_required": true, diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCBlock.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCBlock.json index d87914e25..eb211d7cd 100644 --- a/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCBlock.json +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_PushIOCBlock.json @@ -1,7 +1,7 @@ { "name": "MSDefender-PushIOC-Block", - "version": "1.0", - "author": "Keijo Korte", + "version": "2.0", + "author": "Keijo Korte, Louis-Maximilien Dupouy", "url": "https://github.com/TheHive-Project/Cortex-Analyzers", "license": "AGPL-V3", "description": "Push IOC to Defender client. Blocking mode", @@ -50,7 +50,7 @@ "type": "string", "multi": false, "required": true, - "defaultValue": "https://login.windows.net/" + "defaultValue": "https://login.microsoftonline.com" } ], "registration_required": true, diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_RestrictAppExecution.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_RestrictAppExecution.json new file mode 100644 index 000000000..525a80990 --- /dev/null +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_RestrictAppExecution.json @@ -0,0 +1,61 @@ +{ + "name": "MSDefender-RestrictAppExecution", + "version": "1.0", + "author": "Keijo Korte, Louis-Maximilien Dupouy", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Restrict execution of all applications on the device except a predefined set", + "dataTypeList": ["thehive:case_artifact"], + "command": "MSDefenderEndpoints/MSDefenderEndpoints.py", + "baseConfig": "MSDefenderforEndpoints", + "config": { + "service": "restrictAppExecution" + }, + "configurationItems": [ + { + "name": "tenantId", + "description": "Azure 
tenant ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "abcdef12-ab12-abc12-ab12-abcdef123456" + }, + { + "name": "appId", + "description": "Azure app ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "abcdef12-ab12-abc12-ab12-abcdef123456" + }, + { + "name": "appSecret", + "description": "Azure app secret", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890=" + }, + { + "name": "resourceAppIdUri", + "description": "Security Center URI, usually doens't need to change", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://api.securitycenter.windows.com" + }, + { + "name": "oAuthUri", + "description": "Azure oAuth2 authentication endpoint", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://login.microsoftonline.com" + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://securitycenter.windows.com" + } + \ No newline at end of file diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_UnRestrictAppExecution.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_UnRestrictAppExecution.json new file mode 100644 index 000000000..7b0c20d6a --- /dev/null +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_UnRestrictAppExecution.json @@ -0,0 +1,60 @@ +{ + "name": "MSDefender-UnRestrictAppExecution", + "version": "1.0", + "author": "Keijo Korte, Louis-Maximilien Dupouy", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Enable execution of any application on the device", + "dataTypeList": ["thehive:case_artifact"], + "command": "MSDefenderEndpoints/MSDefenderEndpoints.py", + "baseConfig": "MSDefenderforEndpoints", + "config": { + "service": "unrestrictAppExecution" + }, + "configurationItems": [ + { + "name": "tenantId", + 
"description": "Azure tenant ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "abcdef12-ab12-abc12-ab12-abcdef123456" + }, + { + "name": "appId", + "description": "Azure app ID", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "abcdef12-ab12-abc12-ab12-abcdef123456" + }, + { + "name": "appSecret", + "description": "Azure app secret", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890=" + }, + { + "name": "resourceAppIdUri", + "description": "Security Center URI, usually doens't need to change", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://api.securitycenter.windows.com" + }, + { + "name": "oAuthUri", + "description": "Azure oAuth2 authentication endpoint", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://login.microsoftonline.com" + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://securitycenter.windows.com" +} diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_Unisolate.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_Unisolate.json index 32ee5b4cd..eda10343b 100644 --- a/responders/MSDefenderEndpoints/MSDefenderEndpoints_Unisolate.json +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_Unisolate.json @@ -50,7 +50,7 @@ "type": "string", "multi": false, "required": true, - "defaultValue": "https://login.windows.net/" + "defaultValue": "https://login.microsoftonline.com" } ], "registration_required": true, diff --git a/responders/MSDefenderEndpoints/MSDefenderEndpoints_VirusScan.json b/responders/MSDefenderEndpoints/MSDefenderEndpoints_VirusScan.json index cccbaf2f5..69a9f9645 100644 --- a/responders/MSDefenderEndpoints/MSDefenderEndpoints_VirusScan.json +++ b/responders/MSDefenderEndpoints/MSDefenderEndpoints_VirusScan.json @@ -50,7 +50,7 @@ "type": "string", 
"multi": false, "required": true, - "defaultValue": "https://login.windows.net/" + "defaultValue": "https://login.microsoftonline.com" } ], "registration_required": true, diff --git a/responders/MSDefenderEndpoints/README.md b/responders/MSDefenderEndpoints/README.md index cb338f07a..66394dba9 100644 --- a/responders/MSDefenderEndpoints/README.md +++ b/responders/MSDefenderEndpoints/README.md @@ -4,7 +4,10 @@ * Isolate machine * Unisolate machine +* Restrict App Execution on a machine +* Remove app restriction on a machine * Run full antivirus scan +* Run an automated scan * Push IoC to Microsoft defender * Alert * BlockAndAlert @@ -37,7 +40,7 @@ In the registration form: ##### API permission On your new application page, click API Permissions > Add permission > APIs my organization uses > type **WindowsDefenderATP** and click on WindowsDefenderATP -Choose Application permissions, select **Alert.Read.All** AND **TI.ReadWrite.All** AND **Machine.ReadAll** AND **Machine.Isolate** AND **Machine.Scan** > Click on Add permissions. +Choose Application permissions, select **Alert.Read.All** AND **TI.ReadWrite.All** AND **Machine.ReadAll** AND **Machine.Isolate** AND **Machine.Scan** AND **Machine.RestrictExecution** > Click on Add permissions. After clicking the Add Permissions button, on the next screen we need to grant consent for the permission to take effect. Press the "Grant admin consent for {your tenant name}" button. 
diff --git a/responders/MSEntraID/MSEntraID.py b/responders/MSEntraID/MSEntraID.py new file mode 100755 index 000000000..4b8526cf6 --- /dev/null +++ b/responders/MSEntraID/MSEntraID.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +# Author: Daniel Weiner @dmweiner, revised by @jahamilto +import requests +import traceback +import datetime +from cortexutils.responder import Responder + +# Initialize Azure Class +class MSEntraID(Responder): + def __init__(self): + Responder.__init__(self) + self.client_id = self.get_param('config.client_id', None, 'Microsoft Entra ID Application ID/Client ID Missing') + self.client_secret = self.get_param('config.client_secret', None, 'Microsoft Entra ID Registered Application Client Secret Missing') + self.tenant_id = self.get_param('config.tenant_id', None, 'Microsoft Entra ID Tenant ID Mising') + self.time = '' + def run(self): + Responder.run(self) + + if self.get_param('data.dataType') == 'mail': + try: + self.user = self.get_param('data.data', None, 'No UPN supplied to revoke credentials for') + if not self.user: + self.error("No user supplied") + + token_data = { + "grant_type": "client_credentials", + 'client_id': self.client_id, + 'client_secret': self.client_secret, + 'resource': 'https://graph.microsoft.com', + 'scope': 'https://graph.microsoft.com' + } + + + #Authenticate to the graph api + + redirect_uri = "https://login.microsoftonline.com/{}/oauth2/token".format(self.tenant_id) + token_r = requests.post(redirect_uri, data=token_data) + token = token_r.json().get('access_token') + + if token_r.status_code != 200: + self.error('Failure to obtain azure access token: {}'.format(token_r.content)) + + # Set headers for future requests + headers = { + 'Authorization': 'Bearer {}'.format(token) + } + + base_url = 'https://graph.microsoft.com/v1.0/' + + r = requests.post(base_url + 'users/{}/revokeSignInSessions'.format(self.user), headers=headers) + + if r.status_code != 200: + self.error('Failure to revoke 
access tokens of user {}: {}'.format(self.user, r.content)) + + else: + #record time of successful auth token revokation + self.time = datetime.datetime.utcnow() + + except Exception as ex: + self.error(traceback.format_exc()) + # Build report to return to Cortex + full_report = {"message": "User {} authentication tokens successfully revoked at {}".format(self.user, self.time)} + self.report(full_report) + else: + self.error('Incorrect dataType. "mail" expected.') + + +if __name__ == '__main__': + MSEntraID().run() diff --git a/responders/MSEntraID/MSEntraID_TokenRevoker.json b/responders/MSEntraID/MSEntraID_TokenRevoker.json new file mode 100644 index 000000000..32454cbfe --- /dev/null +++ b/responders/MSEntraID/MSEntraID_TokenRevoker.json @@ -0,0 +1,35 @@ +{ + "name": "MSEntraID_TokenRevoker", + "version": "1.1", + "author": "Daniel Weiner @dmweiner, revised by @jahamilto", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Revoke all Microsoft Entra ID authentication session tokens for a User Principal Name.", + "dataTypeList": ["thehive:case_artifact"], + "command": "MSEntraID/MSEntraID.py", + "baseConfig": "MSEntraID", + "configurationItems": [ + {"name": "tenant_id", + "description": "Microsoft Entra ID Tenant ID", + "type": "string", + "multi": false, + "required": true + }, + {"name": "client_id", + "description": "Client ID/Application ID of Microsoft Entra ID Registered App", + "type": "string", + "multi": false, + "required": true + }, + {"name": "client_secret", + "description": "Secret for Microsoft Entra ID Registered Application", + "type": "string", + "multi": false, + "required": true + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.microsoft.com/security/business/identity-access/microsoft-entra-id" +} diff --git a/responders/MSEntraID/README.md b/responders/MSEntraID/README.md new file mode 100644 index 
000000000..32b8e1db6 --- /dev/null +++ b/responders/MSEntraID/README.md @@ -0,0 +1,37 @@ +## Microsoft Entra ID Sign In Token Revoker Responder + +This responder allows you to revoke the session tokens for an Microsoft Entra ID user. Requires the UPN of the account in question, which should be entered as a "mail" observable in TheHive. + +### Config + +To enable the responder, you need three values: +1. Microsoft Entra ID Tenant ID +2. Application ID +3. Application Secret + +The first two values can be found at any time in the application's Overview page in the Microsoft Entra ID portal. The secret must be generated and then stored in a safe place, as it is only fully visible when you first make it. + +## Setup + +### Prereqs +User account with the Cloud Application Administrator role. +User account with the Global Administrator Role (most of the steps can be done with only the Cloud App Administrator role, but the final authorization for its API permissions requires GA). + +### Steps + +#### Creation +1. Navigate to the [Microsoft Entra ID Portal](https://entra.microsoft.com/) and sign in with the relevant administrator account. +2. Navigate to App Registrations, and create a new registration. +3. Provide a display name (this can be anything, and can be changed later). Click Register. + +#### Secret +4. Navigate to Certificates and Secrets. +5. Create a new client secret. Enter a relevant description and set a security-conscious expiration date. +6. Copy the Value. **This will only be fully visible for a short time, so you should immediately copy it and store it in a safe place**. + +#### API Permissions +7. Navigate to API permissions. +8. Add the Directory.ReadWrite.All and User.ReadWrite.All permissions (Microsoft Graph API, application permissions). +9. Using a GA account, select the "Grant admin consent for *TENANTNAME*" button. + +10. Place the relevant values into the config within Cortex. 
\ No newline at end of file diff --git a/responders/MSEntraID/requirements.txt b/responders/MSEntraID/requirements.txt new file mode 100644 index 000000000..98df81c2f --- /dev/null +++ b/responders/MSEntraID/requirements.txt @@ -0,0 +1,3 @@ +cortexutils +requests +datetime \ No newline at end of file diff --git a/responders/MailIncidentStatus/requirements.txt b/responders/MailIncidentStatus/requirements.txt index c476e0aca..a33100424 100644 --- a/responders/MailIncidentStatus/requirements.txt +++ b/responders/MailIncidentStatus/requirements.txt @@ -1,2 +1,2 @@ cortexutils -thehive4py +thehive4py~=1.8.1 diff --git a/responders/Netcraft/Dockerfile b/responders/Netcraft/Dockerfile new file mode 100644 index 000000000..2630b3e1b --- /dev/null +++ b/responders/Netcraft/Dockerfile @@ -0,0 +1,6 @@ +FROM python:3 + +WORKDIR /worker +COPY . Netcraft +RUN pip install --no-cache-dir -r Netcraft/requirements.txt +ENTRYPOINT Netcraft/Netcraft.py diff --git a/responders/Netcraft/Netcraft.py b/responders/Netcraft/Netcraft.py new file mode 100755 index 000000000..b3f4b2dcf --- /dev/null +++ b/responders/Netcraft/Netcraft.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 +# encoding: utf-8 + +from cortexutils.responder import Responder +import requests + + +class NetcraftReporter(Responder): + def __init__(self): + Responder.__init__(self) + self.scheme = "https" + self.api_key = self.get_param( + 'config.api_key', None, "API-key Missing") + self.takedown_url = self.get_param( + 'config.takedown_url', None, "Takedown URL Missing") + self.observable_type = self.get_param('data.dataType', None, "Data type is empty") + self.observable_description = self.get_param('data.message', None, "Description is empty") + self.username = self.get_param( + 'config.username', None, "Takedown Username is empty") + self.password = self.get_param( + 'config.password', None, "Takedown Password is empty") + self.useUserPass = self.get_param( + 'config.useUserPass', None, "Takedown Use Username Password 
authentication is empty") + + def run(self): + Responder.run(self) + try: + supported_observables = ["domain", "url", "fqdn"] + if self.observable_type in supported_observables: + if self.observable_type == "domain" or self.observable_type == "fqdn": + domain = self.get_param('data.data', None, 'No artifacts available') + takedown = "{}://{}".format(self.scheme, domain) + elif self.observable_type == "url": + takedown = self.get_param('data.data') + + session = requests.Session() + session.headers.update({'User-Agent': 'Netcraft-Cortex-Responder'}) + + if self.useUserPass: + session.auth = (self.username, self.password) + else: + session.headers.update({'Authorization': 'Bearer ' + self.api_key}) + + payload = { + "attack": takedown, + "comment": "Automated takedown via Cortex" + } + response = session.post(self.takedown_url, data=payload) + + if response.status_code == 200: + self.report({'message': 'Takedown request sent to Netcraft. Message: {}'.format(response.text)}) + elif response.status_code == 401: + self.error('Failed authentication. Check API-Key. Message: {}'.format(response.text)) + else: + self.error('Failed to submit takedown request. Error code: {}. Error message: {}' + .format(response.status_code, response.text)) + else: + self.error('Incorrect dataType. 
"Domain", "FQDN", or "URL" expected.') + + except requests.exceptions.RequestException as e: + self.error(str(e)) + + def operations(self, raw): + return [self.build_operation('AddTagToArtifact', tag='Netcraft:takedown')] + + +if __name__ == '__main__': + NetcraftReporter().run() diff --git a/responders/Netcraft/NetcraftTakedown.json b/responders/Netcraft/NetcraftTakedown.json new file mode 100644 index 000000000..b349e34f3 --- /dev/null +++ b/responders/Netcraft/NetcraftTakedown.json @@ -0,0 +1,52 @@ +{ + "name": "Netcraft_TakedownPhishingURL", + "version": "1.0", + "author": "Keijo Korte - @korteke", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Submit URL to Netcraft's Takedown API.", + "dataTypeList": ["thehive:case_artifact"], + "command": "Netcraft/Netcraft.py", + "baseConfig": "Netcraft", + "configurationItems": [ + { + "name": "api_key", + "description": "Netcraft Takedown API key", + "type": "string", + "multi": false, + "required": false + }, + { + "name": "username", + "description": "Netcraft Takedown Username", + "type": "string", + "multi": false, + "required": false + }, + { + "name": "password", + "description": "Netcraft Takedown Password", + "type": "string", + "multi": false, + "required": false + }, + { + "name": "useUserPass", + "description": "Use User and Password authentication", + "type": "boolean", + "multi": false + }, + { + "name": "takedown_url", + "description": "Netcraft Takedown URL", + "type": "string", + "multi": false, + "required": true, + "defaultValue": "https://takedown.netcraft.com/authorise.php" + } + ], + "registration_required": true, + "subscription_required": true, + "free_subscription": false, + "service_homepage": "https://www.netcraft.com/cybercrime/countermeasures/" +} diff --git a/responders/Netcraft/README.md b/responders/Netcraft/README.md new file mode 100644 index 000000000..8219db51f --- /dev/null +++ b/responders/Netcraft/README.md @@ -0,0 +1,13 @@ 
+### Netcraft Takedown + +This responder sends observables to [Netcraft Takedown service](https://www.netcraft.com/cybercrime/countermeasures/). + +#### Requirements +One need to request API-key from Netcraft [Contact form](https://www.netcraft.com/contact/). + +#### Configuration +- `api_key` : Netcraft Takedown API-key +- `takedown_url`: Netcraft Takedown URL (default: https://takedown.netcraft.com/authorise.php) + +#### Official documenation +Official API documentation: [Netcraft site](https://takedown.netcraft.com/help_api.php). \ No newline at end of file diff --git a/responders/Netcraft/requirements.txt b/responders/Netcraft/requirements.txt new file mode 100644 index 000000000..6aabc3cfa --- /dev/null +++ b/responders/Netcraft/requirements.txt @@ -0,0 +1,2 @@ +cortexutils +requests diff --git a/responders/PaloAltoNGFW/requirements.txt b/responders/PaloAltoNGFW/requirements.txt index a827e6c55..9868223c6 100644 --- a/responders/PaloAltoNGFW/requirements.txt +++ b/responders/PaloAltoNGFW/requirements.txt @@ -1,4 +1,4 @@ cortexutils requests pan-os-python -thehive4py \ No newline at end of file +thehive4py~=1.8.1 \ No newline at end of file diff --git a/responders/RT4/requirements.txt b/responders/RT4/requirements.txt index f47373772..52dd08a8d 100644 --- a/responders/RT4/requirements.txt +++ b/responders/RT4/requirements.txt @@ -1,4 +1,5 @@ defang jinja2 rt -requests \ No newline at end of file +requests +cortexutils diff --git a/responders/Shuffle/shuffle.py b/responders/Shuffle/shuffle.py index 63646be2b..5c8690191 100755 --- a/responders/Shuffle/shuffle.py +++ b/responders/Shuffle/shuffle.py @@ -16,7 +16,7 @@ def run(self): headers = { "Authorization": "Bearer %s" % self.api_key } - r = requests.post(parsed_url, headers=headers) + r = requests.post(parsed_url, json=self.get_data(), headers=headers) if r.status_code == 200: self.report({"Message": "Executed workflow"}) else: diff --git a/responders/Telegram/README.md b/responders/Telegram/README.md new file 
mode 100644 index 000000000..1552e42a5 --- /dev/null +++ b/responders/Telegram/README.md @@ -0,0 +1,15 @@ +### Telegram responder + +##### Data required for the work of the responder + +* **api_token** +How to create a telegram bot and get API token [read here](https://flowxo.com/how-to-create-a-bot-for-telegram-short-and-simple-guide-for-beginners/) + +* **chat_id** +How to get a group or channal chat ID [read here](https://stackoverflow.com/questions/32423837/telegram-bot-how-to-get-a-group-chat-id) + +* **date_format** +Make the date and time format convenient for you or use the default. About date and time code formats [here](https://www.geeksforgeeks.org/python-datetime-strptime-function/) + +* **tag** +If you want a tag to be attached to the case when executing the responder, specify its name (optional) \ No newline at end of file diff --git a/responders/Telegram/Telegram.json b/responders/Telegram/Telegram.json new file mode 100644 index 000000000..f3c49f036 --- /dev/null +++ b/responders/Telegram/Telegram.json @@ -0,0 +1,46 @@ +{ + "name": "Telegram", + "version": "1.0", + "author": "Alex Kolnik, PS Cloud Services, @ps_kz", + "url": "https://github.com/TheHive-Project/Cortex-Analyzers", + "license": "AGPL-V3", + "description": "Send a message to Telegram with information from TheHive case", + "dataTypeList": ["thehive:case"], + "command": "Telegram/telegram.py", + "baseConfig": "Telegram", + "configurationItems": [ + { + "name": "api_token", + "description": "The token is a string, like 110201543:AAHdqTcvCH1vGWJxfSeofSAs0K5PALDsaw, which is required to authorize the bot and send requests to the Bot API", + "type": "string", + "multi": false, + "required": true + }, + { + "name": "chat_id", + "description": "ID of the chat or channel to which the message will be sent", + "type": "number", + "multi": false, + "required": true + }, + { + "name": "date_format", + "description": "https://www.geeksforgeeks.org/python-datetime-strptime-function/", + "type": 
#!/usr/bin/env python3
# encoding: utf-8

"""Cortex responder that posts a Markdown summary of a TheHive case to Telegram."""

import json
from datetime import datetime

import requests
from cortexutils.responder import Responder

# Mapping of TheHive numeric severities to human-readable labels.
SEVERITY_LABELS = {
    1: 'Low',
    2: 'Medium',
    3: 'High',
    4: 'Critical',
}


class Telegram(Responder):
    """Send a message describing a TheHive case via the Telegram Bot API.

    Configuration (Cortex ``config.*`` parameters):
        api_token:   Telegram bot API token (required).
        chat_id:     destination chat or channel ID (required).
        date_format: strftime() format for the case start date
                     (defaults to ``%d.%m.%Y %H:%M``).
        tag:         optional tag added to the case after a successful run.
    """

    def __init__(self):
        Responder.__init__(self)
        # Bot token and destination chat are mandatory; get_param aborts the
        # responder with the given message when they are missing.
        self.api_token = self.get_param(
            "config.api_token", None, "Missing Telegram bot API token")
        self.chat_id = self.get_param(
            "config.chat_id", None, "Missing Telegram Chat ID")
        # strftime() format used to render the case start date.
        self.date_format = self.get_param(
            "config.date_format", "%d.%m.%Y %H:%M")
        # Optional tag attached to the case via operations().
        self.tag = self.get_param("config.tag", None)

    def run(self):
        """Build the case summary message and POST it to the Telegram API."""
        Responder.run(self)

        case_id = self.get_param("data.caseId")
        title = self.get_param("data.title")
        # .get() keeps the responder working even if an unexpected severity
        # value ever appears (fall back to 'Medium' instead of KeyError).
        severity = SEVERITY_LABELS.get(
            self.get_param("data.severity", 2), 'Medium')
        owner = self.get_param("data.owner")
        # Guard against a missing/None description so .replace() cannot crash.
        description = self.get_param("data.description") or ""

        # TheHive stores timestamps in milliseconds since the epoch.
        start_date = datetime.fromtimestamp(
            self.get_param("data.startDate", 0) / 1000)
        start_date_formatted = start_date.strftime(self.date_format)

        # Markdown dialects differ: TheHive uses ** for bold, Telegram uses *.
        description = description.replace("**", "*")
        description = description.replace("\n\n", "\n")

        msg_content = f'#Case{case_id}\n'
        msg_content += f'*{title}*\n\n'
        msg_content += f'*Severity*: {severity}\n'
        msg_content += f'*Assignee*: {owner}\n'
        msg_content += f'*Date*: {start_date_formatted}\n\n'
        msg_content += f'*Description*:\n{description}'

        msg_data = {
            'chat_id': self.chat_id,
            'text': msg_content,
            'parse_mode': 'markdown',
        }

        hook_url = f'https://api.telegram.org/bot{self.api_token}/sendMessage'
        headers = {'content-type': 'application/json',
                   'Accept-Charset': 'UTF-8'}
        response = requests.post(hook_url, headers=headers,
                                 data=json.dumps(msg_data))

        # Surface Telegram API failures as responder errors instead of
        # reporting an error payload as a successful run.
        if not response.ok:
            self.error(
                f"Telegram API error {response.status_code}: {response.text}")

        self.report({"message": response.text})

    def operations(self, raw):
        """Return follow-up Cortex operations (always a list, never None)."""
        if self.tag:
            return [self.build_operation("AddTagToCase", tag=self.tag)]
        # Cortex expects a list of operations; returning None breaks callers.
        return []


if __name__ == "__main__":
    Telegram().run()
+
Capa Analysis Results
+
+ + + + + + + + + + + + + + + + + + + + + + + +
CapabilityATT&CK IDATT&CK TacticATT&CK TechniqueATT&CK SubtechniqueRule PathExamples
{{rule.meta.name}} + + + {{rule.meta.attack[0].id}} + + + + {{rule.meta.attack[0].tactic}} + + {{rule.meta.attack[0].technique}} + + {{rule.meta.attack[0].subtechnique || 'N/A'}} + + + {{rule.meta.namespace}} + + {{rule.meta.examples.join(', ')}}
+
+
diff --git a/thehive-templates/Capa_1_0/short.html b/thehive-templates/Capa_1_0/short.html new file mode 100644 index 000000000..bcc319f2b --- /dev/null +++ b/thehive-templates/Capa_1_0/short.html @@ -0,0 +1,3 @@ + + {{t.namespace}}={{t.value}} + diff --git a/thehive-templates/Censys_1_0/long.html b/thehive-templates/Censys_1_0/long.html deleted file mode 100644 index 1a1e8aa91..000000000 --- a/thehive-templates/Censys_1_0/long.html +++ /dev/null @@ -1,145 +0,0 @@ -
-
- Censys.io information for {{artifact.data}} -
-
-
- {{content.message}} -
-
-
-
IP
-
{{content.ip.ip}}
-
-
-
Last update
-
{{content.ip.updated_at}}
-
-
-
Location
-
- {{content.ip.location.continent}} - {{content.ip.location.country}} - {{content.ip.location.province}} - {{content.ip.location.city}} -
-
-
-
AS
-
{{content.ip.autonomous_system.asn}}: {{content.ip.autonomous_system.name}}
-
-
-
-
Info on port {{protocol}}
-
- -
-
-
{{content.ip[protocol] | json}}
- -
-
-
-
-
-
-
-
Metadata
-
-

Source: {{content.cert.metadata.source}}

-

Added at: {{content.cert.metadata.added_at}}

-

Updated at: {{content.cert.metadata.updated_at}}

-
-
-
-
Added to CT
-
-

Comodo Mammoth
{{content.cert.ct.comodo_mammoth.added_to_ct_at}}

-

Comodo Sabre
{{content.cert.ct.comodo_sabre.added_to_ct_at}}

-

Google Pilot
{{content.cert.ct.google_pilot.added_to_ct_at}}

-

Google Rocketeer
{{content.cert.ct.google_rocketeer.added_to_ct_at}}

-

Symantec WS CT
{{content.cert.ct.symantec_ws_ct.added_to_ct_at}}

-
-
-
-
Issuer
-
-

{{cn}}

-
-
-
-
Validity
-
-

Valid since: {{content.cert.parsed.validity.start}}

-

Valid until: {{content.cert.parsed.validity.end}}

-
-
-
-
Full certificate data
-
- -
-
-
-
{{content.cert | json}}
- -
-
-
-
-
-
- -
-
Domain
-
{{content.website.domain}}
-
-
-
Last update
-
{{content.website.updated_at}}
-
-
-
Location
-
- {{content.website.location.continent}} - {{content.website.location.country}} - {{content.website.location.province}} - {{content.website.location.city}} -
-
-
-
AS
-
{{content.website.autonomous_system.asn}}: {{content.website.autonomous_system.name}}
-
-
-
Info on port {{protocol}}
-
- -
-
-
-
{{content.website[protocol] | json}}
- -
-
-
-
-
-
-
- - -
-
- {{artifact.data | fang}} -
-
- {{content.errorMessage}} -
-
diff --git a/thehive-templates/Censys_2_0/long.html b/thehive-templates/Censys_2_0/long.html new file mode 100644 index 000000000..cbb9928de --- /dev/null +++ b/thehive-templates/Censys_2_0/long.html @@ -0,0 +1,217 @@ +
+
+ Censys.io information for {{artifact.data}} +
+
+
+ {{content.message}} +
+ +
+ +
+
+
IP
+
{{ipData.ip}}
+
+
+
Last update
+
{{ipData.last_updated_at | date:'medium'}}
+
+
+
Location
+
+ {{ipData.location.continent}} - {{ipData.location.country}} - {{ipData.location.province}} - {{ipData.location.city}} +
+
+
+
AS
+
{{ipData.autonomous_system.asn}}: {{ipData.autonomous_system.name}}
+
+ +
+
Reverse DNS
+
{{ipData.dns.reverse_dns.names.join(', ')}}
+
+ +
+

Services

+
+
Info on port {{service.port}}
+
+ +
+
+
{{service | json}}
+
+
+
+
+
+
+
+ + + +
+ +
+
Metadata
+
+

Added at: {{content.cert.added_at}}

+

Modified at: {{content.cert.modified_at}}

+

Validated at: {{content.cert.validated_at}}

+
+
+ + +
+
Fingerprints
+
+

SHA-256: {{content.cert.fingerprint_sha256}}

+

SHA-1: {{content.cert.fingerprint_sha1}}

+

MD5: {{content.cert.fingerprint_md5}}

+
+
+ + +
+
Validation Level
+
{{content.cert.validation_level}}
+
+ + +
+
Labels
+
{{content.cert.labels.join(', ')}}
+
+ + +
+
Subject Alternative Names
+
+

{{dnsName}}

+
+
+ + +
+
Signed Certificate Timestamps
+
+
+

Log ID: {{sct.log_id}}

+

Timestamp: {{sct.timestamp | date:'medium'}}

+

Version: {{sct.version}}

+
+
+
+
+ + +
+
Issuer
+
+

{{cn}}

+
+
+ + +
+
Validity
+
+

Valid since: {{content.cert.parsed.validity_period.not_before | date:'medium'}}

+

Valid until: {{content.cert.parsed.validity_period.not_after | date:'medium'}}

+
+
+ + +
+
Full Certificate Data
+
+ +
+
+
+
{{content.cert | json}}
+
+
+
+
+
+ + + +
+ +
+
+
Domain
+
{{site.domain || 'N/A'}}
+
+
+
IP
+
{{site.ip}}
+
+
+
Last update
+
{{site.last_updated_at | date:'medium'}}
+
+
+
Location
+
+ {{site.location.continent}} - {{site.location.country}} - {{site.location.province}} - {{site.location.city}} +
+
+
+
AS
+
{{site.autonomous_system.asn}}: {{site.autonomous_system.name}}
+
+
+
Reverse DNS
+
{{site.dns.reverse_dns.names.join(', ')}}
+
+ + +
+

Services

+
+
Info on service {{service.port}}
+
+ +
+
+
+
{{service | json}}
+
+
+
+
+
+ +
+
+
+
+
+ + +
+
+ {{artifact.data}} +
+
+ {{content.errorMessage}} +
+
\ No newline at end of file diff --git a/thehive-templates/Censys_1_0/short.html b/thehive-templates/Censys_2_0/short.html similarity index 100% rename from thehive-templates/Censys_1_0/short.html rename to thehive-templates/Censys_2_0/short.html diff --git a/thehive-templates/Crowdsec_1_0/long.html b/thehive-templates/Crowdsec_1_0/long.html deleted file mode 100644 index 13617c06c..000000000 --- a/thehive-templates/Crowdsec_1_0/long.html +++ /dev/null @@ -1,111 +0,0 @@ - - -
-
- {{(artifact.data || artifact.attachment.name) | fang}} -
-
- {{content.message || 'Error while retrieving information'}} -
-
- -
-
-
- CrowdSec record for "{{artifact.data}}" -
- view more on app.crowdsec.net -
-
-
-
Reverse DNS
-
{{content.reverse_dns}}
-
-
-
Range
-
{{content.ip_range}}
-
-
-
Autonomous System
-
{{content.as_name}}
-
-
-
Location
-
{{content.location.city}} {{content.location.country}}
-
-
-
-
First seen
-
{{content.history.first_seen}}
-
-
-
Last seen
-
{{content.history.last_seen}}
-
-
-
-
Known For
-
- - {{b.label}} - -
-
-
-
- -
-
- Attacks details -
- -
-
Classification
-
- - {{c.label}} - -
-
-
-
False Positive
-
- - {{fp.label}} - -
-
-
-
-
Attacks
-
- - {{a.label}} - -
-
- -
-

Aggressiveness (0 to 5) -

- - - - - - - - - - - - - -
OverallLast dayLast weekLast month
{{content.scores.overall.aggressiveness}}{{content.scores.last_day.aggressiveness}}{{content.scores.last_week.aggressiveness}}{{content.scores.last_month.aggressiveness}}
-
-
-
\ No newline at end of file diff --git a/thehive-templates/Crowdsec_1_1/long.html b/thehive-templates/Crowdsec_1_1/long.html new file mode 100644 index 000000000..65403c60f --- /dev/null +++ b/thehive-templates/Crowdsec_1_1/long.html @@ -0,0 +1,199 @@ + + +
+
+ {{(artifact.data || artifact.attachment.name) | fang}} +
+
+ {{content.message || 'Error while retrieving information'}} +
+
+ +
+
+
+ CrowdSec record for "{{artifact.data}}" +
+
+
+ Reputation + {{content.reputation}} + +
+
+

+ View more on app.crowdsec.net +
+
+
+
+
+
Crowd Confidence
+
+ {{content.confidence}} +
+
+
+
+
+
Location
+
{{content.location.city}} {{content.location.country}}
+
+
+
+
+
+
+
First seen
+
{{content.history.first_seen}}
+
+
+
+
+
Last seen
+
{{content.history.last_seen}}
+
+
+
+
+
Known For
+
+ + {{b.label}} + + + {{cve}} + +
+
+
+
MITRE
+
+ + + {{m.label}} + + +
+
+
+
+
+
Reverse DNS
+
{{content.reverse_dns}}
+
+
+
Range
+
{{content.ip_range}}
+
+
+
Autonomous System
+
{{content.as_name}}
+
+
+
+ +
+
+ Attack details +
+
+
Classification
+
+ + {{c.label}} + +
+
+
+
False Positive
+
+ + {{fp.label}} + +
+
+
+
+
Attacks
+
+ + {{a.label}} + +
+
+
+
Top Targeted countries
+
+ + {{country}}: {{percent}}% + +
+
+
+

Aggressiveness (0 to 5)

+ + + + + + + + + + + + + +
OverallLast dayLast weekLast month
{{content.scores.overall.aggressiveness}}{{content.scores.last_day.aggressiveness}}{{content.scores.last_week.aggressiveness}}{{content.scores.last_month.aggressiveness}}
+
+
+ +
+
+ Blocklists containing this IP +
+
+
+
+
+ {{r.label}} +
+
+ {{r.description}} +
+
+
+
+
+ No blocklists found for this IP +
+
+
+ + +
diff --git a/thehive-templates/DNSdumpster_report_1_0/long.html b/thehive-templates/DNSdumpster_report_1_0/long.html new file mode 100644 index 000000000..e2c09c3b2 --- /dev/null +++ b/thehive-templates/DNSdumpster_report_1_0/long.html @@ -0,0 +1,127 @@ +
+
+ DNSDumpster Information for {{ artifact.data }} +
+
+
+
+
Domain
+
{{ content.result.domain }}
+
+
+
+
+ +
+
+ DNS Records +
+
+
+
+
DNS Records
+
+ + Domain: {{ dns.domain }}
+ IP: {{ dns.ip }}
+ Reverse DNS: {{ dns.reverse_dns }}
+ AS: {{ dns.as }}
+ Provider: {{ dns.provider }}
+ Country: {{ dns.country }}
+ Header: {{ dns.header }}

+
+
+
+
+
+
+ +
+
+ MX Records +
+
+
+
+
MX Records
+
+ + Domain: {{ mx.domain }}
+ Reverse DNS: {{ mx.reverse_dns }}
+ IP: {{ mx.ip }}
+ AS: {{ mx.as }}
+ Provider: {{ mx.provider }}
+ Country: {{ mx.country }}
+ Header: {{ mx.header }}

+
+
+
+
+
+
+ +
+
+ TXT Records +
+
+
+
+
TXT Records
+
+ + {{ txt }}

+
+
+
+
+
+
+ +
+
+ Host Information +
+
+
+
+
Host Records
+
+ + Domain: {{ host.domain }}
+ IP: {{ host.ip }}
+ Reverse DNS: {{ host.reverse_dns }}
+ AS: {{ host.as }}
+ Provider: {{ host.provider }}
+ Country: {{ host.country }}
+ Header: {{ host.header }}

+
+
+
+
+
+
+ +
+
+ DNS Map +
+
+
+
+
Map URL
+
View DNS Map
+
+
+
+
+ + +
+
+ {{ artifact.data }} +
+
+ {{ content.errorMessage }} +
+
\ No newline at end of file diff --git a/thehive-templates/DNSdumpster_report_1_0/short.html b/thehive-templates/DNSdumpster_report_1_0/short.html new file mode 100644 index 000000000..27045d7e9 --- /dev/null +++ b/thehive-templates/DNSdumpster_report_1_0/short.html @@ -0,0 +1,6 @@ + + {{t.namespace}}:{{t.predicate}}={{t.value}} + \ No newline at end of file diff --git a/thehive-templates/EclecticIQ_SearchObservable_1_0/long.html b/thehive-templates/EclecticIQ_SearchObservable_1_0/long.html new file mode 100644 index 000000000..2fd9cb7d3 --- /dev/null +++ b/thehive-templates/EclecticIQ_SearchObservable_1_0/long.html @@ -0,0 +1,40 @@ +
No Data
+ +
+
{{res.type}} - {{res.title}}
+
+
+
ID:
+
+ {{res.type}}--{{res.id}} +
+
+
+
Entity Type:
+
{{res.type}}
+
+
+
Timestamp:
+
{{res.timestamp}}
+
+
+
Source Name:
+
{{res.source_name}}
+
+
+
Tags:
+
  • {{tag}}
+
+
+
+ + + +
+
+ {{(artifact.data || artifact.attachment.name) | fang}} +
+
{{content.errorMessage}}
+
diff --git a/thehive-templates/EclecticIQ_SearchObservable_1_0/short.html b/thehive-templates/EclecticIQ_SearchObservable_1_0/short.html new file mode 100644 index 000000000..599291f16 --- /dev/null +++ b/thehive-templates/EclecticIQ_SearchObservable_1_0/short.html @@ -0,0 +1,7 @@ + + {{t.namespace}} {{t.predicate}} {{t.value}} + diff --git a/thehive-templates/HybridAnalysis_GetReport_1_0/long.html b/thehive-templates/HybridAnalysis_GetReport_1_0/long.html index 860321651..0b2e9ccaa 100644 --- a/thehive-templates/HybridAnalysis_GetReport_1_0/long.html +++ b/thehive-templates/HybridAnalysis_GetReport_1_0/long.html @@ -1,4 +1,4 @@ - +
{{(artifact.data || artifact.attachment.name) | fang}} @@ -10,107 +10,304 @@
- Related Reports
- -
-
-
- Verdict: - - {{r.verdict}} -
-
-
- Threat Score: {{r.threatscore}}
-
-
- Tagged as: - - {{tag}} - -
-
- Submitted filename: {{r.submitname}}
-
-
- Analysis Start Time: {{r.analysis_start_time}}
-
-
- MD5: {{r.md5}}
-
-
- SHA1: {{r.sha1}}
-
-
- SHA256: {{r.sha256}}
-
-
- File Description: {{r.type}}
-
-
- AVdetect Score: {{r.avdetect}}
-
-
- VxFamily: {{r.vxfamily}}
-
-
- Total Signatures: {{r.total_signatures}}
-
-
- Environment Description: {{r.environmentDescription}}
-
-
- DNS requests:
  • {{domain}}
-
-
- Contacted Hosts:
  • {{host}}
-
-
- Online report: - https://www.hybrid-analysis.com/sample/{{r.sha256}} -
-
+ + +
+
+ No results were returned.
- + +
-
-
- Verdict: - - {{r.verdict}} -
-
-
- Submitted filename: {{r.submitname}}
-
-
- Threat Score: {{r.threatscore}}
-
-
- SHA256: {{r.sha256}}
+
+ + +
+

Submissions

+
+
Filename:
+
{{submission.filename}}
+
Submission ID:
+
{{submission.submission_id}}
+
Created At:
+
{{submission.created_at | date:'medium'}}
+
+ + +
+
Verdict:
+
+ + {{report.verdict}} + +
+
+ + +
+
Threat Score:
+
{{report.threat_score}}
+
+ + +
+
Online Report:
+
+ + https://www.hybrid-analysis.com/sample/{{report.sha256}} + +
+
+
+ + +
+
MD5:
+
{{report.md5}}
-
- File type: {{r.type_short}}
+
+
SHA1:
+
{{report.sha1}}
-
- File Description: {{r.type}}
+
+
SHA256:
+
{{report.sha256}}
-
- AVdetect Score: {{r.avdetect}}
+
+
File Description:
+
{{report.type}}
-
- VxFamily: {{r.vxfamily}}
+
+
AVdetect Score:
+
{{report.av_detect}}
-
- Environment Description: {{r.environmentDescription}}
+
+
VxFamily:
+
{{report.vx_family}}
-
- Online report: - https://www.hybrid-analysis.com/sample/{{r.sha256}} +
+
Environment Description:
+
{{report.environment_description}}
+ + +
+

+ MITRE ATT&CK Tactics and Techniques + +

+
+
+
+
Tactic:
+
{{attck.tactic}}
+ +
Technique:
+
{{attck.technique}}
+ +
Technique ID:
+
+ {{attck.attck_id}} +
+ +
Parent Technique:
+
+ {{attck.parent.technique}} (ID: + {{attck.parent.attck_id}}) +
+ +
Malicious Identifiers Count:
+
{{attck.malicious_identifiers_count}}
+ +
Suspicious Identifiers Count:
+
{{attck.suspicious_identifiers_count}}
+ +
Informative Identifiers Count:
+
{{attck.informative_identifiers_count}}
+
+
+
+
+
+ + +
+

+ Signatures + +

+
+
+
+
Signature Name:
+
{{signature.name}}
+ +
Description:
+
{{signature.description}}
+ +
Threat Level:
+
{{signature.threat_level_human}} ({{signature.threat_level}}) +
+ +
Relevance:
+
{{signature.relevance}}
+ +
Category:
+
{{signature.category}}
+
+
+
+
+
+ + +
+

+ Extracted Files + +

+
+
+
+
File Name:
+
{{file.filename}}
+ +
Type:
+
{{file.type}}
+ +
MD5:
+
{{file.md5}}
+ +
SHA256:
+
{{file.sha256}}
+ +
Size:
+
{{file.size}}
+
+
+
+
+
+ + +
+

+ Antivirus Detection + +

+
+
+
AV Detection Score:
+
{{report.av_detect}}
+ +
VX Family:
+
{{report.vx_family}}
+ +
Malicious Engine Count:
+
{{report.malicious_engine_count}}
+ +
Malicious Engine Details:
+
+
    +
  • {{engine.name}} - {{engine.result}}
  • +
+
+
+
+
+ + +
+

+ Contacted Hosts + +

+
+
    +
  • {{host}}
  • +
+
+
+ + +
+

+ Indicators of Compromise (IoCs) + +

+
+
+
MD5:
+
{{report.md5}}
+ +
SHA1:
+
{{report.sha1}}
+ +
SHA256:
+
{{report.sha256}}
+
+
+
+ + +
+

+ Processes + +

+
+
+
+
Process Name:
+
{{process.name}}
+ +
PID:
+
{{process.pid}}
+ +
Parent PID:
+
{{process.ppid}}
+ +
Command Line:
+
{{process.command_line}}
+
+
+
+
+
+
diff --git a/thehive-templates/MSEntraID_GetSignIns_1_0/long.html b/thehive-templates/MSEntraID_GetSignIns_1_0/long.html new file mode 100644 index 000000000..d9f508b50 --- /dev/null +++ b/thehive-templates/MSEntraID_GetSignIns_1_0/long.html @@ -0,0 +1,104 @@ +
+
+ Microsoft Entra ID Sign Ins +
+ +
+ Analyzers searched for: {{content.filterParameters}} +
+
+ +
+
+ Microsoft Entra ID Sign Ins +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SignIn IDTimeStatusIPApp NameRiskDevice IDDevice NameDevice OSCond'l AccessLocation
{{r.id | limitTo: 8}}{{r.basicDetails.signInTime}}{{r.basicDetails.result | limitTo: 7}}IPv6{{r.basicDetails.ip}}{{r.basicDetails.appName}}{{r.basicDetails.riskLevel}}Not Available{{r.deviceDetails.id | limitTo: 8}}{{r.deviceDetails.deviceName}}{{r.deviceDetails.operatingSystem}}NoYes{{r.locationDetails.city}}, {{r.locationDetails.state}}, {{r.locationDetails.countryOrRegion}}
+
+
+
+
+ Expanded Information +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + +
SignIn IDIPv6App NameClient AppResource NameApplied CAPsDevice ID
{{r.id}}{{r.basicDetails.ip}}IPv4{{r.basicDetails.appName}}{{r.basicDetails.clientApp}}{{r.basicDetails.resourceName}}{{r.appliedConditionalAccessPolicies}}{{r.deviceDetails.id}}
+
+ +
+ + + + +
+
+ {{(artifact.data || artifact.attachment.name) | fang}} +
+
+
+
GetAzureSignIns:
+
{{content.errorMessage}}
+
+
+
diff --git a/thehive-templates/MSEntraID_GetSignIns_1_0/short.html b/thehive-templates/MSEntraID_GetSignIns_1_0/short.html new file mode 100644 index 000000000..41e83c5d0 --- /dev/null +++ b/thehive-templates/MSEntraID_GetSignIns_1_0/short.html @@ -0,0 +1,3 @@ + + {{t.namespace}}:{{t.predicate}}="{{t.value}}" + \ No newline at end of file diff --git a/thehive-templates/OktaUserLookup_1_0/long.html b/thehive-templates/OktaUserLookup_1_0/long.html new file mode 100644 index 000000000..aa37f1ad1 --- /dev/null +++ b/thehive-templates/OktaUserLookup_1_0/long.html @@ -0,0 +1,26 @@ +
+ + +
+
+ Okta User Lookup Results +
+
+
+ No records found +
+
+
+
{{key}}:
+
{{value}}
+
+
+
+
+
+ +
\ No newline at end of file diff --git a/thehive-templates/OktaUserLookup_1_0/short.html b/thehive-templates/OktaUserLookup_1_0/short.html new file mode 100644 index 000000000..acc0c81a6 --- /dev/null +++ b/thehive-templates/OktaUserLookup_1_0/short.html @@ -0,0 +1,6 @@ + + {{t.namespace}}:{{t.predicate}}={{t.value}} + diff --git a/thehive-templates/QrDecode_1_0/long.html b/thehive-templates/QrDecode_1_0/long.html new file mode 100755 index 000000000..e8046ed0c --- /dev/null +++ b/thehive-templates/QrDecode_1_0/long.html @@ -0,0 +1,72 @@ + +
+
+ Stats +
+
+ + + + + + + + + + + + + +
File Name{{ content["stats"]["file_name"] }}
File Extension{{ content["stats"]["file_extension"] }}
Total QR Code(s){{ content["stats"]["total_qr_codes"] }}
+
+
+ +
+
+ QR: {{ result.results.QR }} +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Data Category{{ result.results.data_category }}
Data Type{{ result.results.data_type }}
Info{{ result.results.info }}
Data{{ result.results.data }}
Brute Data{{ result.results.brute_data }}
On PDF page{{ result.results.page }}
+
+
+ + + +
+
+ Error +
+
+
+
+ QrDecode: +
+
{{ content.stats.error }}
+
+
+
diff --git a/thehive-templates/Triage_1_0/short.html b/thehive-templates/QrDecode_1_0/short.html old mode 100644 new mode 100755 similarity index 100% rename from thehive-templates/Triage_1_0/short.html rename to thehive-templates/QrDecode_1_0/short.html diff --git a/thehive-templates/Triage_1_0/long.html b/thehive-templates/Triage_2_0/long.html similarity index 94% rename from thehive-templates/Triage_1_0/long.html rename to thehive-templates/Triage_2_0/long.html index cb8fc8746..d5734554f 100644 --- a/thehive-templates/Triage_1_0/long.html +++ b/thehive-templates/Triage_2_0/long.html @@ -2,8 +2,7 @@
Triage Analyze
@@ -100,4 +99,3 @@
- diff --git a/thehive-templates/Triage_2_0/short.html b/thehive-templates/Triage_2_0/short.html new file mode 100644 index 000000000..5fc0dabfb --- /dev/null +++ b/thehive-templates/Triage_2_0/short.html @@ -0,0 +1,3 @@ + + {{t.namespace}}:{{t.predicate}}="{{t.value}}" + diff --git a/utils/flavors/check_json_schema.py b/utils/flavors/check_json_schema.py index 6821f2b28..74e0fa8f2 100644 --- a/utils/flavors/check_json_schema.py +++ b/utils/flavors/check_json_schema.py @@ -9,8 +9,7 @@ """ import json -import jsonschema -from jsonschema import validate +from jsonschema import Draft7Validator, FormatChecker import sys import os import argparse @@ -62,23 +61,23 @@ def fixJsonFlavorFile(jsonfile:dict) -> dict: jsonfile["screenshots"] = screenshots return jsonfile -def validateFlavorFormat(flavorfile:str, schemafile:str, fix:bool) -> str: +def validateFlavorFormat(flavorfile: str, schemafile: str, fix: bool) -> str: flavorSchema = openJsonFile(schemafile) fjson = openJsonFile(flavorfile) - - validator = jsonschema.Draft7Validator(flavorSchema, format_checker=jsonschema.draft7_format_checker) - errors = sorted(validator.iter_errors(fjson),key=lambda e: e.path) - if not errors: + formatchecker = FormatChecker() + validator = Draft7Validator(flavorSchema, format_checker=formatchecker) + errors = sorted(validator.iter_errors(fjson), key=lambda e: e.path) + if not errors: printSuccess(True, flavorfile) else: printSuccess(False, flavorfile) for error in errors: - print("{}: {}".format(error.path,error.message)) + print("{}: {}".format(error.path, error.message)) if fix: print("Fixing {}".format(flavorfile)) j = fixJsonFlavorFile(fjson) with open(flavorfile, 'w+') as fj: - fj.write(json.dumps(j,indent=4)) + fj.write(json.dumps(j, indent=4)) fj.close() @@ -118,4 +117,4 @@ def run(): print(e) if __name__ == '__main__': - run() + run() \ No newline at end of file diff --git a/utils/flavors/flavor_schema.json b/utils/flavors/flavor_schema.json index da331d473..3e3145d50 100644 --- 
a/utils/flavors/flavor_schema.json +++ b/utils/flavors/flavor_schema.json @@ -95,6 +95,9 @@ "description": { "type": "string" }, + "type": { + "type": "string" + }, "multi": { "type": "boolean" }, @@ -109,6 +112,7 @@ "name", "description", "multi", + "type", "required" ] }, diff --git a/utils/test_doc/requirements.txt b/utils/test_doc/requirements.txt index d14e7a946..2859da4d7 100644 --- a/utils/test_doc/requirements.txt +++ b/utils/test_doc/requirements.txt @@ -5,3 +5,4 @@ mkdocs-git-revision-date-localized-plugin mkdocs-material mkdocs-material-extensions mkdocs-pymdownx-material-extras +mdutils \ No newline at end of file