Merge branch 'develop' into feature/single_command_for_installation

# Conflicts:
#	deployment_scripts/deploy_linux.sh

Commit: 5affcbda1a
@@ -0,0 +1,12 @@
+from typing import List
+
+from common.cloud.aws.aws_instance import AwsInstance
+from common.cloud.azure.azure_instance import AzureInstance
+from common.cloud.gcp.gcp_instance import GcpInstance
+from common.cloud.instance import CloudInstance
+
+all_cloud_instances = [AwsInstance(), AzureInstance(), GcpInstance()]
+
+
+def get_all_cloud_instances() -> List[CloudInstance]:
+    return all_cloud_instances
@@ -6,6 +6,9 @@ import logging
 __author__ = 'itay.mizeretz'
 
+from common.cloud.environment_names import Environment
+from common.cloud.instance import CloudInstance
+
 AWS_INSTANCE_METADATA_LOCAL_IP_ADDRESS = "169.254.169.254"
 AWS_LATEST_METADATA_URI_PREFIX = 'http://{0}/latest/'.format(AWS_INSTANCE_METADATA_LOCAL_IP_ADDRESS)
 ACCOUNT_ID_KEY = "accountId"
@@ -13,10 +16,15 @@ ACCOUNT_ID_KEY = "accountId"
 logger = logging.getLogger(__name__)
 
 
-class AwsInstance(object):
+class AwsInstance(CloudInstance):
     """
     Class which gives useful information about the current instance you're on.
     """
+    def is_instance(self):
+        return self.instance_id is not None
+
+    def get_cloud_provider_name(self) -> Environment:
+        return Environment.AWS
+
     def __init__(self):
         self.instance_id = None
@@ -57,9 +65,6 @@
     def get_region(self):
         return self.region
 
-    def is_aws_instance(self):
-        return self.instance_id is not None
-
     @staticmethod
     def _extract_account_id(instance_identity_document_response):
         """
@@ -4,7 +4,7 @@ import boto3
 import botocore
 from botocore.exceptions import ClientError
 
-from common.cloud.aws_instance import AwsInstance
+from common.cloud.aws.aws_instance import AwsInstance
 
 __author__ = ['itay.mizeretz', 'shay.nehmad']
@@ -0,0 +1,55 @@
+import logging
+import requests
+
+from common.cloud.environment_names import Environment
+from common.cloud.instance import CloudInstance
+
+LATEST_AZURE_METADATA_API_VERSION = "2019-04-30"
+AZURE_METADATA_SERVICE_URL = "http://169.254.169.254/metadata/instance?api-version=%s" % LATEST_AZURE_METADATA_API_VERSION
+
+logger = logging.getLogger(__name__)
+
+
+class AzureInstance(CloudInstance):
+    """
+    Access to useful information about the current machine if it's an Azure VM.
+    Based on Azure metadata service: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
+    """
+    def is_instance(self):
+        return self.on_azure
+
+    def get_cloud_provider_name(self) -> Environment:
+        return Environment.AZURE
+
+    def __init__(self):
+        """
+        Determines if on Azure and if so, gets some basic metadata on this instance.
+        """
+        self.instance_name = None
+        self.instance_id = None
+        self.location = None
+        self.on_azure = False
+
+        try:
+            response = requests.get(AZURE_METADATA_SERVICE_URL, headers={"Metadata": "true"})
+            self.on_azure = True
+
+            # If not on cloud, the metadata URL is non-routable and the connection will fail.
+            # If on AWS, should get 404 since the metadata service URL is different, so bool(response) will be false.
+            if response:
+                logger.debug("On Azure. Trying to parse metadata.")
+                self.try_parse_response(response)
+            else:
+                logger.warning("On Azure, but metadata response not ok: {}".format(response.status_code))
+        except requests.RequestException:
+            logger.debug("Failed to get response from Azure metadata service: This instance is not on Azure.")
+            self.on_azure = False
+
+    def try_parse_response(self, response):
+        try:
+            response_data = response.json()
+            self.instance_name = response_data["compute"]["name"]
+            self.instance_id = response_data["compute"]["vmId"]
+            self.location = response_data["compute"]["location"]
+        except KeyError:
+            logger.exception("Error while parsing response from Azure metadata service.")
@@ -0,0 +1,15 @@
+from enum import Enum
+
+
+class Environment(Enum):
+    UNKNOWN = "Unknown"
+    ON_PREMISE = "On Premise"
+    AZURE = "Azure"
+    AWS = "AWS"
+    GCP = "GCP"
+    ALIBABA = "Alibaba Cloud"
+    IBM = "IBM Cloud"
+    DigitalOcean = "Digital Ocean"
+
+
+ALL_ENVIRONMENTS_NAMES = [x.value for x in Environment]
@@ -0,0 +1,43 @@
+import logging
+import requests
+
+from common.cloud.environment_names import Environment
+from common.cloud.instance import CloudInstance
+
+logger = logging.getLogger(__name__)
+
+
+GCP_METADATA_SERVICE_URL = "http://metadata.google.internal/"
+
+
+class GcpInstance(CloudInstance):
+    """
+    Used to determine if on GCP. See https://cloud.google.com/compute/docs/storing-retrieving-metadata#runninggce
+    """
+    def is_instance(self):
+        return self.on_gcp
+
+    def get_cloud_provider_name(self) -> Environment:
+        return Environment.GCP
+
+    def __init__(self):
+        self.on_gcp = False
+
+        try:
+            # If not on GCP, this domain shouldn't resolve.
+            response = requests.get(GCP_METADATA_SERVICE_URL)
+
+            if response:
+                logger.debug("Got ok metadata response: on GCP")
+                self.on_gcp = True
+
+                if "Metadata-Flavor" not in response.headers:
+                    logger.warning("Got unexpected GCP Metadata format")
+                else:
+                    if not response.headers["Metadata-Flavor"] == "Google":
+                        logger.warning("Got unexpected Metadata flavor: {}".format(response.headers["Metadata-Flavor"]))
+            else:
+                logger.warning("On GCP, but metadata response not ok: {}".format(response.status_code))
+        except requests.RequestException:
+            logger.debug("Failed to get response from GCP metadata service: This instance is not on GCP")
+            self.on_gcp = False
@@ -0,0 +1,14 @@
+from common.cloud.environment_names import Environment
+
+
+class CloudInstance(object):
+    """
+    This is an abstract class which represents a cloud instance.
+
+    The current machine can be a cloud instance (for example EC2 instance or Azure VM).
+    """
+    def is_instance(self) -> bool:
+        raise NotImplementedError()
+
+    def get_cloud_provider_name(self) -> Environment:
+        raise NotImplementedError()
@@ -1,6 +1,6 @@
 import logging
 
-from common.cloud.aws_service import AwsService
+from common.cloud.aws.aws_service import AwsService
 from common.cmd.aws.aws_cmd_result import AwsCmdResult
 from common.cmd.cmd_runner import CmdRunner
 from common.cmd.cmd_status import CmdStatus
@@ -0,0 +1,4 @@
+AWS_COLLECTOR = "AwsCollector"
+HOSTNAME_COLLECTOR = "HostnameCollector"
+ENVIRONMENT_COLLECTOR = "EnvironmentCollector"
+PROCESS_LIST_COLLECTOR = "ProcessListCollector"
@@ -1,6 +1,6 @@
 import hashlib
-import os
 import json
+import os
 import sys
 import uuid
 from abc import ABCMeta
@@ -12,7 +12,7 @@ GUID = str(uuid.getnode())
 
 EXTERNAL_CONFIG_FILE = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'monkey.bin')
 
-SENSITIVE_FIELDS = ["exploit_password_list", "exploit_user_list"]
+SENSITIVE_FIELDS = ["exploit_password_list", "exploit_user_list", "exploit_ssh_keys"]
 HIDDEN_FIELD_REPLACEMENT_CONTENT = "hidden"
 
 
@@ -125,6 +125,7 @@ class Configuration(object):
 
     finger_classes = []
     exploiter_classes = []
+    system_info_collectors_classes = []
 
     # how many victims to look for in a single scan iteration
     victims_max_find = 100
@@ -32,11 +32,17 @@ from infection_monkey.telemetry.attack.t1106_telem import T1106Telem
 from common.utils.attack_utils import ScanStatus, UsageEnum
 from infection_monkey.exploit.HostExploiter import HostExploiter
 
+MAX_DEPTH_REACHED_MESSAGE = "Reached max depth, shutting down"
+
 __author__ = 'itamar'
 
 LOG = logging.getLogger(__name__)
 
 
+class PlannedShutdownException(Exception):
+    pass
+
+
 class InfectionMonkey(object):
     def __init__(self, args):
         self._keep_running = False
@@ -87,143 +93,158 @@ class InfectionMonkey(object):
             LOG.debug("Default server: %s is already in command servers list" % self._default_server)
 
     def start(self):
-        LOG.info("Monkey is running...")
+        try:
+            LOG.info("Monkey is starting...")
+
+            LOG.debug("Starting the setup phase.")
             # Sets island's IP and port for monkey to communicate to
-        if not self.set_default_server():
-            return
+            self.set_default_server()
             self.set_default_port()
 
             # Create a dir for monkey files if there isn't one
             create_monkey_dir()
 
-        if WindowsUpgrader.should_upgrade():
-            self._upgrading_to_64 = True
-            self._singleton.unlock()
-            LOG.info("32bit monkey running on 64bit Windows. Upgrading.")
-            WindowsUpgrader.upgrade(self._opts)
-            return
+            self.upgrade_to_64_if_needed()
 
             ControlClient.wakeup(parent=self._parent)
             ControlClient.load_control_config()
 
             if is_windows_os():
                 T1106Telem(ScanStatus.USED, UsageEnum.SINGLETON_WINAPI).send()
 
-        if not WormConfiguration.alive:
-            LOG.info("Marked not alive from configuration")
-            return
+            self.shutdown_by_not_alive_config()
 
             if firewall.is_enabled():
                 firewall.add_firewall_rule()
 
             monkey_tunnel = ControlClient.create_control_tunnel()
             if monkey_tunnel:
                 monkey_tunnel.start()
 
             StateTelem(is_done=False).send()
             TunnelTelem().send()
 
+            LOG.debug("Starting the post-breach phase.")
+            self.collect_system_info_if_configured()
-        # Executes post breach actions
-        PostBreach().execute()
+            PostBreach().execute_all_configured()
+
+            LOG.debug("Starting the propagation phase.")
-        if 0 == WormConfiguration.depth:
-            TraceTelem("Reached max depth, shutting down").send()
-            return
-        else:
-            LOG.debug("Running with depth: %d" % WormConfiguration.depth)
+            self.shutdown_by_max_depth_reached()
 
             for iteration_index in range(WormConfiguration.max_iterations):
                 ControlClient.keepalive()
                 ControlClient.load_control_config()
 
                 self._network.initialize()
 
                 self._fingerprint = HostFinger.get_instances()
 
                 self._exploiters = HostExploiter.get_classes()
 
                 if not self._keep_running or not WormConfiguration.alive:
                     break
 
                 machines = self._network.get_victim_machines(max_find=WormConfiguration.victims_max_find,
                                                              stop_callback=ControlClient.check_for_stop)
                 is_empty = True
                 for machine in machines:
                     if ControlClient.check_for_stop():
                         break
 
                     is_empty = False
                     for finger in self._fingerprint:
                         LOG.info("Trying to get OS fingerprint from %r with module %s",
                                  machine, finger.__class__.__name__)
                         finger.get_host_fingerprint(machine)
 
                     ScanTelem(machine).send()
 
                     # skip machines that we've already exploited
                     if machine in self._exploited_machines:
                         LOG.debug("Skipping %r - already exploited",
                                   machine)
                         continue
                     elif machine in self._fail_exploitation_machines:
                         if WormConfiguration.retry_failed_explotation:
                             LOG.debug("%r - exploitation failed before, trying again", machine)
                         else:
                             LOG.debug("Skipping %r - exploitation failed before", machine)
                             continue
 
                     if monkey_tunnel:
                         monkey_tunnel.set_tunnel_for_host(machine)
                     if self._default_server:
                         if self._network.on_island(self._default_server):
                             machine.set_default_server(get_interface_to_target(machine.ip_addr) +
                                                        (':' + self._default_server_port if self._default_server_port else ''))
                         else:
                             machine.set_default_server(self._default_server)
                         LOG.debug("Default server for machine: %r set to %s" % (machine, machine.default_server))
 
                     # Order exploits according to their type
                     if WormConfiguration.should_exploit:
                         self._exploiters = sorted(self._exploiters, key=lambda exploiter_: exploiter_.EXPLOIT_TYPE.value)
                         host_exploited = False
                         for exploiter in [exploiter(machine) for exploiter in self._exploiters]:
                             if self.try_exploiting(machine, exploiter):
                                 host_exploited = True
                                 VictimHostTelem('T1210', ScanStatus.USED, machine=machine).send()
                                 break
                         if not host_exploited:
                             self._fail_exploitation_machines.add(machine)
                             VictimHostTelem('T1210', ScanStatus.SCANNED, machine=machine).send()
                     if not self._keep_running:
                         break
 
                 if (not is_empty) and (WormConfiguration.max_iterations > iteration_index + 1):
                     time_to_sleep = WormConfiguration.timeout_between_iterations
                     LOG.info("Sleeping %d seconds before next life cycle iteration", time_to_sleep)
                     time.sleep(time_to_sleep)
 
             if self._keep_running and WormConfiguration.alive:
                 LOG.info("Reached max iterations (%d)", WormConfiguration.max_iterations)
             elif not WormConfiguration.alive:
                 LOG.info("Marked not alive from configuration")
 
             # if host was exploited, before continue to closing the tunnel ensure the exploited host had its chance to
             # connect to the tunnel
             if len(self._exploited_machines) > 0:
                 time_to_sleep = WormConfiguration.keep_tunnel_open_time
                 LOG.info("Sleeping %d seconds for exploited machines to connect to tunnel", time_to_sleep)
                 time.sleep(time_to_sleep)
 
             if monkey_tunnel:
                 monkey_tunnel.stop()
                 monkey_tunnel.join()
+        except PlannedShutdownException:
+            LOG.info("A planned shutdown of the Monkey occurred. Logging the reason and finishing execution.")
+            LOG.exception("Planned shutdown, reason:")
 
+    def shutdown_by_max_depth_reached(self):
+        if 0 == WormConfiguration.depth:
+            TraceTelem(MAX_DEPTH_REACHED_MESSAGE).send()
+            raise PlannedShutdownException(MAX_DEPTH_REACHED_MESSAGE)
+        else:
+            LOG.debug("Running with depth: %d" % WormConfiguration.depth)
 
+    def collect_system_info_if_configured(self):
         if WormConfiguration.collect_system_info:
             LOG.debug("Calling system info collection")
             system_info_collector = SystemInfoCollector()
             system_info = system_info_collector.get_info()
             SystemInfoTelem(system_info).send()
 
+    def shutdown_by_not_alive_config(self):
+        if not WormConfiguration.alive:
+            raise PlannedShutdownException("Marked 'not alive' from configuration.")
 
+    def upgrade_to_64_if_needed(self):
+        if WindowsUpgrader.should_upgrade():
+            self._upgrading_to_64 = True
+            self._singleton.unlock()
+            LOG.info("32bit monkey running on 64bit Windows. Upgrading.")
+            WindowsUpgrader.upgrade(self._opts)
+            raise PlannedShutdownException("Finished upgrading from 32bit to 64bit.")
 
     def cleanup(self):
         LOG.info("Monkey cleanup started")
@@ -346,9 +367,11 @@ class InfectionMonkey(object):
             self._default_server_port = ''
 
     def set_default_server(self):
+        """
+        Sets the default server for the Monkey to communicate back to.
+        :raises PlannedShutdownException if couldn't find the server.
+        """
         if not ControlClient.find_server(default_tunnel=self._default_tunnel):
-            LOG.info("Monkey couldn't find server. Going down.")
-            return False
+            raise PlannedShutdownException("Monkey couldn't find server with {} default tunnel.".format(self._default_tunnel))
         self._default_server = WormConfiguration.current_server
         LOG.debug("default server set to: %s" % self._default_server)
-        return True
@@ -1,5 +1,6 @@
 # -*- mode: python -*-
 import os
+import sys
 import platform
 
 
@@ -48,7 +49,7 @@ def is_windows():
 
 
 def is_32_bit():
-    return platform.architecture()[0] == "32bit"
+    return sys.maxsize <= 2**32
 
 
 def get_bin_folder():
@@ -93,7 +94,18 @@ def get_traceroute_binaries():
 
 
 def get_monkey_filename():
-    return 'monkey.exe' if is_windows() else 'monkey'
+    name = 'monkey-'
+    if is_windows():
+        name = name+"windows-"
+    else:
+        name = name+"linux-"
+    if is_32_bit():
+        name = name+"32"
+    else:
+        name = name+"64"
+    if is_windows():
+        name = name+".exe"
+    return name
 
 
 def get_exe_strip():
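Note for reviewers: the agent filenames produced by the new `get_monkey_filename()` follow directly from the branches above; listing them here is just a convenience and adds no new behaviour.

```python
# Derived from get_monkey_filename() above:
#   Windows, 64-bit -> "monkey-windows-64.exe"
#   Windows, 32-bit -> "monkey-windows-32.exe"
#   Linux,   64-bit -> "monkey-linux-64"
#   Linux,   32-bit -> "monkey-linux-32"
```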
@@ -20,7 +20,7 @@ class PostBreach(object):
         self.os_is_linux = not is_windows_os()
         self.pba_list = self.config_to_pba_list()
 
-    def execute(self):
+    def execute_all_configured(self):
         """
         Executes all post breach actions.
         """
@@ -0,0 +1,6 @@
+from PyInstaller.utils.hooks import collect_submodules, collect_data_files
+
+# Import all actions as modules
+hiddenimports = collect_submodules('infection_monkey.system_info.collectors')
+# Add action files that we enumerate
+datas = (collect_data_files('infection_monkey.system_info.collectors', include_py_files=True))
@@ -34,7 +34,7 @@ The monkey is composed of three separate parts.
 6. To build the final exe:
     - `cd monkey\infection_monkey`
    - `build_windows.bat`
-    - `output is placed under dist\monkey.exe`
+    - output is placed under `dist\monkey32.exe` or `dist\monkey64.exe` depending on your version of Python
 
 ## Linux
 
@@ -66,7 +66,7 @@ Tested on Ubuntu 16.04.
 - `chmod +x build_linux.sh`
 - `./build_linux.sh`
 
-output is placed under dist/monkey
+output is placed under `dist/monkey32` or `dist/monkey64` depending on your version of Python
 
 ### Sambacry
 
@@ -6,9 +6,9 @@ import psutil
 from enum import IntEnum
 
 from infection_monkey.network.info import get_host_subnets
-from infection_monkey.system_info.aws_collector import AwsCollector
 from infection_monkey.system_info.azure_cred_collector import AzureCollector
 from infection_monkey.system_info.netstat_collector import NetstatCollector
+from infection_monkey.system_info.system_info_collectors_handler import SystemInfoCollectorsHandler
 
 LOG = logging.getLogger(__name__)
 
@@ -61,50 +61,12 @@ class InfoCollector(object):
         self.info = {}
 
     def get_info(self):
-        self.get_hostname()
-        self.get_process_list()
+        # Collect all hardcoded
         self.get_network_info()
         self.get_azure_info()
-        self.get_aws_info()
 
-    def get_hostname(self):
-        """
-        Adds the fully qualified computer hostname to the system information.
-        :return: None. Updates class information
-        """
-        LOG.debug("Reading hostname")
-        self.info['hostname'] = socket.getfqdn()
-
-    def get_process_list(self):
-        """
-        Adds process information from the host to the system information.
-        Currently lists process name, ID, parent ID, command line
-        and the full image path of each process.
-        :return: None. Updates class information
-        """
-        LOG.debug("Reading process list")
-        processes = {}
-        for process in psutil.process_iter():
-            try:
-                processes[process.pid] = {"name": process.name(),
-                                          "pid": process.pid,
-                                          "ppid": process.ppid(),
-                                          "cmdline": " ".join(process.cmdline()),
-                                          "full_image_path": process.exe(),
-                                          }
-            except (psutil.AccessDenied, WindowsError):
-                # we may be running as non root
-                # and some processes are impossible to acquire in Windows/Linux
-                # in this case we'll just add what we can
-                processes[process.pid] = {"name": "null",
-                                          "pid": process.pid,
-                                          "ppid": process.ppid(),
-                                          "cmdline": "ACCESS DENIED",
-                                          "full_image_path": "null",
-                                          }
-                continue
-
-        self.info['process_list'] = processes
-
+        # Collect all plugins
+        SystemInfoCollectorsHandler().execute_all_configured()
 
     def get_network_info(self):
         """
@@ -150,11 +112,3 @@ class InfoCollector(object):
         except Exception:
             # If we failed to collect azure info, no reason to fail all the collection. Log and continue.
             LOG.error("Failed collecting Azure info.", exc_info=True)
-
-    def get_aws_info(self):
-        # noinspection PyBroadException
-        try:
-            self.info['aws'] = AwsCollector().get_aws_info()
-        except Exception:
-            # If we failed to collect aws info, no reason to fail all the collection. Log and continue.
-            LOG.error("Failed collecting AWS info.", exc_info=True)
@@ -1,29 +0,0 @@
-import logging
-
-from common.cloud.aws_instance import AwsInstance
-
-__author__ = 'itay.mizeretz'
-
-LOG = logging.getLogger(__name__)
-
-
-class AwsCollector(object):
-    """
-    Extract info from AWS machines
-    """
-
-    @staticmethod
-    def get_aws_info():
-        LOG.info("Collecting AWS info")
-        aws = AwsInstance()
-        info = {}
-        if aws.is_aws_instance():
-            LOG.info("Machine is an AWS instance")
-            info = \
-                {
-                    'instance_id': aws.get_instance_id()
-                }
-        else:
-            LOG.info("Machine is NOT an AWS instance")
-
-        return info
@@ -0,0 +1,3 @@
+"""
+This package holds all the dynamic (plugin) collectors
+"""
@@ -0,0 +1,31 @@
+import logging
+
+from common.cloud.aws.aws_instance import AwsInstance
+from common.data.system_info_collectors_names import AWS_COLLECTOR
+from infection_monkey.system_info.system_info_collector import SystemInfoCollector
+
+
+logger = logging.getLogger(__name__)
+
+
+class AwsCollector(SystemInfoCollector):
+    """
+    Extract info from AWS machines.
+    """
+    def __init__(self):
+        super().__init__(name=AWS_COLLECTOR)
+
+    def collect(self) -> dict:
+        logger.info("Collecting AWS info")
+        aws = AwsInstance()
+        info = {}
+        if aws.is_instance():
+            logger.info("Machine is an AWS instance")
+            info = \
+                {
+                    'instance_id': aws.get_instance_id()
+                }
+        else:
+            logger.info("Machine is NOT an AWS instance")
+
+        return info
@@ -0,0 +1,24 @@
+from common.cloud.all_instances import get_all_cloud_instances
+from common.cloud.environment_names import Environment
+from common.data.system_info_collectors_names import ENVIRONMENT_COLLECTOR
+from infection_monkey.system_info.system_info_collector import SystemInfoCollector
+
+
+def get_monkey_environment() -> str:
+    """
+    Get the Monkey's running environment.
+    :return: One of the cloud providers if on cloud; otherwise, assumes "on premise".
+    """
+    for instance in get_all_cloud_instances():
+        if instance.is_instance():
+            return instance.get_cloud_provider_name().value
+
+    return Environment.ON_PREMISE.value
+
+
+class EnvironmentCollector(SystemInfoCollector):
+    def __init__(self):
+        super().__init__(name=ENVIRONMENT_COLLECTOR)
+
+    def collect(self) -> dict:
+        return {"environment": get_monkey_environment()}
@@ -0,0 +1,16 @@
+import logging
+import socket
+
+from common.data.system_info_collectors_names import HOSTNAME_COLLECTOR
+from infection_monkey.system_info.system_info_collector import SystemInfoCollector
+
+
+logger = logging.getLogger(__name__)
+
+
+class HostnameCollector(SystemInfoCollector):
+    def __init__(self):
+        super().__init__(name=HOSTNAME_COLLECTOR)
+
+    def collect(self) -> dict:
+        return {"hostname": socket.getfqdn()}
@@ -0,0 +1,50 @@
+import logging
+import psutil
+
+from common.data.system_info_collectors_names import PROCESS_LIST_COLLECTOR
+from infection_monkey.system_info.system_info_collector import SystemInfoCollector
+
+logger = logging.getLogger(__name__)
+
+# Linux doesn't have WindowsError
+try:
+    WindowsError
+except NameError:
+    # noinspection PyShadowingBuiltins
+    WindowsError = psutil.AccessDenied
+
+
+class ProcessListCollector(SystemInfoCollector):
+    def __init__(self):
+        super().__init__(name=PROCESS_LIST_COLLECTOR)
+
+    def collect(self) -> dict:
+        """
+        Adds process information from the host to the system information.
+        Currently lists process name, ID, parent ID, command line
+        and the full image path of each process.
+        """
+        logger.debug("Reading process list")
+        processes = {}
+        for process in psutil.process_iter():
+            try:
+                processes[process.pid] = {
+                    "name": process.name(),
+                    "pid": process.pid,
+                    "ppid": process.ppid(),
+                    "cmdline": " ".join(process.cmdline()),
+                    "full_image_path": process.exe(),
+                }
+            except (psutil.AccessDenied, WindowsError):
+                # we may be running as non root and some processes are impossible to acquire in Windows/Linux.
+                # In this case we'll just add what we know.
+                processes[process.pid] = {
+                    "name": "null",
+                    "pid": process.pid,
+                    "ppid": process.ppid(),
+                    "cmdline": "ACCESS DENIED",
+                    "full_image_path": "null",
+                }
+                continue
+
+        return {'process_list': processes}
@@ -0,0 +1,38 @@
+from infection_monkey.config import WormConfiguration
+from infection_monkey.utils.plugins.plugin import Plugin
+from abc import ABCMeta, abstractmethod
+
+import infection_monkey.system_info.collectors
+
+
+class SystemInfoCollector(Plugin, metaclass=ABCMeta):
+    """
+    ABC for system info collection. See system_info_collector_handler for more info. Basically, to implement a new system info
+    collector, inherit from this class in an implementation in the infection_monkey.system_info.collectors class, and override
+    the 'collect' method. Don't forget to parse your results in the Monkey Island and to add the collector to the configuration
+    as well - see monkey_island.cc.services.processing.system_info_collectors for examples.
+
+    See the Wiki page "How to add a new System Info Collector to the Monkey?" for a detailed guide.
+    """
+    def __init__(self, name="unknown"):
+        self.name = name
+
+    @staticmethod
+    def should_run(class_name) -> bool:
+        return class_name in WormConfiguration.system_info_collectors_classes
+
+    @staticmethod
+    def base_package_file():
+        return infection_monkey.system_info.collectors.__file__
+
+    @staticmethod
+    def base_package_name():
+        return infection_monkey.system_info.collectors.__package__
+
+    @abstractmethod
+    def collect(self) -> dict:
+        """
+        Collect the relevant information and return it in a dictionary.
+        To be implemented by each collector.
+        """
+        raise NotImplementedError()
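Note for reviewers: the docstring above describes the plugin flow (subclass `SystemInfoCollector`, give it a name, override `collect()`). A minimal sketch of such a plugin follows; `ExampleCollector` and the `EXAMPLE_COLLECTOR` name are hypothetical and are not part of this changeset.

```python
import logging

from infection_monkey.system_info.system_info_collector import SystemInfoCollector

logger = logging.getLogger(__name__)

# Hypothetical name; a real collector would add its constant to
# common/data/system_info_collectors_names.py and to the config schema.
EXAMPLE_COLLECTOR = "ExampleCollector"


class ExampleCollector(SystemInfoCollector):
    def __init__(self):
        super().__init__(name=EXAMPLE_COLLECTOR)

    def collect(self) -> dict:
        # The returned dict is stored under this collector's name by
        # SystemInfoCollectorsHandler and sent to the Island as telemetry.
        logger.debug("Running the example collector")
        return {"example_key": "example_value"}
```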
@@ -0,0 +1,33 @@
+import logging
+from typing import Sequence
+
+from infection_monkey.system_info.system_info_collector import SystemInfoCollector
+from infection_monkey.telemetry.system_info_telem import SystemInfoTelem
+
+LOG = logging.getLogger(__name__)
+
+
+class SystemInfoCollectorsHandler(object):
+    def __init__(self):
+        self.collectors_list = self.config_to_collectors_list()
+
+    def execute_all_configured(self):
+        successful_collections = 0
+        system_info_telemetry = {}
+        for collector in self.collectors_list:
+            try:
+                LOG.debug("Executing system info collector: '{}'".format(collector.name))
+                collected_info = collector.collect()
+                system_info_telemetry[collector.name] = collected_info
+                successful_collections += 1
+            except Exception as e:
+                # If we failed one collector, no need to stop execution. Log and continue.
+                LOG.error("Collector {} failed. Error info: {}".format(collector.name, e))
+        LOG.info("All system info collectors executed. Total {} executed, out of which {} collected successfully.".
+                 format(len(self.collectors_list), successful_collections))
+
+        SystemInfoTelem({"collectors": system_info_telemetry}).send()
+
+    @staticmethod
+    def config_to_collectors_list() -> Sequence[SystemInfoCollector]:
+        return SystemInfoCollector.get_instances()
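Note for reviewers: per the handler above, each collector's dict is stored under the collector's name and sent in a single SystemInfoTelem. A sketch of the resulting payload shape (field values are made up) — on the Island side this is what ends up under `telemetry_json["data"]["collectors"]` and feeds the dispatcher added later in this commit:

```python
# Illustrative only; values are invented, the structure follows the handler above.
example_collectors_telemetry = {
    "collectors": {
        "EnvironmentCollector": {"environment": "AWS"},
        "HostnameCollector": {"hostname": "victim-host.local"},
        "AwsCollector": {"instance_id": "i-0123456789abcdef0"},
    }
}
```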
@@ -1,6 +1,6 @@
 import monkey_island.cc.auth
 from monkey_island.cc.environment import Environment
-from common.cloud.aws_instance import AwsInstance
+from common.cloud.aws.aws_instance import AwsInstance
 
 __author__ = 'itay.mizeretz'
 
@@ -9,6 +9,7 @@ from monkey_island.cc.models.monkey_ttl import MonkeyTtl, create_monkey_ttl_docu
 from monkey_island.cc.consts import DEFAULT_MONKEY_TTL_EXPIRY_DURATION_IN_SECONDS
 from monkey_island.cc.models.command_control_channel import CommandControlChannel
 from monkey_island.cc.utils import local_ip_addresses
+from common.cloud import environment_names
 
 MAX_MONKEYS_AMOUNT_TO_CACHE = 100
 
@@ -42,6 +43,10 @@ class Monkey(Document):
     ttl_ref = ReferenceField(MonkeyTtl)
     tunnel = ReferenceField("self")
     command_control_channel = EmbeddedDocumentField(CommandControlChannel)
+
+    # Environment related fields
+    environment = StringField(default=environment_names.Environment.UNKNOWN.value,
+                              choices=environment_names.ALL_ENVIRONMENTS_NAMES)
     aws_instance_id = StringField(required=False)  # This field only exists when the monkey is running on an AWS
     # instance. See https://github.com/guardicore/monkey/issues/426.
@@ -55,7 +60,8 @@ class Monkey(Document):
             raise MonkeyNotFoundError("info: {0} | id: {1}".format(ex, str(db_id)))
 
     @staticmethod
-    def get_single_monkey_by_guid(monkey_guid):
+    # See https://www.python.org/dev/peps/pep-0484/#forward-references
+    def get_single_monkey_by_guid(monkey_guid) -> 'Monkey':
         try:
             return Monkey.objects.get(guid=monkey_guid)
         except DoesNotExist as ex:
@@ -6,7 +6,7 @@ import flask_restful
 
 from monkey_island.cc.auth import jwt_required
 from monkey_island.cc.services.remote_run_aws import RemoteRunAwsService
-from common.cloud.aws_service import AwsService
+from common.cloud.aws.aws_service import AwsService
 
 CLIENT_ERROR_FORMAT = "ClientError, error message: '{}'. Probably, the IAM role that has been associated with the " \
                       "instance doesn't permit SSM calls. "
@@ -1,3 +1,6 @@
+from common.data.system_info_collectors_names \
+    import AWS_COLLECTOR, ENVIRONMENT_COLLECTOR, HOSTNAME_COLLECTOR, PROCESS_LIST_COLLECTOR
+
 WARNING_SIGN = " \u26A0"
 
 SCHEMA = {
@@ -99,6 +102,44 @@ SCHEMA = {
                 }
             ]
         },
+        "system_info_collectors_classes": {
+            "title": "System Information Collectors",
+            "type": "string",
+            "anyOf": [
+                {
+                    "type": "string",
+                    "enum": [
+                        ENVIRONMENT_COLLECTOR
+                    ],
+                    "title": "Collect which environment this machine is on (on prem/cloud)",
+                    "attack_techniques": []
+                },
+                {
+                    "type": "string",
+                    "enum": [
+                        AWS_COLLECTOR
+                    ],
+                    "title": "If on AWS, collect more information about the instance",
+                    "attack_techniques": []
+                },
+                {
+                    "type": "string",
+                    "enum": [
+                        HOSTNAME_COLLECTOR
+                    ],
+                    "title": "Collect the machine's hostname",
+                    "attack_techniques": []
+                },
+                {
+                    "type": "string",
+                    "enum": [
+                        PROCESS_LIST_COLLECTOR
+                    ],
+                    "title": "Collect running processes on the machine",
+                    "attack_techniques": []
+                },
+            ],
+        },
         "post_breach_acts": {
             "title": "Post breach actions",
             "type": "string",
@@ -433,6 +474,21 @@ SCHEMA = {
                     "attack_techniques": ["T1003"],
                     "description": "Determines whether to use Mimikatz"
                 },
+                "system_info_collectors_classes": {
+                    "title": "System info collectors",
+                    "type": "array",
+                    "uniqueItems": True,
+                    "items": {
+                        "$ref": "#/definitions/system_info_collectors_classes"
+                    },
+                    "default": [
+                        ENVIRONMENT_COLLECTOR,
+                        AWS_COLLECTOR,
+                        HOSTNAME_COLLECTOR,
+                        PROCESS_LIST_COLLECTOR
+                    ],
+                    "description": "Determines which system information collectors will collect information."
+                },
             }
         },
         "life_cycle": {
@@ -1,7 +1,7 @@
 import logging
 
-from common.cloud.aws_instance import AwsInstance
-from common.cloud.aws_service import AwsService
+from common.cloud.aws.aws_instance import AwsInstance
+from common.cloud.aws.aws_service import AwsService
 from common.cmd.aws.aws_cmd_runner import AwsCmdRunner
 from common.cmd.cmd import Cmd
 from common.cmd.cmd_runner import CmdRunner
@@ -54,7 +54,7 @@ class RemoteRunAwsService:
 
     @staticmethod
     def is_running_on_aws():
-        return RemoteRunAwsService.aws_instance.is_aws_instance()
+        return RemoteRunAwsService.aws_instance.is_instance()
 
     @staticmethod
     def update_aws_region_authless():
@@ -130,7 +130,8 @@ class RemoteRunAwsService:
         return r"[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {" \
                r"$true}; (New-Object System.Net.WebClient).DownloadFile('https://" + island_ip + \
                r":5000/api/monkey/download/monkey-windows-" + bit_text + r".exe','.\\monkey.exe'); " \
-               r";Start-Process -FilePath '.\\monkey.exe' -ArgumentList 'm0nk3y -s " + island_ip + r":5000'; "
+               r";Start-Process -FilePath '.\\monkey.exe' " \
+               r"-ArgumentList 'm0nk3y -s " + island_ip + r":5000'; "
 
     @staticmethod
     def _get_run_monkey_cmd_line(is_linux, is_64bit, island_ip):
@@ -5,7 +5,7 @@ from datetime import datetime
 import boto3
 from botocore.exceptions import UnknownServiceError
 
-from common.cloud.aws_instance import AwsInstance
+from common.cloud.aws.aws_instance import AwsInstance
 from monkey_island.cc.environment.environment import load_server_configuration_from_file
 from monkey_island.cc.services.reporting.exporter import Exporter
 
@@ -386,10 +386,11 @@ class ReportService:
     @staticmethod
     def get_monkey_subnets(monkey_guid):
         network_info = mongo.db.telemetry.find_one(
-            {'telem_category': 'system_info', 'monkey_guid': monkey_guid},
+            {'telem_category': 'system_info',
+             'monkey_guid': monkey_guid},
             {'data.network_info.networks': 1}
         )
-        if network_info is None:
+        if network_info is None or not network_info["data"]:
             return []
 
         return \
@@ -15,28 +15,34 @@ __regular_report_generating_lock = threading.Semaphore()
 def safe_generate_reports():
     # Entering the critical section; Wait until report generation is available.
     __report_generating_lock.acquire()
-    report = safe_generate_regular_report()
-    attack_report = safe_generate_attack_report()
-    # Leaving the critical section.
-    __report_generating_lock.release()
+    try:
+        report = safe_generate_regular_report()
+        attack_report = safe_generate_attack_report()
+    finally:
+        # Leaving the critical section.
+        __report_generating_lock.release()
     return report, attack_report
 
 
 def safe_generate_regular_report():
     # Local import to avoid circular imports
     from monkey_island.cc.services.reporting.report import ReportService
-    __regular_report_generating_lock.acquire()
-    report = ReportService.generate_report()
-    __regular_report_generating_lock.release()
+    try:
+        __regular_report_generating_lock.acquire()
+        report = ReportService.generate_report()
+    finally:
+        __regular_report_generating_lock.release()
     return report
 
 
 def safe_generate_attack_report():
     # Local import to avoid circular imports
     from monkey_island.cc.services.attack.attack_report import AttackReportService
-    __attack_report_generating_lock.acquire()
-    attack_report = AttackReportService.generate_new_report()
-    __attack_report_generating_lock.release()
+    try:
+        __attack_report_generating_lock.acquire()
+        attack_report = AttackReportService.generate_new_report()
+    finally:
+        __attack_report_generating_lock.release()
     return attack_report
 
 
@@ -1,25 +1,23 @@
 import logging
 
-from monkey_island.cc.database import mongo
-from monkey_island.cc.models import Monkey
-from monkey_island.cc.services import mimikatz_utils
-from monkey_island.cc.services.node import NodeService
-from monkey_island.cc.services.config import ConfigService
-from monkey_island.cc.services.telemetry.zero_trust_tests.antivirus_existence import test_antivirus_existence
-from monkey_island.cc.services.wmi_handler import WMIHandler
 from monkey_island.cc.encryptor import encryptor
+from monkey_island.cc.services import mimikatz_utils
+from monkey_island.cc.services.config import ConfigService
+from monkey_island.cc.services.node import NodeService
+from monkey_island.cc.services.telemetry.processing.system_info_collectors.system_info_telemetry_dispatcher import \
+    SystemInfoTelemetryDispatcher
+from monkey_island.cc.services.wmi_handler import WMIHandler
 
 logger = logging.getLogger(__name__)
 
 
 def process_system_info_telemetry(telemetry_json):
+    dispatcher = SystemInfoTelemetryDispatcher()
     telemetry_processing_stages = [
         process_ssh_info,
         process_credential_info,
         process_mimikatz_and_wmi_info,
-        process_aws_data,
-        update_db_with_new_hostname,
-        test_antivirus_existence,
+        dispatcher.dispatch_collector_results_to_relevant_processors
     ]
 
     # Calling safe_process_telemetry so if one of the stages fail, we log and move on instead of failing the rest of
@@ -34,7 +32,7 @@ def safe_process_telemetry(processing_function, telemetry_json):
         processing_function(telemetry_json)
     except Exception as err:
         logger.error(
-            "Error {} while in {} stage of processing telemetry.".format(str(err), processing_function.func_name),
+            "Error {} while in {} stage of processing telemetry.".format(str(err), processing_function.__name__),
             exc_info=True)
 
 
@@ -104,14 +102,3 @@ def process_mimikatz_and_wmi_info(telemetry_json):
     wmi_handler = WMIHandler(monkey_id, telemetry_json['data']['wmi'], users_secrets)
     wmi_handler.process_and_handle_wmi_info()
-
-
-def process_aws_data(telemetry_json):
-    if 'aws' in telemetry_json['data']:
-        if 'instance_id' in telemetry_json['data']['aws']:
-            monkey_id = NodeService.get_monkey_by_guid(telemetry_json['monkey_guid']).get('_id')
-            mongo.db.monkey.update_one({'_id': monkey_id},
-                                       {'$set': {'aws_instance_id': telemetry_json['data']['aws']['instance_id']}})
-
-
-def update_db_with_new_hostname(telemetry_json):
-    Monkey.get_single_monkey_by_guid(telemetry_json['monkey_guid']).set_hostname(telemetry_json['data']['hostname'])
@@ -0,0 +1,15 @@
+import logging
+
+from monkey_island.cc.models.monkey import Monkey
+
+logger = logging.getLogger(__name__)
+
+
+def process_aws_telemetry(collector_results, monkey_guid):
+    relevant_monkey = Monkey.get_single_monkey_by_guid(monkey_guid)
+
+    if "instance_id" in collector_results:
+        instance_id = collector_results["instance_id"]
+        relevant_monkey.aws_instance_id = instance_id
+        relevant_monkey.save()
+        logger.debug("Updated Monkey {} with aws instance id {}".format(str(relevant_monkey), instance_id))
@@ -0,0 +1,12 @@
+import logging
+
+from monkey_island.cc.models.monkey import Monkey
+
+logger = logging.getLogger(__name__)
+
+
+def process_environment_telemetry(collector_results, monkey_guid):
+    relevant_monkey = Monkey.get_single_monkey_by_guid(monkey_guid)
+    relevant_monkey.environment = collector_results["environment"]
+    relevant_monkey.save()
+    logger.debug("Updated Monkey {} with env {}".format(str(relevant_monkey), collector_results))
@@ -0,0 +1,9 @@
+import logging
+
+from monkey_island.cc.models.monkey import Monkey
+
+logger = logging.getLogger(__name__)
+
+
+def process_hostname_telemetry(collector_results, monkey_guid):
+    Monkey.get_single_monkey_by_guid(monkey_guid).set_hostname(collector_results["hostname"])
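Both of these small processors share the same (collector_results, monkey_guid) signature. A sketch of calling them directly, assuming a Monkey with that GUID is already stored; the dictionary values are illustrative, not taken from a real collector run:

    from monkey_island.cc.services.telemetry.processing.system_info_collectors.environment import \
        process_environment_telemetry
    from monkey_island.cc.services.telemetry.processing.system_info_collectors.hostname import process_hostname_telemetry

    MONKEY_GUID = "made-up-monkey-guid"  # assumed to exist in the Island database

    # Each processor reads a single key from its collector's result dict.
    process_environment_telemetry({"environment": "AWS"}, MONKEY_GUID)
    process_hostname_telemetry({"hostname": "test-machine"}, MONKEY_GUID)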
@@ -0,0 +1,64 @@
+import logging
+import typing
+
+from common.data.system_info_collectors_names \
+    import AWS_COLLECTOR, ENVIRONMENT_COLLECTOR, HOSTNAME_COLLECTOR, PROCESS_LIST_COLLECTOR
+from monkey_island.cc.services.telemetry.processing.system_info_collectors.aws import process_aws_telemetry
+from monkey_island.cc.services.telemetry.processing.system_info_collectors.environment import process_environment_telemetry
+from monkey_island.cc.services.telemetry.processing.system_info_collectors.hostname import process_hostname_telemetry
+from monkey_island.cc.services.telemetry.zero_trust_tests.antivirus_existence import test_antivirus_existence
+
+logger = logging.getLogger(__name__)
+
+SYSTEM_INFO_COLLECTOR_TO_TELEMETRY_PROCESSORS = {
+    AWS_COLLECTOR: [process_aws_telemetry],
+    ENVIRONMENT_COLLECTOR: [process_environment_telemetry],
+    HOSTNAME_COLLECTOR: [process_hostname_telemetry],
+    PROCESS_LIST_COLLECTOR: [test_antivirus_existence]
+}
+
+
+class SystemInfoTelemetryDispatcher(object):
+    def __init__(self, collector_to_parsing_functions: typing.Mapping[str, typing.List[typing.Callable]] = None):
+        """
+        :param collector_to_parsing_functions: Map between collector names and a list of functions that process the output of
+        that collector. If `None` is supplied, uses the default one; This should be the normal flow, overriding the
+        collector->functions mapping is useful mostly for testing.
+        """
+        if collector_to_parsing_functions is None:
+            collector_to_parsing_functions = SYSTEM_INFO_COLLECTOR_TO_TELEMETRY_PROCESSORS
+        self.collector_to_processing_functions = collector_to_parsing_functions
+
+    def dispatch_collector_results_to_relevant_processors(self, telemetry_json):
+        """
+        If the telemetry has collectors' results, dispatches the results to the relevant processing functions.
+        :param telemetry_json: Telemetry sent from the Monkey
+        """
+        if "collectors" in telemetry_json["data"]:
+            self.dispatch_single_result_to_relevant_processor(telemetry_json)
+
+    def dispatch_single_result_to_relevant_processor(self, telemetry_json):
+        relevant_monkey_guid = telemetry_json['monkey_guid']
+
+        for collector_name, collector_results in telemetry_json["data"]["collectors"].items():
+            self.dispatch_result_of_single_collector_to_processing_functions(
+                collector_name,
+                collector_results,
+                relevant_monkey_guid)
+
+    def dispatch_result_of_single_collector_to_processing_functions(
+            self,
+            collector_name,
+            collector_results,
+            relevant_monkey_guid):
+        if collector_name in self.collector_to_processing_functions:
+            for processing_function in self.collector_to_processing_functions[collector_name]:
+                # noinspection PyBroadException
+                try:
+                    processing_function(collector_results, relevant_monkey_guid)
+                except Exception as e:
+                    logger.error(
+                        "Error {} while processing {} system info telemetry".format(str(e), collector_name),
+                        exc_info=True)
+        else:
+            logger.warning("Unknown system info collector name: {}".format(collector_name))
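To make the flow concrete, here is a rough sketch of pushing one telemetry payload through the dispatcher with its default mapping. Only the AwsCollector entry is shown because that collector name is confirmed by the test further down; the GUID and instance id are made-up values:

    from monkey_island.cc.services.telemetry.processing.system_info_collectors.system_info_telemetry_dispatcher import \
        SystemInfoTelemetryDispatcher

    dispatcher = SystemInfoTelemetryDispatcher()  # None -> default collector-to-processors mapping

    # Illustrative telemetry payload in the shape the Monkey reports it.
    telemetry_json = {
        "monkey_guid": "made-up-monkey-guid",
        "data": {
            "collectors": {
                "AwsCollector": {"instance_id": "i-0123456789abcdef0"}
            }
        }
    }

    # Each collector result is routed to its registered processing functions;
    # unknown collector names are logged as warnings and skipped.
    dispatcher.dispatch_collector_results_to_relevant_processors(telemetry_json)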
@@ -0,0 +1,57 @@
+import uuid
+
+from monkey_island.cc.models import Monkey
+from monkey_island.cc.services.telemetry.processing.system_info_collectors.system_info_telemetry_dispatcher import \
+    SystemInfoTelemetryDispatcher
+from monkey_island.cc.services.telemetry.processing.system_info_collectors.system_info_telemetry_dispatcher import \
+    process_aws_telemetry
+from monkey_island.cc.testing.IslandTestCase import IslandTestCase
+
+TEST_SYS_INFO_TO_PROCESSING = {
+    "AwsCollector": [process_aws_telemetry],
+}
+
+
+class SystemInfoTelemetryDispatcherTest(IslandTestCase):
+    def test_dispatch_to_relevant_collector_bad_inputs(self):
+        self.fail_if_not_testing_env()
+
+        dispatcher = SystemInfoTelemetryDispatcher(TEST_SYS_INFO_TO_PROCESSING)
+
+        # Bad format telem JSONs - throws
+        bad_empty_telem_json = {}
+        self.assertRaises(KeyError, dispatcher.dispatch_collector_results_to_relevant_processors, bad_empty_telem_json)
+        bad_no_data_telem_json = {"monkey_guid": "bla"}
+        self.assertRaises(KeyError, dispatcher.dispatch_collector_results_to_relevant_processors, bad_no_data_telem_json)
+        bad_no_monkey_telem_json = {"data": {"collectors": {"AwsCollector": "Bla"}}}
+        self.assertRaises(KeyError, dispatcher.dispatch_collector_results_to_relevant_processors, bad_no_monkey_telem_json)
+
+        # Telem JSON with no collectors - nothing gets dispatched
+        good_telem_no_collectors = {"monkey_guid": "bla", "data": {"bla": "bla"}}
+        good_telem_empty_collectors = {"monkey_guid": "bla", "data": {"bla": "bla", "collectors": {}}}
+
+        dispatcher.dispatch_collector_results_to_relevant_processors(good_telem_no_collectors)
+        dispatcher.dispatch_collector_results_to_relevant_processors(good_telem_empty_collectors)
+
+    def test_dispatch_to_relevant_collector(self):
+        self.fail_if_not_testing_env()
+        self.clean_monkey_db()
+
+        a_monkey = Monkey(guid=str(uuid.uuid4()))
+        a_monkey.save()
+
+        dispatcher = SystemInfoTelemetryDispatcher()
+
+        # JSON with results - make sure functions are called
+        instance_id = "i-0bd2c14bd4c7d703f"
+        telem_json = {
+            "data": {
+                "collectors": {
+                    "AwsCollector": {"instance_id": instance_id},
+                }
+            },
+            "monkey_guid": a_monkey.guid
+        }
+        dispatcher.dispatch_collector_results_to_relevant_processors(telem_json)
+
+        self.assertEquals(Monkey.get_single_monkey_by_guid(a_monkey.guid).aws_instance_id, instance_id)
@@ -7,36 +7,36 @@ from monkey_island.cc.models.zero_trust.event import Event
 from monkey_island.cc.services.telemetry.zero_trust_tests.known_anti_viruses import ANTI_VIRUS_KNOWN_PROCESS_NAMES


-def test_antivirus_existence(telemetry_json):
-    current_monkey = Monkey.get_single_monkey_by_guid(telemetry_json['monkey_guid'])
-    if 'process_list' in telemetry_json['data']:
-        process_list_event = Event.create_event(
-            title="Process list",
-            message="Monkey on {} scanned the process list".format(current_monkey.hostname),
-            event_type=zero_trust_consts.EVENT_TYPE_MONKEY_LOCAL)
-        events = [process_list_event]
-
-        av_processes = filter_av_processes(telemetry_json)
-
-        for process in av_processes:
-            events.append(Event.create_event(
-                title="Found AV process",
-                message="The process '{}' was recognized as an Anti Virus process. Process "
-                        "details: {}".format(process[1]['name'], json.dumps(process[1])),
-                event_type=zero_trust_consts.EVENT_TYPE_MONKEY_LOCAL
-            ))
-
-        if len(av_processes) > 0:
-            test_status = zero_trust_consts.STATUS_PASSED
-        else:
-            test_status = zero_trust_consts.STATUS_FAILED
-        AggregateFinding.create_or_add_to_existing(
-            test=zero_trust_consts.TEST_ENDPOINT_SECURITY_EXISTS, status=test_status, events=events
-        )
+def test_antivirus_existence(process_list_json, monkey_guid):
+    current_monkey = Monkey.get_single_monkey_by_guid(monkey_guid)
+
+    process_list_event = Event.create_event(
+        title="Process list",
+        message="Monkey on {} scanned the process list".format(current_monkey.hostname),
+        event_type=zero_trust_consts.EVENT_TYPE_MONKEY_LOCAL)
+    events = [process_list_event]
+
+    av_processes = filter_av_processes(process_list_json["process_list"])
+
+    for process in av_processes:
+        events.append(Event.create_event(
+            title="Found AV process",
+            message="The process '{}' was recognized as an Anti Virus process. Process "
+                    "details: {}".format(process[1]['name'], json.dumps(process[1])),
+            event_type=zero_trust_consts.EVENT_TYPE_MONKEY_LOCAL
+        ))
+
+    if len(av_processes) > 0:
+        test_status = zero_trust_consts.STATUS_PASSED
+    else:
+        test_status = zero_trust_consts.STATUS_FAILED
+    AggregateFinding.create_or_add_to_existing(
+        test=zero_trust_consts.TEST_ENDPOINT_SECURITY_EXISTS, status=test_status, events=events
+    )


-def filter_av_processes(telemetry_json):
-    all_processes = list(telemetry_json['data']['process_list'].items())
+def filter_av_processes(process_list):
+    all_processes = list(process_list.items())
     av_processes = []
     for process in all_processes:
         process_name = process[1]['name']
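After this refactor the zero-trust test no longer receives the whole telemetry document; it gets the process-list collector's result plus the monkey GUID. A sketch of the expected input shape, with made-up PIDs and process names and assuming a Monkey with that GUID exists (filter_av_processes presumably matches the 'name' field against ANTI_VIRUS_KNOWN_PROCESS_NAMES):

    from monkey_island.cc.services.telemetry.zero_trust_tests.antivirus_existence import test_antivirus_existence

    # Illustrative collector output: a dict of PID -> process details.
    process_list_json = {
        "process_list": {
            "1234": {"name": "MsMpEng.exe", "pid": 1234},
            "5678": {"name": "explorer.exe", "pid": 5678}
        }
    }

    test_antivirus_existence(process_list_json, "made-up-monkey-guid")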
@@ -30,7 +30,7 @@ let infectionMonkeyImage = require('../images/infection-monkey.svg');
 let guardicoreLogoImage = require('../images/guardicore-logo.png');
 let notificationIcon = require('../images/notification-logo-512x512.png');

-const reportZeroTrustRoute = '/report/zero_trust';
+const reportZeroTrustRoute = '/report/zeroTrust';

 class AppComponent extends AuthComponent {
   updateStatus = () => {
@@ -30,7 +30,7 @@ class ConfigurePageComponent extends AuthComponent {
       lastAction: 'none',
       sections: [],
       selectedSection: 'attack',
-      allMonkeysAreDead: true,
+      monkeysRan: false,
       PBAwinFile: [],
       PBAlinuxFile: [],
       showAttackAlert: false
@@ -363,13 +363,7 @@ class ConfigurePageComponent extends AuthComponent {
     this.authFetch('/api')
       .then(res => res.json())
       .then(res => {
-        // This check is used to prevent unnecessary re-rendering
-        let allMonkeysAreDead = (!res['completed_steps']['run_monkey']) || (res['completed_steps']['infection_done']);
-        if (allMonkeysAreDead !== this.state.allMonkeysAreDead) {
-          this.setState({
-            allMonkeysAreDead: allMonkeysAreDead
-          });
-        }
+        this.setState({monkeysRan: res['completed_steps']['run_monkey']});
       });
   };
@@ -470,15 +464,15 @@ class ConfigurePageComponent extends AuthComponent {
       </div>)
   };

-  renderRunningMonkeysWarning = () => {
+  renderConfigWontChangeWarning = () => {
     return (<div>
-      {this.state.allMonkeysAreDead ?
-        '' :
+      {this.state.monkeysRan ?
         <div className="alert alert-warning">
           <i className="glyphicon glyphicon-warning-sign" style={{'marginRight': '5px'}}/>
-          Some monkeys are currently running. Note that changing the configuration will only apply to new
-          infections.
+          Changed configuration will only apply to new infections.
+          "Start over" to run again with different configuration.
         </div>
+        : ''
       }
     </div>)
   };
@@ -520,7 +514,7 @@ class ConfigurePageComponent extends AuthComponent {
         {this.renderAttackAlertModal()}
         <h1 className="page-title">Monkey Configuration</h1>
         {this.renderNav()}
-        {this.renderRunningMonkeysWarning()}
+        {this.renderConfigWontChangeWarning()}
         {content}
         <div className="text-center">
           <button type="submit" onClick={this.onSubmit} className="btn btn-success btn-lg" style={{margin: '5px'}}>
@@ -1,7 +1,9 @@
 import React from 'react';
-import {Col, Modal} from 'react-bootstrap';
+import {Col} from 'react-bootstrap';
 import {Link} from 'react-router-dom';
 import AuthComponent from '../AuthComponent';
+import StartOverModal from '../ui-components/StartOverModal';
+import '../../styles/StartOverPage.scss';

 class StartOverPageComponent extends AuthComponent {
   constructor(props) {
@@ -12,6 +14,9 @@ class StartOverPageComponent extends AuthComponent {
       showCleanDialog: false,
       allMonkeysAreDead: false
     };
+
+    this.cleanup = this.cleanup.bind(this);
+    this.closeModal = this.closeModal.bind(this);
   }

   updateMonkeysRunning = () => {
@@ -25,48 +30,14 @@ class StartOverPageComponent extends AuthComponent {
       });
   };

-  renderCleanDialogModal = () => {
-    return (
-      <Modal show={this.state.showCleanDialog} onHide={() => this.setState({showCleanDialog: false})}>
-        <Modal.Body>
-          <h2>
-            <div className="text-center">Reset environment</div>
-          </h2>
-          <p style={{'fontSize': '1.2em', 'marginBottom': '2em'}}>
-            Are you sure you want to reset the environment?
-          </p>
-          {
-            !this.state.allMonkeysAreDead ?
-              <div className="alert alert-warning">
-                <i className="glyphicon glyphicon-warning-sign" style={{'marginRight': '5px'}}/>
-                Some monkeys are still running. It's advised to kill all monkeys before resetting.
-              </div>
-              :
-              <div/>
-          }
-          <div className="text-center">
-            <button type="button" className="btn btn-danger btn-lg" style={{margin: '5px'}}
-                    onClick={() => {
-                      this.cleanup();
-                      this.setState({showCleanDialog: false});
-                    }}>
-              Reset environment
-            </button>
-            <button type="button" className="btn btn-success btn-lg" style={{margin: '5px'}}
-                    onClick={() => this.setState({showCleanDialog: false})}>
-              Cancel
-            </button>
-          </div>
-        </Modal.Body>
-      </Modal>
-    )
-  };
-
   render() {
     return (
       <Col xs={12} lg={8}>
-        {this.renderCleanDialogModal()}
+        <StartOverModal cleaned = {this.state.cleaned}
+                        showCleanDialog = {this.state.showCleanDialog}
+                        allMonkeysAreDead = {this.state.allMonkeysAreDead}
+                        onVerify = {this.cleanup}
+                        onClose = {this.closeModal}/>
         <h1 className="page-title">Start Over</h1>
         <div style={{'fontSize': '1.2em'}}>
           <p>
@@ -104,7 +75,7 @@ class StartOverPageComponent extends AuthComponent {
     this.setState({
       cleaned: false
     });
-    this.authFetch('/api?action=reset')
+    return this.authFetch('/api?action=reset')
       .then(res => res.json())
      .then(res => {
         if (res['status'] === 'OK') {
@@ -112,8 +83,14 @@ class StartOverPageComponent extends AuthComponent {
             cleaned: true
           });
         }
-      });
-  }
+      }).then(this.updateMonkeysRunning());
+  };
+
+  closeModal = () => {
+    this.setState({
+      showCleanDialog: false
+    })
+  };
 }

 export default StartOverPageComponent;
@@ -0,0 +1,74 @@
+import {Modal} from 'react-bootstrap';
+import React from 'react';
+import {GridLoader} from 'react-spinners';
+
+
+class StartOverModal extends React.PureComponent {
+
+  constructor(props) {
+    super(props);
+
+    this.state = {
+      showCleanDialog: this.props.showCleanDialog,
+      allMonkeysAreDead: this.props.allMonkeysAreDead,
+      loading: false
+    };
+  }
+
+  componentDidUpdate(prevProps) {
+    if (this.props !== prevProps) {
+      this.setState({ showCleanDialog: this.props.showCleanDialog,
+                      allMonkeysAreDead: this.props.allMonkeysAreDead})
+    }
+  }
+
+  render = () => {
+    return (
+      <Modal show={this.state.showCleanDialog} onHide={() => this.props.onClose()}>
+        <Modal.Body>
+          <h2>
+            <div className='text-center'>Reset environment</div>
+          </h2>
+          <p style={{'fontSize': '1.2em', 'marginBottom': '2em'}}>
+            Are you sure you want to reset the environment?
+          </p>
+          {
+            !this.state.allMonkeysAreDead ?
+              <div className='alert alert-warning'>
+                <i className='glyphicon glyphicon-warning-sign' style={{'marginRight': '5px'}}/>
+                Some monkeys are still running. It's advised to kill all monkeys before resetting.
+              </div>
+              :
+              <div/>
+          }
+          {
+            this.state.loading ? <div className={'modalLoader'}><GridLoader/></div> : this.showModalButtons()
+          }
+        </Modal.Body>
+      </Modal>
+    )
+  };
+
+  showModalButtons() {
+    return (<div className='text-center'>
+      <button type='button' className='btn btn-danger btn-lg' style={{margin: '5px'}}
+              onClick={this.modalVerificationOnClick}>
+        Reset environment
+      </button>
+      <button type='button' className='btn btn-success btn-lg' style={{margin: '5px'}}
+              onClick={() => this.setState({showCleanDialog: false})}>
+        Cancel
+      </button>
+    </div>)
+  }
+
+  modalVerificationOnClick = async () => {
+    this.setState({loading: true});
+    this.props.onVerify()
+      .then(() => {this.setState({loading: false});
+                   this.props.onClose();})
+
+  }
+}
+
+export default StartOverModal;
@@ -0,0 +1,9 @@
+$yellow: #ffcc00;
+
+.modalLoader div{
+  margin-left: auto;
+  margin-right: auto;
+}
+.modalLoader div>div{
+  background-color: $yellow;
+}
@@ -1,8 +1,8 @@
 Package: gc-monkey-island
 Architecture: amd64
-Maintainer: Guardicore
-Homepage: http://www.guardicore.com
+Maintainer: Guardicore <support@infectionmonkey.com>
+Homepage: https://www.infectionmonkey.com
 Priority: optional
 Version: 1.0
 Description: Guardicore Infection Monkey Island installation package
-Depends: openssl, python-pip, python-dev
+Depends: openssl, python3-pip, python3-dev
@@ -5,8 +5,8 @@ INSTALLATION_FOLDER=/var/monkey/monkey_island/installation
 PYTHON_FOLDER=/var/monkey/monkey_island/bin/python

 # Prepare python virtualenv
-pip2 install virtualenv --no-index --find-links file://$INSTALLATION_FOLDER
-virtualenv -p python2.7 ${PYTHON_FOLDER}
+pip3 install virtualenv --no-index --find-links file://$INSTALLATION_FOLDER
+virtualenv -p python3 ${PYTHON_FOLDER}

 # install pip requirements
 ${PYTHON_FOLDER}/bin/python -m pip install -r $MONKEY_FOLDER/monkey_island/requirements.txt --no-index --find-links file://$INSTALLATION_FOLDER
@@ -5,8 +5,8 @@ INSTALLATION_FOLDER=/var/monkey/monkey_island/installation
 PYTHON_FOLDER=/var/monkey/monkey_island/bin/python

 # Prepare python virtualenv
-pip2 install virtualenv --no-index --find-links file://$INSTALLATION_FOLDER
-virtualenv -p python2.7 ${PYTHON_FOLDER}
+pip3 install virtualenv --no-index --find-links file://$INSTALLATION_FOLDER
+virtualenv -p python3 ${PYTHON_FOLDER}

 # install pip requirements
 ${PYTHON_FOLDER}/bin/python -m pip install -r $MONKEY_FOLDER/monkey_island/requirements.txt --no-index --find-links file://$INSTALLATION_FOLDER
@@ -1,7 +1,7 @@
 #!/bin/bash

-MACHINE_TYPE=`uname -m`
-if [ ${MACHINE_TYPE} == 'x86_64' ]; then
+MACHINE_TYPE=$(uname -m)
+if [ "${MACHINE_TYPE}" == 'x86_64' ]; then
   # 64-bit stuff here
   ARCH=64
 else
@@ -11,4 +11,4 @@ fi

 MONKEY_FILE=monkey-linux-$ARCH
 cp -f /var/monkey/monkey_island/cc/binaries/$MONKEY_FILE /tmp
-/tmp/$MONKEY_FILE m0nk3y $@
+/tmp/$MONKEY_FILE m0nk3y "$@"
@@ -1,7 +1,7 @@
 #!/bin/bash

-MACHINE_TYPE=`uname -m`
-if [ ${MACHINE_TYPE} == 'x86_64' ]; then
+MACHINE_TYPE=$(uname -m)
+if [ "${MACHINE_TYPE}" == 'x86_64' ]; then
   # 64-bit stuff here
   ARCH=64
 else
@@ -11,4 +11,4 @@ fi

 MONKEY_FILE=monkey-linux-$ARCH
 cp -f /var/monkey/monkey_island/cc/binaries/$MONKEY_FILE /tmp
-/tmp/$MONKEY_FILE m0nk3y $@
+/tmp/$MONKEY_FILE m0nk3y "$@"