Refactoring current tests

VakarisZ 2020-03-30 10:45:42 +03:00
parent f561d4c604
commit 303dda1621
6 changed files with 190 additions and 128 deletions

@@ -15,7 +15,7 @@ REPORT_URLS = [
"api/report/zero_trust/pillars"
]
logger = logging.getLogger(__name__)
LOGGER = logging.getLogger(__name__)
class PerformanceAnalyzer(Analyzer):
@@ -24,36 +24,14 @@ class PerformanceAnalyzer(Analyzer):
self.break_if_took_too_long = break_if_took_too_long
self.island_client = island_client
def analyze_test_results(self) -> bool:
if not self.island_client.is_all_monkeys_dead():
raise RuntimeError("Can't test report times since not all Monkeys have died.")
# Collect timings for all pages
self.island_client.clear_caches()
report_resource_to_response_time = {}
for url in REPORT_URLS:
report_resource_to_response_time[url] = self.island_client.get_elapsed_for_get_request(url)
# Calculate total time and check each page
single_page_time_less_then_max = True
total_time = timedelta()
for page, elapsed in report_resource_to_response_time.items():
logger.info(f"page {page} took {str(elapsed)}")
total_time += elapsed
if elapsed > MAX_ALLOWED_SINGLE_PAGE_TIME:
single_page_time_less_then_max = False
total_time_less_then_max = total_time < MAX_ALLOWED_TOTAL_TIME
logger.info(f"total time is {str(total_time)}")
performance_is_good_enough = total_time_less_then_max and single_page_time_less_then_max
if self.break_if_took_too_long and not performance_is_good_enough:
logger.warning(
"Calling breakpoint - pausing to enable investigation of island. Type 'c' to continue once you're done "
"investigating. Type 'p timings' and 'p total_time' to see performance information."
)
breakpoint()
return performance_is_good_enough
def get_elapsed_for_get_request(self, url):
response = self.island_client.requests.get(url)
if response.ok:
LOGGER.debug(f"Got ok for {url} content peek:\n{response.content[:120].strip()}")
return response.elapsed
else:
LOGGER.error(f"Trying to get {url} but got unexpected {str(response)}")
# Instead of raising for status, treat failed responses as taking the maximum possible time
return timedelta.max
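
For context, the timing helper above relies on the requests library, where response.elapsed is a datetime.timedelta measured from sending the request until the response headers are parsed, so it can slightly understate the full page-generation time for large report payloads. A minimal standalone sketch of the same pattern, assuming a plain requests call and placeholder base_url/endpoint names that are not taken from this commit:

import logging
from datetime import timedelta

import requests

LOGGER = logging.getLogger(__name__)


def get_elapsed_for_get_request(base_url, endpoint):
    # Hypothetical standalone version of the helper; base_url and endpoint are placeholders.
    response = requests.get(f"{base_url}/{endpoint}")
    if response.ok:
        LOGGER.debug(f"Got ok for {endpoint}, took {response.elapsed}")
        return response.elapsed
    LOGGER.error(f"Trying to get {endpoint} but got unexpected {response}")
    # Failed responses count as the maximum possible time instead of raising for status.
    return timedelta.max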

@@ -1,100 +1,8 @@
from time import sleep
import logging
from envs.monkey_zoo.blackbox.utils.test_timer import TestTimer
MAX_TIME_FOR_MONKEYS_TO_DIE = 5 * 60
WAIT_TIME_BETWEEN_REQUESTS = 10
TIME_FOR_MONKEY_PROCESS_TO_FINISH = 40
DELAY_BETWEEN_ANALYSIS = 3
LOGGER = logging.getLogger(__name__)
import abc
class BasicTest(object):
def __init__(self, name, island_client, config_parser, analyzers, timeout, post_exec_analyzers, log_handler):
self.name = name
self.island_client = island_client
self.config_parser = config_parser
self.analyzers = analyzers
self.post_exec_analyzers = post_exec_analyzers
self.timeout = timeout
self.log_handler = log_handler
class BasicTest(abc.ABC):
@abc.abstractmethod
def run(self):
self.island_client.import_config(self.config_parser.config_raw)
self.print_test_starting_info()
try:
self.island_client.run_monkey_local()
self.test_until_timeout()
finally:
self.island_client.kill_all_monkeys()
self.wait_until_monkeys_die()
self.wait_for_monkey_process_to_finish()
self.test_post_exec_analyzers()
self.parse_logs()
self.island_client.reset_env()
def print_test_starting_info(self):
LOGGER.info("Started {} test".format(self.name))
LOGGER.info("Machines participating in test: " + ", ".join(self.config_parser.get_ips_of_targets()))
print("")
def test_until_timeout(self):
timer = TestTimer(self.timeout)
while not timer.is_timed_out():
if self.all_analyzers_pass():
self.log_success(timer)
return
sleep(DELAY_BETWEEN_ANALYSIS)
LOGGER.debug("Waiting until all analyzers passed. Time passed: {}".format(timer.get_time_taken()))
self.log_failure(timer)
assert False
def log_success(self, timer):
LOGGER.info(self.get_analyzer_logs())
LOGGER.info("{} test passed, time taken: {:.1f} seconds.".format(self.name, timer.get_time_taken()))
def log_failure(self, timer):
LOGGER.info(self.get_analyzer_logs())
LOGGER.error("{} test failed because of timeout. Time taken: {:.1f} seconds.".format(self.name,
timer.get_time_taken()))
def all_analyzers_pass(self):
analyzers_results = [analyzer.analyze_test_results() for analyzer in self.analyzers]
return all(analyzers_results)
def get_analyzer_logs(self):
log = ""
for analyzer in self.analyzers:
log += "\n" + analyzer.log.get_contents()
return log
def wait_until_monkeys_die(self):
time_passed = 0
while not self.island_client.is_all_monkeys_dead() and time_passed < MAX_TIME_FOR_MONKEYS_TO_DIE:
sleep(WAIT_TIME_BETWEEN_REQUESTS)
time_passed += WAIT_TIME_BETWEEN_REQUESTS
LOGGER.debug("Waiting for all monkeys to die. Time passed: {}".format(time_passed))
if time_passed >= MAX_TIME_FOR_MONKEYS_TO_DIE:
LOGGER.error("Some monkeys didn't die after the test, failing")
assert False
def parse_logs(self):
LOGGER.info("Parsing test logs:")
self.log_handler.parse_test_logs()
@staticmethod
def wait_for_monkey_process_to_finish():
"""
There is a time period when the monkey is marked as dead but its process is still shutting down.
If we try to launch a monkey during that window, it will fail to start; that's why the
test needs to wait a bit even after all monkeys are dead.
"""
LOGGER.debug("Waiting for Monkey process to close...")
sleep(TIME_FOR_MONKEY_PROCESS_TO_FINISH)
def test_post_exec_analyzers(self):
post_exec_analyzers_results = [analyzer.analyze_test_results() for analyzer in self.post_exec_analyzers]
assert all(post_exec_analyzers_results)
pass
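
The net effect of this hunk is that BasicTest shrinks to an abstract interface, with the concrete behaviour moving into the new subclasses shown below. A minimal sketch of the resulting contract, using a hypothetical DummyTest purely for illustration:

import abc


class BasicTest(abc.ABC):
    @abc.abstractmethod
    def run(self):
        pass


class DummyTest(BasicTest):
    # Hypothetical subclass, only here to show the contract.
    def run(self):
        print("running DummyTest")


DummyTest().run()      # prints "running DummyTest"
# BasicTest().run()    # would raise TypeError: can't instantiate an abstract class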

@@ -0,0 +1,95 @@
from time import sleep
import logging
from envs.monkey_zoo.blackbox.utils.test_timer import TestTimer
from envs.monkey_zoo.blackbox.tests.basic_test import BasicTest
MAX_TIME_FOR_MONKEYS_TO_DIE = 5 * 60
WAIT_TIME_BETWEEN_REQUESTS = 10
TIME_FOR_MONKEY_PROCESS_TO_FINISH = 40
DELAY_BETWEEN_ANALYSIS = 3
LOGGER = logging.getLogger(__name__)
class ExploitationTest(BasicTest):
def __init__(self, name, island_client, config_parser, analyzers, timeout, log_handler):
self.name = name
self.island_client = island_client
self.config_parser = config_parser
self.analyzers = analyzers
self.timeout = timeout
self.log_handler = log_handler
def run(self):
self.island_client.import_config(self.config_parser.config_raw)
self.print_test_starting_info()
try:
self.island_client.run_monkey_local()
self.test_until_timeout()
finally:
self.island_client.kill_all_monkeys()
self.wait_until_monkeys_die()
self.wait_for_monkey_process_to_finish()
self.parse_logs()
self.island_client.reset_env()
def print_test_starting_info(self):
LOGGER.info("Started {} test".format(self.name))
LOGGER.info("Machines participating in test: " + ", ".join(self.config_parser.get_ips_of_targets()))
print("")
def test_until_timeout(self):
timer = TestTimer(self.timeout)
while not timer.is_timed_out():
if self.all_analyzers_pass():
self.log_success(timer)
return
sleep(DELAY_BETWEEN_ANALYSIS)
LOGGER.debug("Waiting until all analyzers passed. Time passed: {}".format(timer.get_time_taken()))
self.log_failure(timer)
assert False
def log_success(self, timer):
LOGGER.info(self.get_analyzer_logs())
LOGGER.info("{} test passed, time taken: {:.1f} seconds.".format(self.name, timer.get_time_taken()))
def log_failure(self, timer):
LOGGER.info(self.get_analyzer_logs())
LOGGER.error("{} test failed because of timeout. Time taken: {:.1f} seconds.".format(self.name,
timer.get_time_taken()))
def all_analyzers_pass(self):
analyzers_results = [analyzer.analyze_test_results() for analyzer in self.analyzers]
return all(analyzers_results)
def get_analyzer_logs(self):
log = ""
for analyzer in self.analyzers:
log += "\n" + analyzer.log.get_contents()
return log
def wait_until_monkeys_die(self):
time_passed = 0
while not self.island_client.is_all_monkeys_dead() and time_passed < MAX_TIME_FOR_MONKEYS_TO_DIE:
sleep(WAIT_TIME_BETWEEN_REQUESTS)
time_passed += WAIT_TIME_BETWEEN_REQUESTS
LOGGER.debug("Waiting for all monkeys to die. Time passed: {}".format(time_passed))
if time_passed >= MAX_TIME_FOR_MONKEYS_TO_DIE:
LOGGER.error("Some monkeys didn't die after the test, failing")
assert False
def parse_logs(self):
LOGGER.info("Parsing test logs:")
self.log_handler.parse_test_logs()
@staticmethod
def wait_for_monkey_process_to_finish():
"""
There is a time period when the monkey is marked as dead but its process is still shutting down.
If we try to launch a monkey during that window, it will fail to start; that's why the
test needs to wait a bit even after all monkeys are dead.
"""
LOGGER.debug("Waiting for Monkey process to close...")
sleep(TIME_FOR_MONKEY_PROCESS_TO_FINISH)
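
As a usage sketch, the refactored test is driven by handing it a set of collaborators and calling run(). Every class below is a stand-in written for illustration, assuming the snippet runs from inside the repository so the import resolves; the real suite passes the island client, config parser, analyzers and log handler objects that expose these same methods:

from envs.monkey_zoo.blackbox.tests.exploitation import ExploitationTest


class FakeLog:
    def get_contents(self):
        return "fake analyzer log"


class FakeAnalyzer:
    # Stand-in for a real analyzer: exposes analyze_test_results() and .log.
    log = FakeLog()

    def analyze_test_results(self):
        return True


class FakeIslandClient:
    # Stand-in exposing only the methods ExploitationTest.run() calls.
    def import_config(self, config_raw):
        pass

    def run_monkey_local(self):
        pass

    def kill_all_monkeys(self):
        pass

    def is_all_monkeys_dead(self):
        return True

    def reset_env(self):
        pass


class FakeConfigParser:
    config_raw = "{}"

    def get_ips_of_targets(self):
        return ["10.2.2.1"]  # example IP, not taken from this commit


class FakeLogHandler:
    def parse_test_logs(self):
        pass


# Note: run() still sleeps TIME_FOR_MONKEY_PROCESS_TO_FINISH (40 s) before finishing.
ExploitationTest(name="Example", island_client=FakeIslandClient(),
                 config_parser=FakeConfigParser(), analyzers=[FakeAnalyzer()],
                 timeout=60, log_handler=FakeLogHandler()).run()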

@@ -0,0 +1,81 @@
import logging
from datetime import timedelta
from envs.monkey_zoo.blackbox.tests.basic_test import BasicTest
from envs.monkey_zoo.blackbox.tests.exploitation import ExploitationTest
MAX_ALLOWED_SINGLE_PAGE_TIME = timedelta(seconds=2)
MAX_ALLOWED_TOTAL_TIME = timedelta(seconds=5)
REPORT_URLS = [
"api/report/security",
"api/attack/report",
"api/report/zero_trust/findings",
"api/report/zero_trust/principles",
"api/report/zero_trust/pillars"
]
LOGGER = logging.getLogger(__name__)
class ReportGenerationTest(BasicTest):
def __init__(self, name, island_client, config_parser, analyzers,
timeout, log_handler, break_if_took_too_long=False):
self.name = name
self.island_client = island_client
self.config_parser = config_parser
self.exploitation_test = ExploitationTest(name, island_client, config_parser, analyzers, timeout, log_handler)
self.break_if_took_too_long = break_if_took_too_long
def test_report_generation_performance(self) -> bool:
if not self.island_client.is_all_monkeys_dead():
raise RuntimeError("Can't test report times since not all Monkeys have died.")
# Collect timings for all pages
self.island_client.clear_caches()
report_resource_to_response_time = {}
for url in REPORT_URLS:
report_resource_to_response_time[url] = self.island_client.get_elapsed_for_get_request(url)
# Calculate total time and check each page
single_page_time_less_then_max = True
total_time = timedelta()
for page, elapsed in report_resource_to_response_time.items():
LOGGER.info(f"page {page} took {str(elapsed)}")
total_time += elapsed
if elapsed > MAX_ALLOWED_SINGLE_PAGE_TIME:
single_page_time_less_then_max = False
total_time_less_then_max = total_time < MAX_ALLOWED_TOTAL_TIME
LOGGER.info(f"total time is {str(total_time)}")
performance_is_good_enough = total_time_less_then_max and single_page_time_less_then_max
if self.break_if_took_too_long and not performance_is_good_enough:
LOGGER.warning(
"Calling breakpoint - pausing to enable investigation of island. Type 'c' to continue once you're done "
"investigating. Type 'p timings' and 'p total_time' to see performance information."
)
breakpoint()
return performance_is_good_enough
def run(self):
self.island_client.import_config(self.config_parser.config_raw)
self.exploitation_test.print_test_starting_info()
try:
self.island_client.run_monkey_local()
self.exploitation_test.test_until_timeout()
finally:
self.island_client.kill_all_monkeys()
self.exploitation_test.wait_until_monkeys_die()
self.exploitation_test.wait_for_monkey_process_to_finish()
self.test_post_exec_analyzers()
self.exploitation_test.parse_logs()
self.island_client.reset_env()
def test_post_exec_analyzers(self):
# The report-generation performance check is this test's post-execution analysis;
# it can only run once all monkeys are dead, which run() guarantees at this point.
assert self.test_report_generation_performance()