Changed the post_exec analyzer to use the regular logger instead of the AnalyzerLog

This commit is contained in:
Shay Nehmad 2020-02-25 12:19:57 +02:00
parent 97976cdbc5
commit e815ac53da
3 changed files with 17 additions and 20 deletions

View File

@ -2,11 +2,10 @@ import logging
from datetime import timedelta from datetime import timedelta
from envs.monkey_zoo.blackbox.analyzers.analyzer import Analyzer from envs.monkey_zoo.blackbox.analyzers.analyzer import Analyzer
from envs.monkey_zoo.blackbox.analyzers.analyzer_log import AnalyzerLog
from envs.monkey_zoo.blackbox.island_client.monkey_island_client import MonkeyIslandClient from envs.monkey_zoo.blackbox.island_client.monkey_island_client import MonkeyIslandClient
MAX_ALLOWED_SINGLE_PAGE_TIME = timedelta(seconds=1) MAX_ALLOWED_SINGLE_PAGE_TIME = timedelta(seconds=2)
MAX_ALLOWED_TOTAL_TIME = timedelta(seconds=3) MAX_ALLOWED_TOTAL_TIME = timedelta(seconds=5)
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -16,13 +15,10 @@ class PerformanceAnalyzer(Analyzer):
def __init__(self, island_client: MonkeyIslandClient, break_if_took_too_long=False): def __init__(self, island_client: MonkeyIslandClient, break_if_took_too_long=False):
self.break_if_took_too_long = break_if_took_too_long self.break_if_took_too_long = break_if_took_too_long
self.island_client = island_client self.island_client = island_client
self.log = AnalyzerLog(self.__class__.__name__)
def analyze_test_results(self) -> bool: def analyze_test_results(self) -> bool:
self.log.clear()
if not self.island_client.is_all_monkeys_dead(): if not self.island_client.is_all_monkeys_dead():
self.log.add_entry("Can't test report times since not all Monkeys have died.") logger.info("Can't test report times since not all Monkeys have died.")
return False return False
total_time = timedelta() total_time = timedelta()
@ -33,20 +29,22 @@ class PerformanceAnalyzer(Analyzer):
single_page_time_less_then_max = True single_page_time_less_then_max = True
for page, elapsed in timings.items(): for page, elapsed in timings.items():
self.log.add_entry(f"page {page} took {str(elapsed)}") logger.info(f"page {page} took {str(elapsed)}")
total_time += elapsed total_time += elapsed
if elapsed > MAX_ALLOWED_SINGLE_PAGE_TIME: if elapsed > MAX_ALLOWED_SINGLE_PAGE_TIME:
single_page_time_less_then_max = False single_page_time_less_then_max = False
total_time_less_then_max = total_time < MAX_ALLOWED_TOTAL_TIME total_time_less_then_max = total_time < MAX_ALLOWED_TOTAL_TIME
self.log.add_entry(f"total time is {str(total_time)}") logger.info(f"total time is {str(total_time)}")
if self.break_if_took_too_long and (not (total_time_less_then_max and single_page_time_less_then_max)): performance_is_good_enough = total_time_less_then_max and single_page_time_less_then_max
if self.break_if_took_too_long and not performance_is_good_enough:
logger.warning( logger.warning(
"Calling breakpoint - pausing to enable investigation of island. Type 'c' to continue once you're done " "Calling breakpoint - pausing to enable investigation of island. Type 'c' to continue once you're done "
"investigating. type 'p timings' and 'p total_time' to see performance information." "investigating. Type 'p timings' and 'p total_time' to see performance information."
) )
breakpoint() breakpoint()
return total_time_less_then_max and single_page_time_less_then_max return performance_is_good_enough

View File

@ -98,6 +98,10 @@ class MonkeyIslandClient(object):
return response return response
def time_all_report_pages(self): def time_all_report_pages(self):
"""
Calculates elapsed time of request for each report URL
Make sure to call clear_caches before this function if you want to measure "worst case" generation time.
"""
REPORT_URLS = [ REPORT_URLS = [
"api/report/security", "api/report/security",
"api/attack/report", "api/attack/report",
@ -111,12 +115,11 @@ class MonkeyIslandClient(object):
for url in REPORT_URLS: for url in REPORT_URLS:
response = self.requests.get(url) response = self.requests.get(url)
if response.ok: if response.ok:
LOGGER.debug(f"Got ok for {url} content peek:\n{response.content[:120]}") LOGGER.debug(f"Got ok for {url} content peek:\n{response.content[:120].strip()}")
report_resource_to_response_time[url] = response.elapsed report_resource_to_response_time[url] = response.elapsed
else: else:
LOGGER.error(f"Trying to get {url} but got unexpected {str(response)}") LOGGER.error(f"Trying to get {url} but got unexpected {str(response)}")
# instead of raising for status, mark failed responses as maxtime # instead of raising for status, mark failed responses as maxtime
report_resource_to_response_time[url] = timedelta.max() report_resource_to_response_time[url] = timedelta.max()
return report_resource_to_response_time return report_resource_to_response_time

View File

@ -1,4 +1,3 @@
import json
from time import sleep from time import sleep
import logging import logging
@ -68,7 +67,7 @@ class BasicTest(object):
def get_analyzer_logs(self): def get_analyzer_logs(self):
log = "" log = ""
for analyzer in self.get_all_analyzers(): for analyzer in self.analyzers:
log += "\n" + analyzer.log.get_contents() log += "\n" + analyzer.log.get_contents()
return log return log
@ -93,12 +92,9 @@ class BasicTest(object):
If we try to launch monkey during that time window monkey will fail to start, that's If we try to launch monkey during that time window monkey will fail to start, that's
why test needs to wait a bit even after all monkeys are dead. why test needs to wait a bit even after all monkeys are dead.
""" """
LOGGER.debug() LOGGER.debug("Waiting for Monkey process to close...")
sleep(TIME_FOR_MONKEY_PROCESS_TO_FINISH) sleep(TIME_FOR_MONKEY_PROCESS_TO_FINISH)
def test_post_exec_analyzers(self): def test_post_exec_analyzers(self):
post_exec_analyzers_results = [analyzer.analyze_test_results() for analyzer in self.post_exec_analyzers] post_exec_analyzers_results = [analyzer.analyze_test_results() for analyzer in self.post_exec_analyzers]
assert all(post_exec_analyzers_results) assert all(post_exec_analyzers_results)
def get_all_analyzers(self):
return self.analyzers + self.post_exec_analyzers