diff --git a/monkey/infection_monkey/credential_collectors/mimikatz_collector/mimikatz_credential_collector.py b/monkey/infection_monkey/credential_collectors/mimikatz_collector/mimikatz_credential_collector.py
index 0ef75ed1b..1b772580d 100644
--- a/monkey/infection_monkey/credential_collectors/mimikatz_collector/mimikatz_credential_collector.py
+++ b/monkey/infection_monkey/credential_collectors/mimikatz_collector/mimikatz_credential_collector.py
@@ -11,7 +11,6 @@ logger = logging.getLogger(__name__)
 
 
 class MimikatzCredentialCollector(ICredentialCollector):
-
     def collect_credentials(self, options=None) -> Sequence[Credentials]:
         logger.info("Attempting to collect windows credentials with pypykatz.")
         creds = pypykatz_handler.get_windows_creds()
diff --git a/monkey/infection_monkey/credential_collectors/ssh_collector/ssh_handler.py b/monkey/infection_monkey/credential_collectors/ssh_collector/ssh_handler.py
index 5dba2bbf3..98ca0df4a 100644
--- a/monkey/infection_monkey/credential_collectors/ssh_collector/ssh_handler.py
+++ b/monkey/infection_monkey/credential_collectors/ssh_collector/ssh_handler.py
@@ -30,6 +30,7 @@ def get_ssh_info(telemetry_messenger: ITelemetryMessenger) -> Iterable[Dict]:
 
 def _get_home_dirs() -> Iterable[Dict]:
     import pwd
+
     root_dir = _get_ssh_struct("root", "")
     home_dirs = [
         _get_ssh_struct(x.pw_name, x.pw_dir) for x in pwd.getpwall() if x.pw_dir.startswith("/home")
diff --git a/monkey/infection_monkey/exploit/drupal.py b/monkey/infection_monkey/exploit/drupal.py
index a0842383e..fec12c5b2 100644
--- a/monkey/infection_monkey/exploit/drupal.py
+++ b/monkey/infection_monkey/exploit/drupal.py
@@ -145,12 +145,12 @@ class DrupalExploiter(WebRCE):
 
 
 def is_response_cached(r: requests.Response) -> bool:
-    """ Check if a response had the cache header. """
+    """Check if a response had the cache header."""
     return "X-Drupal-Cache" in r.headers and r.headers["X-Drupal-Cache"] == "HIT"
 
 
 def find_exploitbale_article_ids(base_url: str, lower: int = 1, upper: int = 100) -> set:
-    """ Find target articles that do not 404 and are not cached """
+    """Find target articles that do not 404 and are not cached"""
     articles = set()
     while lower < upper:
         node_url = urljoin(base_url, str(lower))
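The third hunk shows find_exploitbale_article_ids (the misspelling is present in the source file) only down to the first line of its probing loop. Below is a minimal sketch of how that loop plausibly completes, inferred from the docstring ("Find target articles that do not 404 and are not cached"); the requests.get call, the status-code check, and the return value are assumptions, not lines carried by this diff.

# Hypothetical reconstruction of the probing loop, not part of the diff above.
# Assumed: each candidate article ID is fetched with a plain GET, and an ID is
# kept only if it resolves (no 404) and was not served from Drupal's page cache.
from urllib.parse import urljoin

import requests


def is_response_cached(r: requests.Response) -> bool:
    """Check if a response had the cache header."""
    return "X-Drupal-Cache" in r.headers and r.headers["X-Drupal-Cache"] == "HIT"


def find_exploitbale_article_ids(base_url: str, lower: int = 1, upper: int = 100) -> set:
    """Find target articles that do not 404 and are not cached"""
    articles = set()
    while lower < upper:
        node_url = urljoin(base_url, str(lower))
        response = requests.get(node_url)  # assumption: plain GET per candidate ID
        if response.status_code == 200 and not is_response_cached(response):
            articles.add(lower)  # keep IDs that resolve and bypass the cache
        lower += 1
    return articles

The cache filter mirrors the helper pairing visible in the hunk: an article whose response came from Drupal's page cache is skipped, presumably because a cached page is not re-rendered server-side and so is not a useful target for the exploiter.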