From e5bd17cc51877d1c17a9549e6569623adda16036 Mon Sep 17 00:00:00 2001
From: Dylan Evans <62859381+fin3ss3g0d@users.noreply.github.com>
Date: Fri, 11 Jul 2025 16:16:21 -0400
Subject: [PATCH 1/3] Create query-importer.py

---
 utilities/python/query-importer.py | 125 +++++++++++++++++++++++++++++
 1 file changed, 125 insertions(+)
 create mode 100644 utilities/python/query-importer.py

diff --git a/utilities/python/query-importer.py b/utilities/python/query-importer.py
new file mode 100644
index 0000000..5f5f907
--- /dev/null
+++ b/utilities/python/query-importer.py
@@ -0,0 +1,125 @@
+import requests
+import json
+import base64
+import hmac
+import hashlib
+import datetime
+import argparse
+import sys
+import os
+import time
+
+MAX_RETRIES = 5
+
+def build_signature(method: str, uri: str, body: bytes, token_key: str) -> tuple[str, str]:
+    digester = hmac.new(token_key.encode(), None, hashlib.sha256)
+    digester.update(f'{method}{uri}'.encode())
+
+    digester = hmac.new(digester.digest(), None, hashlib.sha256)
+
+    now = datetime.datetime.now().astimezone()
+    date_header = now.isoformat('T')
+    digester.update(date_header[:13].encode())
+
+    digester = hmac.new(digester.digest(), None, hashlib.sha256)
+
+    if body:
+        digester.update(body)
+
+    signature = base64.b64encode(digester.digest()).decode()
+    return date_header, signature
+
+def load_queries(file_path: str):
+    if not os.path.isfile(file_path):
+        raise FileNotFoundError(f"Query file not found: {file_path}")
+
+    with open(file_path, "r") as file:
+        try:
+            raw_data = json.load(file)
+        except json.JSONDecodeError as e:
+            raise ValueError(f"Invalid JSON format: {e}")
+
+    if not isinstance(raw_data, list):
+        raise ValueError("Expected a list of query objects in the JSON file")
+
+    flattened_queries = []
+    for idx, entry in enumerate(raw_data):
+        try:
+            name = entry.get("name", f"Unnamed Query {idx + 1}")
+            category = entry.get("category", "Uncategorized")
+            description = entry.get("description") or f"{name} - {category}"
+            query = entry.get("query")
+
+            if not query:
+                print(f"[!] Skipping query '{name}' (missing 'query' field)")
+                continue
+
+            flattened_queries.append({
+                "name": name,
+                "description": description,
+                "query": query
+            })
+
+        except Exception as e:
+            print(f"[!] Error processing query at index {idx}: {e}")
+
+    print(f"[+] Loaded {len(flattened_queries)} queries from {file_path}")
+    return flattened_queries
+
+def submit_query_with_retries(url, headers, body, query_name):
+    for attempt in range(1, MAX_RETRIES + 1):
+        response = requests.post(url=url, headers=headers, data=body)
+
+        if response.status_code != 429:
+            print(f"    Status: {response.status_code} - {response.text}")
+            return
+
+        retry_after = response.headers.get("Retry-After")
+        if retry_after:
+            wait_time = int(retry_after)
+            print(f"[!] 429 received. Retry-After: {wait_time}s (Attempt {attempt}/{MAX_RETRIES})")
+        else:
+            wait_time = 2 ** attempt  # Exponential backoff
+            print(f"[!] 429 received. No Retry-After header. Waiting {wait_time}s (Attempt {attempt}/{MAX_RETRIES})")
+
+        time.sleep(wait_time)
+
+    print(f"[X] Failed to submit query '{query_name}' after {MAX_RETRIES} retries due to rate limiting.")
+
+def main():
+    parser = argparse.ArgumentParser(description="Submit BloodHound queries via API from Queries.json")
+    parser.add_argument("--token-id", required=True, help="API token ID")
+    parser.add_argument("--token-key", required=True, help="API token key")
+    parser.add_argument("--queries-file", required=True, help="Path to JSON file containing custom queries")
+    parser.add_argument("--base-url", default="http://127.0.0.1:8080", help="BloodHound API base URL")
+
+    args = parser.parse_args()
+
+    try:
+        queries = load_queries(args.queries_file)
+
+        for query in queries:
+            body = json.dumps(query).encode()
+            uri = "/api/v2/saved-queries"
+            method = "POST"
+            url = args.base_url + uri
+
+            request_date, signature = build_signature(method, uri, body, args.token_key)
+
+            headers = {
+                "Authorization": f"bhesignature {args.token_id}",
+                "RequestDate": request_date,
+                "Signature": signature,
+                "Content-Type": "application/json",
+                "User-Agent": "bhe-python-sdk 0001",
+            }
+
+            print(f"[+] Submitting query: {query['name']}")
+            submit_query_with_retries(url, headers, body, query['name'])
+
+    except Exception as e:
+        print(f"[!] Error: {e}")
+        sys.exit(1)

+if __name__ == "__main__":
+    main()
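A minimal invocation of the script introduced above might look like the following; the flags mirror the script's argparse definitions, and the token values are placeholders for an API token ID/key pair created in BloodHound:

```
python3 utilities/python/query-importer.py \
  --token-id "<API_TOKEN_ID>" \
  --token-key "<API_TOKEN_KEY>" \
  --queries-file Queries.json \
  --base-url http://127.0.0.1:8080
```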
From c077732c7b90e82534488e1621fd17973f55779c Mon Sep 17 00:00:00 2001
From: Dylan Evans <62859381+fin3ss3g0d@users.noreply.github.com>
Date: Fri, 11 Jul 2025 16:24:26 -0400
Subject: [PATCH 2/3] Update README.md

---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
index a03d8ab..a862069 100644
--- a/README.md
+++ b/README.md
@@ -110,6 +110,10 @@ Example: Import a few queries to BloodHound's Custom Searches:
 > $queries[0..4] | New-BHPathQuery
 ```
 
+## Query Importer
+
+This repository includes a dedicated script, `utilities/python/query-importer.py`, which imports every query from the master `Queries.json` file into BloodHound Community Edition, making the bundled queries plug-and-play.
+
 ## Contributing
 
 The BloodHound Query Library's success depends on community participation. BloodHound users who have developed useful queries are encouraged to contribute them to the library.
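For reference, the importer reads only a handful of fields from each `Queries.json` entry: `name`, `category`, `description`, `query`, and (after the next patch) `platforms`. A minimal sketch of the expected file shape, with purely illustrative values rather than real library entries:

```
[
  {
    "name": "Example query name",
    "category": "Example category",
    "description": "Optional; the script falls back to 'name - category'",
    "platforms": ["Active Directory"],
    "query": "MATCH (n:User) RETURN n LIMIT 10"
  }
]
```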
From acf7165c7956fd9f91977defd79a232993ab7ea0 Mon Sep 17 00:00:00 2001
From: Dylan Evans <62859381+fin3ss3g0d@users.noreply.github.com>
Date: Thu, 24 Jul 2025 11:04:52 -0400
Subject: [PATCH 3/3] Update query-importer.py

Allow users to import by platform category
---
 utilities/python/query-importer.py | 73 ++++++++++++++++++++++--------
 1 file changed, 53 insertions(+), 20 deletions(-)

diff --git a/utilities/python/query-importer.py b/utilities/python/query-importer.py
index 5f5f907..8f41f70 100644
--- a/utilities/python/query-importer.py
+++ b/utilities/python/query-importer.py
@@ -29,42 +29,65 @@ def build_signature(method: str, uri: str, body: bytes, token_key: str) -> tuple[str, str]:
     signature = base64.b64encode(digester.digest()).decode()
     return date_header, signature
 
-def load_queries(file_path: str):
+def load_queries(file_path: str, platforms_filter: list[str] | None = None):
+    """
+    Read the SpecterOps query-library JSON and return only the queries whose
+    'platforms' list intersects with `platforms_filter` (case-insensitive).
+
+    Args:
+        file_path: Path to the master queries JSON.
+        platforms_filter: List of platform strings (lower-cased). If None or
+            empty, no filtering is applied.
+
+    Returns:
+        List[dict] with keys: name, description, query
+    """
     if not os.path.isfile(file_path):
         raise FileNotFoundError(f"Query file not found: {file_path}")
 
-    with open(file_path, "r") as file:
+    with open(file_path, "r") as fh:
         try:
-            raw_data = json.load(file)
+            raw_data = json.load(fh)
         except json.JSONDecodeError as e:
             raise ValueError(f"Invalid JSON format: {e}")
 
     if not isinstance(raw_data, list):
-        raise ValueError("Expected a list of query objects in the JSON file")
+        raise ValueError("Expected a list of query objects at top level")
 
-    flattened_queries = []
+    flattened = []
+    filter_active = bool(platforms_filter)
+    skipped_platform = 0
     for idx, entry in enumerate(raw_data):
         try:
-            name = entry.get("name", f"Unnamed Query {idx + 1}")
-            category = entry.get("category", "Uncategorized")
-            description = entry.get("description") or f"{name} - {category}"
-            query = entry.get("query")
+            # ----- platform gating ------------------------------------------------
+            entry_platforms = [p.lower() for p in entry.get("platforms", [])]
+            if filter_active and not any(p in entry_platforms for p in platforms_filter):
+                skipped_platform += 1
+                continue
 
-            if not query:
-                print(f"[!] Skipping query '{name}' (missing 'query' field)")
+            query_text = entry.get("query")
+            if not query_text:
+                print(f"[!] Skipping entry #{idx+1} (missing 'query' field)")
                 continue
 
-            flattened_queries.append({
-                "name": name,
+            name = entry.get("name", f"Unnamed Query {idx+1}")
+            category = entry.get("category", "Uncategorized")
+            description = entry.get("description") or f"{name} - {category}"
+
+            flattened.append({
+                "name": name,
                 "description": description,
-                "query": query
+                "query": query_text
             })
 
-        except Exception as e:
-            print(f"[!] Error processing query at index {idx}: {e}")
+        except Exception as exc:
+            print(f"[!] Error processing entry #{idx+1}: {exc}")
 
-    print(f"[+] Loaded {len(flattened_queries)} queries from {file_path}")
-    return flattened_queries
+    print(f"[+] Loaded {len(flattened)} queries from {file_path}"
+          f"{' after platform filtering' if filter_active else ''}")
+    if skipped_platform and filter_active:
+        print(f"    [-] Skipped {skipped_platform} queries (platform mismatch)")
+    return flattened
 
 def submit_query_with_retries(url, headers, body, query_name):
     for attempt in range(1, MAX_RETRIES + 1):
@@ -92,12 +115,22 @@ def main():
     parser.add_argument("--token-key", required=True, help="API token key")
     parser.add_argument("--queries-file", required=True, help="Path to JSON file containing custom queries")
     parser.add_argument("--base-url", default="http://127.0.0.1:8080", help="BloodHound API base URL")
+    parser.add_argument(
+        "--platforms",
+        nargs="+",  # allow 1+ values: --platforms "Active Directory" "Azure AD"
+        metavar="PLATFORM",
+        help="Import only queries that list one of these platform strings "
+             "(case-insensitive). If omitted, all queries are imported."
+    )
 
     args = parser.parse_args()
 
-    try:
-        queries = load_queries(args.queries_file)
+    # Normalize platform filters to lower-case once
+    platform_filters = [p.lower() for p in (args.platforms or [])]
 
+    try:
+        queries = load_queries(args.queries_file, platform_filters if platform_filters else None)
+
         for query in queries:
             body = json.dumps(query).encode()
             uri = "/api/v2/saved-queries"
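With the third patch applied, a platform-scoped import might look like this; the multi-value form follows the example in the argparse comment, and the token values are again placeholders:

```
python3 utilities/python/query-importer.py \
  --token-id "<API_TOKEN_ID>" \
  --token-key "<API_TOKEN_KEY>" \
  --queries-file Queries.json \
  --platforms "Active Directory" "Azure AD"
```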