From 35ac0f8bd632ebf4496f8995a4a062d2947dc086 Mon Sep 17 00:00:00 2001 From: Skelsec Date: Wed, 6 Dec 2023 16:10:15 +0100 Subject: [PATCH 1/6] adexplorer --- msldap/bloodhound.py | 744 ++++++++++++++++ msldap/client.py | 8 + msldap/commons/ldif.py | 127 +++ msldap/commons/utils.py | 15 - msldap/examples/msldapbloodhound.py | 648 +------------- msldap/examples/msldapclient.py | 476 ++++------ .../external/adexplorersnapshot/__init__.py | 0 .../adexplorersnapshot/parser/__init__.py | 0 .../adexplorersnapshot/parser/classes.py | 360 ++++++++ .../adexplorersnapshot/parser/snapshot.py | 833 ++++++++++++++++++ .../adexplorersnapshot/parser/structure.py | 130 +++ msldap/external/bloodhoundpy/utils.py | 55 ++ msldap/ldap_objects/adcontainer.py | 7 +- msldap/ldap_objects/adou.py | 7 +- msldap/protocol/typeconversion.py | 20 +- 15 files changed, 2463 insertions(+), 967 deletions(-) create mode 100644 msldap/bloodhound.py create mode 100644 msldap/commons/ldif.py create mode 100644 msldap/external/adexplorersnapshot/__init__.py create mode 100644 msldap/external/adexplorersnapshot/parser/__init__.py create mode 100644 msldap/external/adexplorersnapshot/parser/classes.py create mode 100644 msldap/external/adexplorersnapshot/parser/snapshot.py create mode 100644 msldap/external/adexplorersnapshot/parser/structure.py create mode 100644 msldap/external/bloodhoundpy/utils.py diff --git a/msldap/bloodhound.py b/msldap/bloodhound.py new file mode 100644 index 0000000..3415c32 --- /dev/null +++ b/msldap/bloodhound.py @@ -0,0 +1,744 @@ +import os +import zipfile +import json +import base64 +import datetime + +from tqdm import tqdm + +from msldap.external.bloodhoundpy.acls import parse_binary_acl +from msldap.external.bloodhoundpy.resolver import resolve_aces, WELLKNOWN_SIDS +from msldap.external.bloodhoundpy.utils import parse_gplink_string, is_filtered_container, is_filtered_container_child, reverse_dn_components, explode_dn +from msldap.commons.factory import LDAPConnectionFactory +from msldap.connection import MSLDAPClientConnection +from msldap.client import MSLDAPClient +from msldap.external.adexplorersnapshot.parser.snapshot import Snapshot + +async def dummy_print(msg): + print(msg) + +class MSLDAPDump2Bloodhound: + def __init__(self, url: str or MSLDAPClient or LDAPConnectionFactory or MSLDAPClientConnection, progress = True, output_path = None, print_cb = None): + self.debug = False + self.ldap_url = url + self.connection: MSLDAPClient = None + self.ldapinfo = None + self.domainname = None + self.domainsid = None + self.print_cb = print_cb + self.with_progress = progress + if self.print_cb is None: + self.print_cb = dummy_print + + self.DNs = {} + self.DNs_sorted = {} + self.ocache = {} + self.schema = {} + self.aces = {} + self.computer_sidcache = {} + self.token_map = {} + + self.curdate = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S') + self.zipfilepath = '%s_Bloodhound.zip' % self.curdate + if output_path is not None: + self.zipfilepath = os.path.join(output_path, self.zipfilepath) + self.zipfile = None + self.MAX_ENTRIES_PER_FILE = 40000 + + self.totals = { + 'user' : 0, + 'computer' : 0, + 'group' : 0, + 'ou' : 0, + 'gpo' : 0, + 'container' : 0, + 'domain' : 0, + 'trust' : 0 + } + + async def print(self, msg:str): + await self.print_cb(msg) + + + async def create_progress(self, label, total = None): + if self.with_progress is True: + return tqdm(desc = label, total=total) + else: + await self.print('[+] %s' % label) + return None + + async def update_progress(self, pbar, value = 1): + if 
pbar is None: + return + if self.with_progress is True: + pbar.update(value) + + async def close_progress(self, pbar): + if pbar is None: + return + if self.with_progress is True: + pbar.close() + + def split_json(self, enumtype, data): + if data['meta']['count'] <= self.MAX_ENTRIES_PER_FILE: + yield data + return + + #split the data + for i in range(0, data['meta']['count'], self.MAX_ENTRIES_PER_FILE): + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': enumtype, + 'version': 5, + 'count': 0 + } + } + for entry in data['data'][i:i+self.MAX_ENTRIES_PER_FILE]: + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + yield jsonstruct + + + async def write_json_to_zip(self, enumtype, data): + filepart = 0 + for chunk in self.split_json(enumtype, data): + if filepart == 0: + filename = '%s_%s.json' % (self.curdate, enumtype) + else: + filename = '%s_%s_%02d.json' % (self.curdate, enumtype, filepart) + self.zipfile.writestr(filename, json.dumps(chunk)) + filepart += 1 + + async def lookup_dn_children(self, parent_dn): + parent_dn = parent_dn.upper() + parent_dn_reversed = reverse_dn_components(parent_dn) + if parent_dn not in self.DNs: + await self.print('DN not found: %s' % parent_dn_reversed) + return [] + + branch = self.DNs_sorted + level = 0 + for part in explode_dn(parent_dn_reversed): + level += 1 + if part not in branch: + await self.print('Part not found: %s Full: %s Branch: %s Level: %s Parts: %s' % (part, parent_dn_reversed, branch.keys(), level, explode_dn(parent_dn_reversed))) + return [] + branch = branch[part] + + res_dns = [] + for dnpart in branch: + res_dns.append(dnpart + ',' + parent_dn) + + results = [] + for tdn in res_dns: + if is_filtered_container_child(tdn): + continue + if tdn not in self.DNs: + #await self.print('Missing %s' % tdn) + #continue + attrs, err = await self.connection.dnattrs(tdn, ['distinguishedName','objectGUID', 'objectClass','sAMAaccountType', 'sAMAccountName', 'objectSid', 'name']) + res = self.resolve_entry(attrs) + results.append({ + 'ObjectIdentifier': res['objectid'].upper(), + 'ObjectType': res['type'].capitalize(), + }) + continue + entry = self.ocache[self.DNs[tdn]] + results.append({ + 'ObjectIdentifier': entry['ObjectIdentifier'].upper(), + 'ObjectType': entry['ObjectType'].capitalize() if entry['ObjectType'].lower() != 'ou' else 'OU', + }) + + return results + + async def dump_schema(self): + pbar = await self.create_progress('Dumping schema') + async for entry, err in self.connection.get_all_schemaentry(['name', 'schemaIDGUID']): + if err is not None: + raise err + await self.update_progress(pbar) + self.schema[entry.name.lower()] = str(entry.schemaIDGUID) + await self.close_progress(pbar) + + def add_ocache(self, dn, objectid, principal, otype, dns = '', spns = None): + self.totals[otype] += 1 + if objectid in WELLKNOWN_SIDS: + objectid = '%s-%s' % (self.domainname.upper(), objectid.upper()) + self.ocache[objectid] = { + 'dn' : dn.upper(), + 'ObjectIdentifier' : objectid, + 'principal' : principal, + 'ObjectType' : otype, + } + self.DNs[dn.upper()] = objectid + if otype == 'computer': + entry = { + 'ObjectIdentifier' : objectid, + 'ObjectType' : otype + } + if dns is None: + dns = '' + self.computer_sidcache[dns.lower()] = entry + if spns is not None: + for spn in spns: + target = spn.split('/')[1] + target = target.split(':')[0] + self.computer_sidcache[target.lower()] = entry + + def resolve_entry(self, entry): + # I really REALLY did not want to implement this + resolved = {} + account = 
entry.get('sAMAccountName', '') + dn = entry.get('distinguishedName', '') + resolved['objectid'] = entry.get('objectSid', '') + resolved['principal'] = ('%s@%s' % (account, self.domainname)).upper() + if 'sAMAaccountName' in entry: + accountType = entry['sAMAccountType'] + object_class = entry['objectClass'] + if accountType in [268435456, 268435457, 536870912, 536870913]: + resolved['type'] = 'Group' + elif accountType in [805306368] or \ + 'msDS-GroupManagedServiceAccount' in object_class or \ + 'msDS-ManagedServiceAccount' in object_class: + resolved['type'] = 'User' + elif accountType in [805306369]: + resolved['type'] = 'Computer' + short_name = account.rstrip('$') + resolved['principal'] = ('%s.%s' % (short_name, self.domainname)).upper() + elif accountType in [805306370]: + resolved['type'] = 'trustaccount' + else: + resolved['type'] = 'Domain' + return resolved + + if 'objectGUID' in entry: + resolved['objectid'] = entry['objectGUID'] + resolved['principal'] = ('%s@%s' % (entry.get('name', ''), self.domainname)).upper() + object_class = entry.get('objectClass', []) + if 'organizationalUnit' in object_class: + resolved['type'] = 'OU' + elif 'container' in object_class: + resolved['type'] = 'Container' + else: + resolved['type'] = 'Base' + return resolved + + async def dump_lookuptable(self): + pbar = await self.create_progress('Generating lookuptable') + # domains + adinfo, err = await self.connection.get_ad_info() + if err is not None: + raise err + self.domainsid = adinfo.objectSid + self.add_ocache(adinfo.distinguishedName, adinfo.objectSid, '', 'domain') + await self.update_progress(pbar) + + #trusts + async for entry, err in self.connection.get_all_trusts(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'trust') + await self.update_progress(pbar) + + #users + async for entry, err in self.connection.get_all_users(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName']): + if err is not None: + raise err + short_name = entry.sAMAccountName + self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'user') + await self.update_progress(pbar) + + #machines + async for entry, err in self.connection.get_all_machines(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName', 'dNSHostName', 'servicePrincipalName']): + if err is not None: + raise err + short_name = entry.sAMAccountName + dns = entry.dNSHostName + if dns is None: + dns = '' + + self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'computer', dns, entry.servicePrincipalName) + await self.update_progress(pbar) + + #groups + async for entry, err in self.connection.get_all_groups(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectSid, '', 'group') + await self.update_progress(pbar) + + #ous + async for entry, err in self.connection.get_all_ous(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'ou') + await self.update_progress(pbar) + + #containers + async for entry, err in self.connection.get_all_containers(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + if is_filtered_container(entry.distinguishedName): + continue + self.add_ocache(entry.distinguishedName, entry.objectGUID, 
'', 'container') + await self.update_progress(pbar) + + #gpos + async for entry, err in self.connection.get_all_gpos(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'gpo') + await self.update_progress(pbar) + + #foreignsecurityprincipal + async for entry, err in self.connection.get_all_foreignsecurityprincipals(['name','sAMAccountName', 'objectSid', 'objectGUID', 'distinguishedName', 'objectClass']): + bhentry = {} + entry = entry['attributes'] + if entry['objectSid'] in WELLKNOWN_SIDS: + bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) + bhentry['principal'] = self.domainname.upper() + bhentry['type'] = 'foreignsecurityprincipal' + if 'name' in entry: + if entry['name'] in WELLKNOWN_SIDS: + gname, sidtype = WELLKNOWN_SIDS[entry['name']] + bhentry['type'] = sidtype.capitalize() + bhentry['principal'] = '%s@%s' % (gname.upper(), self.domainname.upper()) + bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) + else: + bhentry['objectid'] = entry['name'] + + self.ocache[bhentry['objectid']] = { + 'dn' : entry['distinguishedName'].upper(), + 'ObjectIdentifier' : bhentry['objectid'], + 'principal' : bhentry['principal'], + 'ObjectType' : bhentry['type'], + } + self.DNs[entry['distinguishedName'].upper()] = bhentry['objectid'] + + #await self.print(entry) + + await self.close_progress(pbar) + + for dn in [reverse_dn_components(dn) for dn in self.DNs]: + branch = self.DNs_sorted + for part in explode_dn(dn): + if part not in branch: + branch[part.upper()] = {} + branch = branch[part.upper()] + + if self.debug is True: + with open('dn.json', 'w') as f: + json.dump(self.DNs, f, indent=4) + + with open('dntree.json', 'w') as f: + json.dump(self.DNs_sorted, f, indent=4) + + async def dump_acls(self): + pbar = await self.create_progress('Dumping SDs', total=len(self.ocache)) + for sid in self.ocache: + dn = self.ocache[sid]['dn'] + secdesc, err = await self.connection.get_objectacl_by_dn(dn) + if err is not None: + raise err + dn = dn.upper() + oentry = { + 'IsACLProtected' : None, + 'Properties' : { + 'haslaps' : 'ms-mcs-admpwd' in self.schema + } + } + otype = self.ocache[sid]['ObjectType'] + if otype == 'trust': + continue + if otype == 'ou': + otype = 'organizational-unit' + if dn.upper() not in self.aces: + aces, relations = parse_binary_acl(oentry, otype.lower(), secdesc, self.schema) + self.aces[dn.upper()] = (aces, relations) + await self.update_progress(pbar) + await self.close_progress(pbar) + + async def resolve_gplink(self, gplinks): + if gplinks is None: + return [] + + links = [] + for gplink_dn, options in parse_gplink_string(gplinks): + link = {} + link['IsEnforced'] = options == 2 + if reverse_dn_components(gplink_dn.upper()) in self.DNs: + lguid = self.DNs[reverse_dn_components(gplink_dn.upper())]['ObjectIdentifier'] + else: + attrs, err = await self.connection.dnattrs(gplink_dn, ['objectGUID', 'objectSid']) + if err is not None: + raise err + lguid = attrs['objectGUID'] + link['GUID'] = lguid.upper() + links.append(link) + return links + + def remove_hidden(self, entry): + to_del = [] + for k in entry: + if k.startswith('_'): + to_del.append(k) + for k in to_del: + del entry[k] + return entry + + async def dump_domains(self): + pbar = await self.create_progress('Dumping domains', self.totals['domain']) + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': 'domains', + 'version': 5, + 'count': 0 + 
} + } + + adinfo, err = await self.connection.get_ad_info() + if err is not None: + raise err + + domainentry = adinfo.to_bh(self.domainname) + + meta, relations = self.aces[domainentry['Properties']['distinguishedname'].upper()] + domainentry['IsACLProtected'] = meta['IsACLProtected'] + domainentry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + domainentry['ChildObjects'] = await self.lookup_dn_children(domainentry['Properties']['distinguishedname']) + domainentry['Links'] = await self.resolve_gplink(domainentry['_gPLink']) + + async for entry, err in self.connection.get_all_trusts(): + if err is not None: + raise err + domainentry['Trusts'].append(entry.to_bh()) + + domainentry = self.remove_hidden(domainentry) + jsonstruct['data'].append(domainentry) + jsonstruct['meta']['count'] += 1 + await self.update_progress(pbar) + + await self.write_json_to_zip('domains', jsonstruct) + await self.close_progress(pbar) + if self.debug is True: + with open('domains.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_users(self): + pbar = await self.create_progress('Dumping users', self.totals['user']) + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': 'users', + 'version': 5, + 'count': 0 + } + } + + async for ldapentry, err in self.connection.get_all_users(): + try: + entry = ldapentry.to_bh(self.domainname) + except Exception as e: + print(ldapentry) + raise + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + + if entry['_allowerdtodelegateto'] is not None: + seen = [] + for host in entry['_allowerdtodelegateto']: + try: + target = host.split('/')[1] + target = target.split(':')[0] + except IndexError: + await self.print('[!] Invalid delegation target: %s', host) + continue + try: + sid = self.computer_sidcache[target.lower()] + if sid['ObjectIdentifier'] in seen: + continue + seen[sid['ObjectIdentifier']] = 1 + entry['AllowedToDelegate'].append(sid) + except KeyError: + if '.' 
in target: + entry['AllowedToDelegate'].append(target.upper()) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + await self.update_progress(pbar) + + await self.write_json_to_zip('users', jsonstruct) + await self.close_progress(pbar) + + if self.debug is True: + with open('users.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_computers(self): + pbar = await self.create_progress('Dumping computers', self.totals['computer']) + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': 'computers', + 'version': 5, + 'count': 0 + } + } + + async for ldapentry, err in self.connection.get_all_machines(): + entry = ldapentry.to_bh(self.domainname) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + + if entry['_allowedtoactonbehalfofotheridentity'] is not None: + allowedacl = base64.b64decode(entry['_allowedtoactonbehalfofotheridentity']) + entryres, relations = parse_binary_acl(entry, 'computer', allowedacl, self.schema) + + for ace in resolve_aces(relations, self.domainname, self.domainsid, self.ocache): + if ace['RightName'] == 'Owner': + continue + if ace['RightName'] == 'GenericAll': + entryres['AllowedToAct'].append({ + 'ObjectIdentifier': ace['PrincipalSID'], + 'ObjectType': ace['PrincipalType'].capitalize() + }) + + del entry['_allowedtoactonbehalfofotheridentity'] + if entry['Properties']['allowedtodelegate'] is not None: + seen = {} + for host in entry['Properties']['allowedtodelegate']: + try: + target = host.split('/')[1] + target = target.split(':')[0] + except IndexError: + await self.print('[!] Invalid delegation target: %s', host) + continue + try: + sid = self.computer_sidcache[target.lower()] + if sid['ObjectIdentifier'] in seen: + continue + seen[sid['ObjectIdentifier']] = 1 + entry['AllowedToDelegate'].append(sid) + except KeyError: + if '.' 
in target: + entry['AllowedToDelegate'].append({ + "ObjectIdentifier": target.upper(), + "ObjectType": "Computer" + }) + + entry = self.remove_hidden(entry) + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + await self.update_progress(pbar) + + await self.write_json_to_zip('computers', jsonstruct) + await self.close_progress(pbar) + + if self.debug is True: + with open('computers.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_groups(self): + pbar = await self.create_progress('Dumping groups', self.totals['group']) + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': 'groups', + 'version': 5, + 'count': 0 + } + } + + async for ldapentry, err in self.connection.get_all_groups(): + entry = ldapentry.to_bh(self.domainname) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + + if ldapentry.member is not None: + for member in ldapentry.member: + if member.upper() in self.DNs: + oid = self.DNs[member.upper()] + entry['Members'].append({ + 'ObjectIdentifier' : self.ocache[oid]['ObjectIdentifier'], + 'ObjectType' : self.ocache[oid]['ObjectType'].capitalize() + }) + else: + if member.find('ForeignSecurityPrincipals') != -1: + continue + + entry = self.remove_hidden(entry) + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + await self.update_progress(pbar) + + await self.write_json_to_zip('groups', jsonstruct) + await self.close_progress(pbar) + + if self.debug is True: + with open('groups.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_gpos(self): + pbar = await self.create_progress('Dumping GPOs', self.totals['gpo']) + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': 'gpos', + 'version': 5, + 'count': 0 + } + } + + async for ldapentry, err in self.connection.get_all_gpos(): + entry = ldapentry.to_bh(self.domainname, self.domainsid) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + await self.update_progress(pbar) + + await self.write_json_to_zip('gpos', jsonstruct) + await self.close_progress(pbar) + + if self.debug is True: + with open('gpos.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_ous(self): + pbar = await self.create_progress('Dumping OUs', self.totals['ou']) + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': 'ous', + 'version': 5, + 'count': 0 + } + } + + async for ldapentry, err in self.connection.get_all_ous(): + if err is not None: + raise err + entry = ldapentry.to_bh(self.domainname, self.domainsid) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) + entry['Links'] = await self.resolve_gplink(entry['_gPLink']) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + await self.update_progress(pbar) + + await self.write_json_to_zip('ous', jsonstruct) + await self.close_progress(pbar) + 
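+		# NOTE: minimal usage sketch (illustrative only; the URL below is a placeholder,
+		# not part of this patch). Each dump_* method above builds one BloodHound v5 JSON
+		# document, and write_json_to_zip()/split_json() chunk it into files of at most
+		# MAX_ENTRIES_PER_FILE entries inside the output zip. Assuming a valid msldap
+		# connection URL (or an 'adexplorer://<path to snapshot>' source), the whole
+		# collection could be driven roughly like this:
+		#
+		#   import asyncio
+		#   from msldap.bloodhound import MSLDAPDump2Bloodhound
+		#
+		#   async def amain():
+		#       dumper = MSLDAPDump2Bloodhound('ldap+ntlm-password://DOMAIN\\user:pass@dc.corp.local')
+		#       await dumper.run()  # connects, dumps schema/objects/ACLs, writes the zip
+		#
+		#   asyncio.run(amain())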
+ if self.debug is True: + with open('ous.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_containers(self): + pbar = await self.create_progress('Dumping Containers', self.totals['container']) + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': 'containers', + 'version': 5, + 'count': 0 + } + } + async for ldapentry, err in self.connection.get_all_containers(): + if err is not None: + raise err + if is_filtered_container(ldapentry.distinguishedName): + continue + entry = ldapentry.to_bh(self.domainname, self.domainsid) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + await self.update_progress(pbar) + + await self.write_json_to_zip('containers', jsonstruct) + await self.close_progress(pbar) + + if self.debug is True: + with open('containers.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_ldap(self): + await self.print('[+] Connecting to LDAP server') + + if isinstance(self.ldap_url, str): + if self.ldap_url.startswith('adexplorer://'): + self.ldap_url = self.ldap_url[13:] + self.connection = await Snapshot.from_file(self.ldap_url) + self.ldap_url = self.connection + + if isinstance(self.ldap_url, Snapshot) is False: + if isinstance(self.ldap_url, str): + factory = LDAPConnectionFactory.from_url(self.ldap_url) + self.connection = factory.get_client() + self.connection.keepalive = True + if isinstance(self.ldap_url, LDAPConnectionFactory): + self.connection = self.ldap_url.get_client() + self.connection.keepalive = True + if isinstance(self.ldap_url, MSLDAPClient): + self.connection = self.ldap_url + + if isinstance(self.ldap_url, MSLDAPClientConnection): + self.connection = MSLDAPClient(None, None, connection = self.ldap_url) + + self.connection.keepalive = True + _, err = await self.connection.connect() + if err is not None: + raise err + + self.ldapinfo = self.connection.get_server_info() + self.domainname = self.ldapinfo['defaultNamingContext'].upper().replace('DC=','').replace(',','.') + else: + self.domainname = self.connection.rootdomain.upper().replace('DC=','').replace(',','.') + + + + + await self.print('[+] Connected to LDAP serrver') + + + await self.dump_schema() + await self.dump_lookuptable() + await self.dump_acls() + with zipfile.ZipFile(self.zipfilepath, 'w', zipfile.ZIP_DEFLATED) as self.zipfile: + await self.dump_domains() + await self.dump_users() + await self.dump_computers() + await self.dump_groups() + await self.dump_gpos() + await self.dump_ous() + await self.dump_containers() + + + + async def run(self): + await self.dump_ldap() \ No newline at end of file diff --git a/msldap/client.py b/msldap/client.py index c718b56..8f76b5e 100644 --- a/msldap/client.py +++ b/msldap/client.py @@ -309,6 +309,14 @@ async def get_all_containers(self, attrs:List[str] = MSADContainer_ATTRS): return yield MSADContainer.from_ldap(entry), None #self._ldapinfo logger.debug('Finished polling for entries!') + + async def get_all_foreignsecurityprincipals(self, attrs:List[str]): + ldap_filter = r'(&(objectClass=foreignSecurityPrincipal)(objectCategory=foreignSecurityPrincipal))' + async for entry, err in self.pagedsearch(ldap_filter, attrs): + if err is not None: + yield 
None, err
+				return
+			yield entry, None
 
 	async def get_all_laps(self):
 		"""
diff --git a/msldap/commons/ldif.py b/msldap/commons/ldif.py
new file mode 100644
index 0000000..1b85927
--- /dev/null
+++ b/msldap/commons/ldif.py
@@ -0,0 +1,127 @@
+import io
+import base64
+from typing import Dict, List
+
+class LDIFIdx:
+	def __init__(self, start, end):
+		self.start = start
+		self.end = end
+		self.length = end - start
+
+class MSLDAPLdiff:
+	def __init__(self, max_cache_size:int = 10000):
+		self.filename:str = None
+		self.filehandle:io.TextIOWrapper = None
+		self.dn_index:Dict[str, LDIFIdx] = {}
+		self.objectclass_index = {}
+		self.samaccounttype_index = {}
+		self.objecttype_index = {}
+
+		self.max_cache_size = max_cache_size
+		self.dncache:Dict[str, List[Dict[str, str]]] = {}
+
+	@staticmethod
+	async def from_file(filename:str):
+		ldiff = MSLDAPLdiff()
+		ldiff.filename = filename
+		await ldiff.parse()
+		return ldiff
+
+	async def open_or_handle(self):
+		if self.filehandle is None:
+			self.filehandle = open(self.filename, 'r', encoding='utf-8')
+		return self.filehandle
+
+	async def build_index(self):
+		print('[+] Building index...')
+		current_dn = None
+		start_pos = 0
+		with open(self.filename, 'r', encoding='utf-8') as f:
+			while True:
+				pos = f.tell() # Current position in the file
+				line = f.readline()
+
+				if not line: # End of file
+					if current_dn is not None:
+						# Store the end position for the last entry
+						self.dn_index[current_dn] = LDIFIdx(start_pos, pos)
+					break
+
+				if line.startswith('dn: '):
+					if current_dn is not None:
+						# Store the end position for the previous entry
+						self.dn_index[current_dn] = LDIFIdx(start_pos, pos)
+
+					current_dn = line.split(':', 1)[1].strip().upper()
+					start_pos = pos
+				if line.startswith('objectClass: '):
+					objectclass = line.split(':', 1)[1].strip().upper()
+					if objectclass not in self.objectclass_index:
+						self.objectclass_index[objectclass] = []
+					self.objectclass_index[objectclass].append(current_dn)
+
+				if line.startswith('sAMAccountType: '):
+					samaccounttype = line.split(':', 1)[1].strip().upper()
+					if samaccounttype not in self.samaccounttype_index:
+						self.samaccounttype_index[samaccounttype] = []
+					self.samaccounttype_index[samaccounttype].append(current_dn)
+
+				if line.startswith('objectType: '):
+					objecttype = line.split(':', 1)[1].strip().upper()
+					if objecttype not in self.objecttype_index:
+						self.objecttype_index[objecttype] = []
+					self.objecttype_index[objecttype].append(current_dn)
+
+				elif not line.strip() and current_dn is not None:
+					# Blank line terminates the current entry
+					self.dn_index[current_dn] = LDIFIdx(start_pos, pos)
+					current_dn = None
+
+	async def fetch(self, dn:str):
+		dn = dn.upper()
+		if dn in self.dncache:
+			return self.dncache[dn]
+
+		if dn not in self.dn_index:
+			return None
+
+		raw_entry = []
+		f = await self.open_or_handle()
+		idx = self.dn_index[dn]
+		f.seek(idx.start)
+		data = f.read(idx.length)
+		for line in data.split('\n'):
+			line = line.strip()
+			if line == '':
+				continue
+			if line.startswith('#'):
+				continue
+			raw_entry.append(line)
+
+		entry = self.parse_entry(raw_entry)
+		if len(self.dncache) > self.max_cache_size:
+			self.dncache.popitem()
+		self.dncache[dn] = entry
+		return entry
+
+	def parse_entry(self, raw_entry:List[str]):
+		ed = {}
+		for line in raw_entry:
+			line = line.strip()
+			if line == '':
+				continue
+			if line.startswith('#'):
+				continue
+
+			key, value = line.split(':', 1)
+			key = key.strip()
+			value = value.strip()
+
+			if value.startswith(':'):
+				# 'attr:: <data>' means the value is base64-encoded
+				value = base64.b64decode(value[1:].strip())
+
+			if key not in ed:
+				ed[key] = []
+
+			ed[key].append(value)
+		return ed
+
+	async def parse(self):
+		await self.build_index()
+	
\ No newline at end of file
diff --git 
a/msldap/commons/utils.py b/msldap/commons/utils.py index 6f83ec7..7475964 100644 --- a/msldap/commons/utils.py +++ b/msldap/commons/utils.py @@ -49,22 +49,7 @@ def bh_dt_convert(dt:datetime.datetime): return -1 ts = max(0,int(dt.timestamp())) return ts - - -#taken from bloodhound.py -def is_filtered_container(containerdn): - if "CN=DOMAINUPDATES,CN=SYSTEM,DC=" in containerdn.upper(): - return True - if "CN=POLICIES,CN=SYSTEM,DC=" in containerdn.upper() and (containerdn.upper().startswith('CN=USER') or containerdn.upper().startswith('CN=MACHINE')): - return True - return False -def is_filtered_container_child(containerdn): - if "CN=PROGRAM DATA,DC=" in containerdn.upper(): - return True - if "CN=SYSTEM,DC=" in containerdn.upper(): - return True - return False FUNCTIONAL_LEVELS = { 0: "2000 Mixed/Native", diff --git a/msldap/examples/msldapbloodhound.py b/msldap/examples/msldapbloodhound.py index 8c646fa..549d3c1 100644 --- a/msldap/examples/msldapbloodhound.py +++ b/msldap/examples/msldapbloodhound.py @@ -1,651 +1,5 @@ -import zipfile -import json -import base64 import asyncio -import datetime - -from tqdm import tqdm - -from msldap.external.bloodhoundpy.acls import parse_binary_acl -from msldap.external.bloodhoundpy.resolver import resolve_aces, WELLKNOWN_SIDS -from msldap.commons.utils import is_filtered_container, is_filtered_container_child -from msldap.commons.factory import LDAPConnectionFactory - - - -def reverse_dn_components(dn:str): - rdns = ','.join(reversed(dn.split(','))) - return rdns.upper() - -def explode_dn(dn): - parts = [] - esc = False - part = '' - - for char in dn: - if esc: - part += char - esc = False - elif char == '\\': - esc = True - part += char - elif char == ',': - if part: - parts.append(part) - part = '' - else: - part += char - - if part: - parts.append(part) - - return parts - - -def parse_gplink_string(linkstr): - if not linkstr: - return - for links in linkstr.split('[LDAP://')[1:]: - dn, options = links.rstrip('][').split(';') - yield dn, int(options) - - -class MSLDAPDump2Bloodhound: - def __init__(self, url): - self.ldap_url = url - self.connection = None - self.ldapinfo = None - self.domainname = None - self.domainsid = None - self.with_progress = True - - self.DNs = {} - self.DNs_sorted = {} - self.ocache = {} - self.schema = {} - self.aces = {} - self.computer_sidcache = {} - self.token_map = {} - - self.curdate = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S') - self.zipfilepath = '%s_Bloodhound.zip' % self.curdate - self.zipfile = None - - self.totals = { - 'user' : 0, - 'computer' : 0, - 'group' : 0, - 'ou' : 0, - 'gpo' : 0, - 'container' : 0, - 'domain' : 0, - 'trust' : 0 - } - - - def create_progress(self, label, total = None): - if self.with_progress is True: - return tqdm(desc = label, total=total) - else: - print('[+] %s' % label) - return None - - def update_progress(self, pbar, value = 1): - if pbar is None: - return - if self.with_progress is True: - pbar.update(value) - - def close_progress(self, pbar): - if pbar is None: - return - if self.with_progress is True: - pbar.close() - - async def lookup_dn_children(self, parent_dn): - parent_dn = parent_dn.upper() - parent_dn_reversed = reverse_dn_components(parent_dn) - if parent_dn not in self.DNs: - print('DN not found: %s' % parent_dn_reversed) - return [] - - branch = self.DNs_sorted - level = 0 - for part in explode_dn(parent_dn_reversed): - level += 1 - if part not in branch: - print('Part not found: %s Full: %s Branch: %s Level: %s Parts: %s' % (part, parent_dn_reversed, 
branch.keys(), level, explode_dn(parent_dn_reversed))) - return [] - branch = branch[part] - - res_dns = [] - for dnpart in branch: - res_dns.append(dnpart + ',' + parent_dn) - - results = [] - for tdn in res_dns: - if is_filtered_container_child(tdn): - continue - if tdn not in self.DNs: - print('Missing %s' % tdn) - continue - #attrs, err = await self.connection.dnattrs(tdn, ['objectGUID', 'objectClass','sAMAaccountType', 'sAMAccountName', 'objectSid']) - #print(attrs) - entry = self.ocache[self.DNs[tdn]] - results.append({ - 'ObjectIdentifier': entry['ObjectIdentifier'].upper(), - 'ObjectType': entry['ObjectType'].capitalize() if entry['ObjectType'].lower() != 'ou' else 'OU', - }) - - return results - - async def dump_schema(self): - pbar = self.create_progress('Dumping schema') - async for entry, err in self.connection.get_all_schemaentry(['name', 'schemaIDGUID']): - if err is not None: - raise err - self.update_progress(pbar) - self.schema[entry.name.lower()] = str(entry.schemaIDGUID) - self.close_progress(pbar) - - def add_ocache(self, dn, objectid, principal, otype, dns = '', spns = None): - self.totals[otype] += 1 - if objectid in WELLKNOWN_SIDS: - objectid = '%s-%s' % (self.domainname.upper(), objectid.upper()) - self.ocache[objectid] = { - 'dn' : dn.upper(), - 'ObjectIdentifier' : objectid, - 'principal' : principal, - 'ObjectType' : otype, - } - self.DNs[dn.upper()] = objectid - if otype == 'computer': - entry = { - 'ObjectIdentifier' : objectid, - 'ObjectType' : otype - } - if dns is None: - dns = '' - self.computer_sidcache[dns.lower()] = entry - if spns is not None: - for spn in spns: - target = spn.split('/')[1] - target = target.split(':')[0] - self.computer_sidcache[target.lower()] = entry - - async def dump_lookuptable(self): - pbar = self.create_progress('Generating lookuptable') - # domains - adinfo, err = await self.connection.get_ad_info() - if err is not None: - raise err - self.domainsid = adinfo.objectSid - self.add_ocache(adinfo.distinguishedName, adinfo.objectSid, '', 'domain') - self.update_progress(pbar) - - #trusts - async for entry, err in self.connection.get_all_trusts(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'trust') - self.update_progress(pbar) - - #users - async for entry, err in self.connection.get_all_users(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName']): - if err is not None: - raise err - short_name = entry.sAMAccountName - self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'user') - self.update_progress(pbar) - - #machines - async for entry, err in self.connection.get_all_machines(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName', 'dNSHostName', 'servicePrincipalName']): - if err is not None: - raise err - short_name = entry.sAMAccountName - dns = entry.dNSHostName - if dns is None: - dns = '' - self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'computer', dns, entry.servicePrincipalName) - self.update_progress(pbar) - - #groups - async for entry, err in self.connection.get_all_groups(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectSid, '', 'group') - self.update_progress(pbar) - - #ous - async for entry, err in self.connection.get_all_ous(['distinguishedName', 'objectSid', 'objectGUID']): - if err is 
not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'ou') - self.update_progress(pbar) - - #containers - async for entry, err in self.connection.get_all_containers(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - if is_filtered_container(entry.distinguishedName): - continue - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'container') - self.update_progress(pbar) - - #gpos - async for entry, err in self.connection.get_all_gpos(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'gpo') - self.update_progress(pbar) - - #foreignsecurityprincipal - query = '(&(objectClass=foreignSecurityPrincipal)(objectCategory=foreignSecurityPrincipal))' - async for entry, err in self.connection.pagedsearch(query, ['name','sAMAccountName', 'objectSid', 'objectGUID', 'distinguishedName', 'objectClass']): - bhentry = {} - entry = entry['attributes'] - if entry['objectSid'] in WELLKNOWN_SIDS: - bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) - bhentry['principal'] = self.domainname.upper() - bhentry['type'] = 'foreignsecurityprincipal' - if 'name' in entry: - if entry['name'] in WELLKNOWN_SIDS: - gname, sidtype = WELLKNOWN_SIDS[entry['name']] - bhentry['type'] = sidtype.capitalize() - bhentry['principal'] = '%s@%s' % (gname.upper(), self.domainname.upper()) - bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) - else: - bhentry['objectid'] = entry['name'] - - self.ocache[bhentry['objectid']] = { - 'dn' : entry['distinguishedName'].upper(), - 'ObjectIdentifier' : bhentry['objectid'], - 'principal' : bhentry['principal'], - 'ObjectType' : bhentry['type'], - } - self.DNs[entry['distinguishedName'].upper()] = bhentry['objectid'] - - print(entry) - - self.close_progress(pbar) - - for dn in [reverse_dn_components(dn) for dn in self.DNs]: - branch = self.DNs_sorted - for part in explode_dn(dn): - if part not in branch: - branch[part.upper()] = {} - branch = branch[part.upper()] - - with open('dn.json', 'w') as f: - json.dump(self.DNs, f, indent=4) - - with open('dntree.json', 'w') as f: - json.dump(self.DNs_sorted, f, indent=4) - - async def dump_acls(self): - pbar = self.create_progress('Dumping SDs', total=len(self.ocache)) - for sid in self.ocache: - dn = self.ocache[sid]['dn'] - secdesc, err = await self.connection.get_objectacl_by_dn(dn) - if err is not None: - raise err - dn = dn.upper() - oentry = { - 'IsACLProtected' : None, - 'Properties' : { - 'haslaps' : 'ms-mcs-admpwd' in self.schema - } - } - otype = self.ocache[sid]['ObjectType'] - if otype == 'trust': - continue - if otype == 'ou': - otype = 'organizational-unit' - if dn.upper() not in self.aces: - aces, relations = parse_binary_acl(oentry, otype.lower(), secdesc, self.schema) - self.aces[dn.upper()] = (aces, relations) - self.update_progress(pbar) - self.close_progress(pbar) - - async def resolve_gplink(self, gplinks): - if gplinks is None: - return [] - - links = [] - for gplink_dn, options in parse_gplink_string(gplinks): - link = {} - link['IsEnforced'] = options == 2 - if reverse_dn_components(gplink_dn.upper()) in self.DNs: - lguid = self.DNs[reverse_dn_components(gplink_dn.upper())]['ObjectIdentifier'] - else: - attrs, err = await self.connection.dnattrs(gplink_dn, ['objectGUID', 'objectSid']) - if err is not None: - raise err - lguid = attrs['objectGUID'] - link['GUID'] = lguid.upper() - 
links.append(link) - return links - - def remove_hidden(self, entry): - to_del = [] - for k in entry: - if k.startswith('_'): - to_del.append(k) - for k in to_del: - del entry[k] - return entry - - async def dump_domains(self): - pbar = self.create_progress('Dumping domains', self.totals['domain']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'domains', - 'version': 5, - 'count': 0 - } - } - - adinfo, err = await self.connection.get_ad_info() - if err is not None: - raise err - - domainentry = adinfo.to_bh(self.domainname) - meta, relations = self.aces[domainentry['Properties']['distinguishedname'].upper()] - domainentry['IsACLProtected'] = meta['IsACLProtected'] - domainentry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - domainentry['ChildObjects'] = await self.lookup_dn_children(domainentry['Properties']['distinguishedname']) - domainentry['Links'] = await self.resolve_gplink(domainentry['_gPLink']) - - async for entry, err in self.connection.get_all_trusts(): - if err is not None: - raise err - domainentry['Trusts'].append(entry.to_bh()) - - domainentry = self.remove_hidden(domainentry) - jsonstruct['data'].append(domainentry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_domains.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('domains.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_users(self): - pbar = self.create_progress('Dumping users', self.totals['user']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'users', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_users(): - entry = ldapentry.to_bh(self.domainname) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - - if entry['_allowerdtodelegateto'] is not None: - seen = [] - for host in entry['_allowerdtodelegateto']: - try: - target = host.split('/')[1] - target = target.split(':')[0] - except IndexError: - print('[!] Invalid delegation target: %s', host) - continue - try: - sid = self.computer_sidcache[target.lower()] - if sid['ObjectIdentifier'] in seen: - continue - seen[sid['ObjectIdentifier']] = 1 - entry['AllowedToDelegate'].append(sid) - except KeyError: - if '.' 
in target: - entry['AllowedToDelegate'].append(target.upper()) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_users.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('users.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_computers(self): - pbar = self.create_progress('Dumping computers', self.totals['computer']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'computers', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_machines(): - entry = ldapentry.to_bh(self.domainname) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - - if entry['_allowedtoactonbehalfofotheridentity'] is not None: - allowedacl = base64.b64decode(entry['_allowedtoactonbehalfofotheridentity']) - entryres, relations = parse_binary_acl(entry, 'computer', allowedacl, self.schema) - - for ace in resolve_aces(relations, self.domainname, self.domainsid, self.ocache): - if ace['RightName'] == 'Owner': - continue - if ace['RightName'] == 'GenericAll': - entryres['AllowedToAct'].append({ - 'ObjectIdentifier': ace['PrincipalSID'], - 'ObjectType': ace['PrincipalType'].capitalize() - }) - - del entry['_allowedtoactonbehalfofotheridentity'] - if entry['Properties']['allowedtodelegate'] is not None: - seen = {} - for host in entry['Properties']['allowedtodelegate']: - try: - target = host.split('/')[1] - target = target.split(':')[0] - except IndexError: - print('[!] Invalid delegation target: %s', host) - continue - try: - sid = self.computer_sidcache[target.lower()] - if sid['ObjectIdentifier'] in seen: - continue - seen[sid['ObjectIdentifier']] = 1 - entry['AllowedToDelegate'].append(sid) - except KeyError: - if '.' 
in target: - entry['AllowedToDelegate'].append({ - "ObjectIdentifier": target.upper(), - "ObjectType": "Computer" - }) - - entry = self.remove_hidden(entry) - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_computers.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('computers.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_groups(self): - pbar = self.create_progress('Dumping groups', self.totals['group']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'groups', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_groups(): - entry = ldapentry.to_bh(self.domainname) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - - if ldapentry.member is not None: - for member in ldapentry.member: - if member.upper() in self.DNs: - oid = self.DNs[member.upper()] - entry['Members'].append({ - 'ObjectIdentifier' : self.ocache[oid]['ObjectIdentifier'], - 'ObjectType' : self.ocache[oid]['ObjectType'].capitalize() - }) - else: - if member.find('ForeignSecurityPrincipals') != -1: - continue - - entry = self.remove_hidden(entry) - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_groups.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('groups.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_gpos(self): - pbar = self.create_progress('Dumping GPOs', self.totals['gpo']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'gpos', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_gpos(): - entry = ldapentry.to_bh(self.domainname, self.domainsid) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_gpos.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('gpos.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_ous(self): - pbar = self.create_progress('Dumping OUs', self.totals['ou']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'ous', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_ous(): - if err is not None: - raise err - entry = ldapentry.to_bh(self.domainname, self.domainsid) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) - entry['Links'] = await self.resolve_gplink(entry['_gPLink']) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_ous.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('ous.json', 'w') as f: - 
json.dump(jsonstruct, f) - - async def dump_containers(self): - pbar = self.create_progress('Dumping Containers', self.totals['container']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'containers', - 'version': 5, - 'count': 0 - } - } - async for ldapentry, err in self.connection.get_all_containers(): - if err is not None: - raise err - if is_filtered_container(ldapentry.distinguishedName): - continue - entry = ldapentry.to_bh(self.domainname, self.domainsid) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_containers.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('containers.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_ldap(self): - print('[+] Connecting to LDAP server') - self.conn_url = LDAPConnectionFactory.from_url(self.ldap_url) - self.connection = self.conn_url.get_client() - self.connection.keepalive = True - _, err = await self.connection.connect() - if err is not None: - raise err - self.ldapinfo = self.connection.get_server_info() - self.domainname = self.ldapinfo['defaultNamingContext'].upper().replace('DC=','').replace(',','.') - - print('[+] Connected to LDAP serrver') - - - await self.dump_schema() - await self.dump_lookuptable() - await self.dump_acls() - with zipfile.ZipFile(self.zipfilepath, 'w', zipfile.ZIP_DEFLATED) as self.zipfile: - await self.dump_domains() - await self.dump_users() - await self.dump_computers() - await self.dump_groups() - await self.dump_gpos() - await self.dump_ous() - await self.dump_containers() - - - - async def run(self): - await self.dump_ldap() +from msldap.bloodhound import MSLDAPDump2Bloodhound async def amain(): args = parser.parse_args() diff --git a/msldap/examples/msldapclient.py b/msldap/examples/msldapclient.py index 8d14c31..8267ccd 100644 --- a/msldap/examples/msldapclient.py +++ b/msldap/examples/msldapclient.py @@ -45,7 +45,6 @@ from msldap.wintypes.asn1.sdflagsrequest import SDFlagsRequest from tabulate import tabulate from msldap.commons.exceptions import LDAPSearchException -from msldap.commons.utils import is_filtered_container class MSLDAPClientConsole(aiocmd.PromptToolkitCmd): def __init__(self, url = None): @@ -170,346 +169,237 @@ async def do_computeraddr(self): async def do_dump(self): """Fetches ALL user and machine accounts from the domain with a LOT of attributes""" zip_filename = 'dump_%s.zip' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - sdtempname = 'sdtemp_%s.txt' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") try: await self.do_adinfo(False) await self.do_ldapinfo(False) tname = 'domain_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname users_filename = '%s.tsv' % tname - with open(sdtempname, 'w', newline = '') as sdtemp: + try: + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADInfo_ATTRS)+'\r\n') + adinfo, err = await self.connection.get_ad_info() + if err is not None: + raise err + f.write('\t'.join(adinfo.get_row(MSADInfo_ATTRS))+'\r\n') + print('Adinfo was written to %s' % users_filename) + with 
zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADInfo_ATTRS)+'\r\n') - adinfo, err = await self.connection.get_ad_info() - if err is not None: - raise err - f.write('\t'.join(adinfo.get_row(MSADInfo_ATTRS))+'\r\n') - sdtemp.write('domain:'+adinfo.distinguishedName + '\r\n') - b.write(json.dumps(adinfo.to_bh(self.adinfo.name))+'\r\n') - print('Adinfo was written to %s' % users_filename) - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'schema_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - users_filename = '%s.json' % tname + tname = 'schema_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.json' % tname - try: - pbar = tqdm(desc = 'Writing schema to file %s' % users_filename) - schema = {} - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - async for user, err in self.connection.get_all_schemaentry(['name', 'schemaidguid']): - if err is not None: - raise err - pbar.update() - schema[user.name.lower()] = str(user.schemaIDGUID) + try: + pbar = tqdm(desc = 'Writing schema to file %s' % users_filename) + schema = {} + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + async for user, err in self.connection.get_all_schemaentry(['name', 'schemaidguid']): + if err is not None: + raise err + pbar.update() + schema[user.name.lower()] = str(user.schemaIDGUID) - json.dump(schema, f) + json.dump(schema, f) print('Schema dump was written to %s' % users_filename) pbar.close() with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: dz.write(users_filename, arcname = users_filename) - finally: - try: - os.remove(users_filename) - except: - pass + finally: + try: + os.remove(users_filename) + except: + pass - tname = 'trusts_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'trusts_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + + try: + pbar = tqdm(desc = 'Writing trusts to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADDomainTrust_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_trusts(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADDomainTrust_ATTRS))+'\r\n') + print('Trust dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing trusts to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADDomainTrust_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_trusts(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADDomainTrust_ATTRS))+'\r\n') - 
sdtemp.write('trust:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh())+'\r\n') - - print('Computer dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'users_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'users_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname - try: - pbar = tqdm(desc = 'Writing users to file %s' % users_filename) + try: + pbar = tqdm(desc = 'Writing users to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADUser_TSV_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_users(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADUser_TSV_ATTRS))+'\r\n') - sdtemp.write('user:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name))+'\r\n') - print('Users dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADUser_TSV_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_users(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADUser_TSV_ATTRS))+'\r\n') + print('Users dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: + try: + os.remove(users_filename) + except: + pass - tname = 'computers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'computers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing computers to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADMachine_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_machines(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADMachine_ATTRS))+'\r\n') + print('Computer dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing computers to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADMachine_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_machines(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADMachine_ATTRS))+'\r\n') - 
sdtemp.write('computer:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name))+'\r\n') - print('Computer dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'groups_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'groups_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing groups to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADGroup_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_groups(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADGroup_ATTRS))+'\r\n') + print('Group dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing groups to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADGroup_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_groups(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADGroup_ATTRS))+'\r\n') - sdtemp.write('group:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name))+'\r\n') - print('Group dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'ous_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'ous_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing OUs to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADOU_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_ous(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADOU_ATTRS))+'\r\n') + print('OU dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing OUs to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADOU_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_ous(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADOU_ATTRS))+'\r\n') - sdtemp.write('ou:'+user.distinguishedName + '\r\n') - 
b.write(json.dumps(user.to_bh(self.adinfo.name, str(self.adinfo.objectSid)))+'\r\n') - print('OU dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'containers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname - try: - pbar = tqdm(desc = 'Writing Containers to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADContainer_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_containers(): - if err is not None: - raise err - if is_filtered_container(user.distinguishedName): - continue - pbar.update() - f.write('\t'.join(user.get_row(MSADContainer_ATTRS))+'\r\n') - sdtemp.write('container:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name, str(self.adinfo.objectSid)))+'\r\n') - print('Container dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass - - tname = 'gpos_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'containers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing Containers to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADContainer_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_containers(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADContainer_ATTRS))+'\r\n') + print('Container dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing GPOs to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADOU_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_gpos(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADGPO_ATTRS))+'\r\n') - sdtemp.write('gpo:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name, str(self.adinfo.objectSid)))+'\r\n') - print('GPO dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass + tname = 'gpos_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing GPOs to 
file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADOU_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_gpos(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADGPO_ATTRS))+'\r\n') + print('GPO dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - users_filename = 'tokens_%s.json' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - pbar = tqdm(desc = 'Writing tokens to file %s' % users_filename) - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - async for res, err in self.connection.get_all_tokengroups(): - if err is not None: - raise err - pbar.update() - f.write(json.dumps(res)+'\r\n') - print('Token dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - finally: - try: - os.remove(users_filename) - except: - pass + os.remove(users_filename) + except: + pass - try: - dns_filename = 'dns_%s.tsv' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - pbar = tqdm(desc = 'Writing DNS records to file %s' % dns_filename) - with open(dns_filename, 'w', newline='', encoding = 'utf8') as f: - async for zonedn, name, dnsrecod, err in self.connection.dnsentries(): - if err is not None: - raise err - - dnsdataobj = dnsrecod.get_formatted() - line = '\t'.join([zonedn, name, dnsrecod.Type.name, dnsdataobj.to_line()]) - - f.write(line + '\r\n') - pbar.update(1) - print('DNS dump was written to %s' % dns_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(dns_filename, arcname = dns_filename) - finally: - try: - os.remove(dns_filename) - except: - pass - - total = 0 - with open(sdtempname, 'r', newline = '') as sdtemp: - for line in sdtemp: - total += 1 - try: - with open(sdtempname, 'r', newline = '') as sdtemp: - users_filename = 'sds_%s.json' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - pbar = tqdm(desc = 'Writing SDs to file %s' % users_filename, total=total) - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - for line in sdtemp: - line = line.strip() - line = line.split(':',1) - if len(line) < 2: - continue - dn = line[1].strip() - adsec, err = await self.connection.get_objectacl_by_dn(dn) - if err is not None: - raise err - pbar.update() - f.write(json.dumps({'dn' : dn, 'otype': line[0], 'sd': base64.b64encode(adsec).decode()})+'\r\n') - print('SD dump was written to %s' % users_filename) - pbar.close() - + dns_filename = 'dns_%s.tsv' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + pbar = tqdm(desc = 'Writing DNS records to file %s' % dns_filename) + with open(dns_filename, 'w', newline='', encoding = 'utf8') as f: + async for zonedn, name, dnsrecod, err in self.connection.dnsentries(): + if err is not None: + raise err + dnsdataobj = dnsrecod.get_formatted() + line = '\t'.join([zonedn, name, dnsrecod.Type.name, dnsdataobj.to_line()]) + f.write(line + '\r\n') + pbar.update(1) + print('DNS dump was written to %s' % dns_filename) + pbar.close() with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) + dz.write(dns_filename, arcname = dns_filename) finally: try: - os.remove(users_filename) - 
os.remove(sdtempname) + os.remove(dns_filename) except: pass + + print('All dumps were written to %s' % zip_filename) return True - except: - try: - os.remove(sdtempname) - except: - pass - + except: traceback.print_exc() return False diff --git a/msldap/external/adexplorersnapshot/__init__.py b/msldap/external/adexplorersnapshot/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/msldap/external/adexplorersnapshot/parser/__init__.py b/msldap/external/adexplorersnapshot/parser/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/msldap/external/adexplorersnapshot/parser/classes.py b/msldap/external/adexplorersnapshot/parser/classes.py new file mode 100644 index 0000000..0d7f6ce --- /dev/null +++ b/msldap/external/adexplorersnapshot/parser/classes.py @@ -0,0 +1,360 @@ +from msldap.external.adexplorersnapshot.parser.structure import structure +from msldap.external.bloodhound.acls import LdapSid +from requests.structures import CaseInsensitiveDict +from msldap.commons.utils import bh_dt_convert + +import struct +from collections import UserDict + +import functools +import uuid +from io import BytesIO +import datetime, calendar + +ADSTYPE_INVALID = 0 +ADSTYPE_DN_STRING = 1 +ADSTYPE_CASE_EXACT_STRING = 2 +ADSTYPE_CASE_IGNORE_STRING = 3 +ADSTYPE_PRINTABLE_STRING = 4 +ADSTYPE_NUMERIC_STRING = 5 +ADSTYPE_BOOLEAN = 6 +ADSTYPE_INTEGER = 7 +ADSTYPE_OCTET_STRING = 8 +ADSTYPE_UTC_TIME = 9 +ADSTYPE_LARGE_INTEGER = 10 +ADSTYPE_PROV_SPECIFIC = 11 +ADSTYPE_OBJECT_CLASS = 12 +ADSTYPE_CASEIGNORE_LIST = 13 +ADSTYPE_OCTET_LIST = 14 +ADSTYPE_PATH = 15 +ADSTYPE_POSTALADDRESS = 16 +ADSTYPE_TIMESTAMP = 17 +ADSTYPE_BACKLINK = 18 +ADSTYPE_TYPEDNAME = 19 +ADSTYPE_HOLD = 20 +ADSTYPE_NETADDRESS = 21 +ADSTYPE_REPLICAPOINTER = 22 +ADSTYPE_FAXNUMBER = 23 +ADSTYPE_EMAIL = 24 +ADSTYPE_NT_SECURITY_DESCRIPTOR = 25 +ADSTYPE_UNKNOWN = 26 +ADSTYPE_DN_WITH_BINARY = 27 +ADSTYPE_DN_WITH_STRING = 28 + +class WrapStruct(object): + def __init__(self, snap, in_obj=None): + self.snap = snap + self.fh = snap.fh + self.log = snap.log + + if in_obj: + self._data = in_obj + else: + self._data = getattr(structure, type(self).__name__)(self.fh) + + def __getattr__(self, attr): + if attr.startswith('__') and attr.endswith('__'): + raise AttributeError + + return getattr(self._data, attr) + +class SystemTime(WrapStruct): + def __init__(self, snap=None, in_obj=None): + super().__init__(snap, in_obj) + + d = datetime.datetime(self.wYear, self.wMonth, self.wDay, self.wHour, self.wMinute, self.wSecond) + self.unixtimestamp = calendar.timegm(d.timetuple()) + + def __repr__(self): + return str(self.unixtimestamp) + +# could probably use a rewrite, but it allows us to refer to attributes dynamically, +# meaning they will be retrieved/processed when necessary +class AttributeDict(UserDict): + def __init__(self, obj, raw): + self.obj = obj + self.snap = obj.snap + self.fh = obj.fh + self.raw = raw + + self._dico = CaseInsensitiveDict() + + self.getAttribute = functools.lru_cache()(self.getAttribute) + + + def __getitem__(self, key): + ret = self.getAttribute(key, raw=self.raw) + if key.lower() == 'name': # hacked in to make resolve_ad_entry function work + return ret[0] + return ret + + @property + def data(self): + if len(self._dico) > 0: + return self._dico + + for entry in self.obj.mappingTable: + prop = self.snap.properties[entry.attrIndex] + self._dico[prop.propName] = self.processAttribute(prop, entry.attrOffset, self.raw) + + return self._dico + + def getAttribute(self, attrName, raw=False): + attrIndex = 
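# Illustrative sketch (not part of the patch): SystemTime above converts a Windows
# SYSTEMTIME-style record into a unix timestamp by building a naive datetime and
# treating it as UTC via calendar.timegm (time.mktime would wrongly apply the local
# timezone). Standalone version, assuming the w* fields are already parsed:
import calendar
import datetime

def systemtime_to_unix(wYear, wMonth, wDay, wHour, wMinute, wSecond):
    d = datetime.datetime(wYear, wMonth, wDay, wHour, wMinute, wSecond)
    return calendar.timegm(d.timetuple())  # interprets the tuple as UTC

# systemtime_to_unix(2023, 12, 6, 16, 10, 15) -> 1701879015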
self.snap.propertyDict[attrName] + prop = self.snap.properties[attrIndex] + + for entry in self.obj.mappingTable: + if entry.attrIndex == attrIndex: + return self.processAttribute(prop, entry.attrOffset, raw) + + raise KeyError + + def processAttribute(self, prop, attrOffset, raw): + + attrName = prop.propName.lower() + attrType = prop.adsType + + # at the offset at which the attribute is stored, + # - the first quad indicates how many elements are in the attribute (attributes can be multi-valued), + # - the bytes after depend on what sort of information is stored (e.g. for DN_STRING, the quads after are the offsets at which the element values are stored) + + fileAttrOffset = self.obj.fileOffset + attrOffset + self.fh.seek(fileAttrOffset) + numValues = structure.uint32(self.fh) + + values = [] + + # https://docs.microsoft.com/en-us/windows/win32/api/iads/ns-iads-adsvalue + # https://docs.microsoft.com/en-us/windows/win32/adsi/adsi-simple-data-types + + if attrType in [ADSTYPE_DN_STRING, ADSTYPE_CASE_IGNORE_STRING, ADSTYPE_CASE_IGNORE_STRING, ADSTYPE_PRINTABLE_STRING, ADSTYPE_NUMERIC_STRING, ADSTYPE_OBJECT_CLASS]: + offsets = structure.uint32[numValues](self.fh) + + for v in range(numValues): + self.fh.seek(fileAttrOffset + offsets[v]) # this can also be a negative offset, e.g. referencing data in a previous object + val = structure.wchar[None](self.fh) + values.append(val) + + elif attrType == ADSTYPE_OCTET_STRING: + lengths = structure.uint32[numValues](self.fh) + + for v in range(numValues): + octetStr = structure.char[lengths[v]](self.fh) + val = octetStr + + if not raw: + if len(octetStr) == 16 and attrName.endswith("guid"): + val = str(uuid.UUID(bytes_le=octetStr)) + elif attrName == 'objectsid': + val = str(LdapSid(BytesIO(octetStr))) + + values.append(val) + + elif attrType == ADSTYPE_BOOLEAN: + assert numValues == 1, ["Multiple boolean values, verify data size", self.fileOffset, attrName] + + for v in range(numValues): + val = bool(structure.uint32(self.fh)) # not sure if uint32 is correct type here, check against more data sets + values.append(val) + + elif attrType == ADSTYPE_INTEGER: + + for v in range(numValues): + # defined as DWORD, so reading as uint32 (unsigned) + val = structure.uint32(self.fh) + values.append(val) + + elif attrType == ADSTYPE_LARGE_INTEGER: + + for v in range(numValues): + # defined as LARGE_INTEGER, interestingly this is an int64 (signed) according to MS docs + val = structure.int64(self.fh) + values.append(val) + + elif attrType == ADSTYPE_UTC_TIME: # note that date/times can also be returned as Interval type instead (ADSTYPE_LARGE_INTEGER) - actual time units depend on which attribute is using it + + for v in range(numValues): + systime = SystemTime(self.snap) + val = systime.unixtimestamp + values.append(val) + + elif attrType == ADSTYPE_NT_SECURITY_DESCRIPTOR: + + for v in range(numValues): + lenDescriptorBytes = structure.uint32(self.fh) + descriptorBytes = self.fh.read(lenDescriptorBytes) + values.append(descriptorBytes) + + else: + if self.log: + self.log.warn("Unhandled adsType: %s -> %d" % (attrName, attrType)) + + return values + +class Object(WrapStruct): + def __init__(self, snap=None, in_obj=None): + super().__init__(snap, in_obj) + + self.fileOffset = self.fh.tell() - 4 - 4 - (self.tableSize * 8) + self.fh.seek(self.fileOffset + self.objSize) # move file pointer to the next object + + self.attributes = AttributeDict(self, raw=False) + self.raw_attributes = AttributeDict(self, raw=True) + + self.getObjectClasses = 
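# Illustrative sketch (not part of the patch): processAttribute above renders 16-byte
# octet strings whose attribute name ends in "guid" with uuid.UUID(bytes_le=...),
# because the snapshot stores the first three GUID fields little-endian. Standalone
# example using the well-known foreignSecurityPrincipal schemaIDGUID:
import uuid

raw = bytes.fromhex('121ce3893085d011afda00c04fd930c9')  # on-disk (bytes_le) form
print(uuid.UUID(bytes_le=raw))   # 89e31c12-8530-11d0-afda-00c04fd930c9
# note: uuid.UUID(bytes=raw) would interpret the first three fields big-endian
# and print a different (wrong) value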
functools.lru_cache()(self.getObjectClasses) + self.getObjectCategory = functools.lru_cache()(self.getObjectCategory) + + def getObjectClasses(self): + return list(map(str.casefold, self.attributes.get('objectClass', []))) + + def getObjectCategory(self): + catDN = self.attributes.get('objectCategory', None) + if catDN is None: + return None + + catDN = catDN[0] + catObj = self.snap.classes.get(catDN, None) + if catObj: + return catObj.className.lower() + else: + return None + + classes = property(getObjectClasses) + category = property(getObjectCategory) + + # for easy compatibility with the bloodhound lib + def __getitem__(self, key): + if key == "attributes": + return self.attributes + elif key == "raw_attributes": + return self.raw_attributes + else: + return None + +class Property(WrapStruct): + def __init__(self, snap=None, in_obj=None): + super().__init__(snap, in_obj) + + self.propName = self.propName.rstrip('\x00') + self.DN = self.DN.rstrip('\x00') + self.schemaIDGUID = uuid.UUID(bytes_le=self.schemaIDGUID) + +class Class(WrapStruct): + def __init__(self, snap=None, in_obj=None): + super().__init__(snap, in_obj) + + self.className = self.className.rstrip('\x00') + self.DN = self.DN.rstrip('\x00') + self.schemaIDGUID = uuid.UUID(bytes_le=self.schemaIDGUID) + +class Header(WrapStruct): + def __init__(self, snap, in_obj=None): + super().__init__(snap, in_obj) + + self.server = self.server.rstrip('\x00') + self.mappingOffset = (self.fileoffsetHigh << 32) | self.fileoffsetLow + self.filetimeUnix = bh_dt_convert(self.filetime) #ADUtils.win_timestamp_to_unix(self.filetime) + +class Snapshot(object): + def __init__(self, fh, log=None): + self.fh = fh + self.log = log + self.objectOffsets = {} + + # the order in which we're parsing matters, due to the file handle's position + # typically, you would call as follows: + + # self.parseHeader() + # self.parseObjectOffsets() + # self.parseProperties() + # self.parseClasses() + # self.parseRights() + + def parseHeader(self): + self.fh.seek(0) + self.header = Header(self) + + def parseObjectOffsets(self): + self.fh.seek(0x43e) + + # we are only keeping offsets at this stage, as some databases grow very big + + if self.log: + prog = self.log.progress(f"Parsing object offsets", rate=0.1) + + self.objectOffsets = [] + for i in range(self.header.numObjects): + pos = self.fh.tell() + objSize = struct.unpack(" Date: Thu, 7 Dec 2023 09:00:44 +0100 Subject: [PATCH 2/6] adexplorer done, better prints etc --- msldap/bloodhound.py | 260 +++++++------ .../snapshot.py => commons/adexplorer.py} | 158 ++++---- msldap/examples/msldapbloodhound.py | 2 +- .../external/adexplorersnapshot/__init__.py | 0 .../adexplorersnapshot/parser/__init__.py | 0 .../adexplorersnapshot/parser/classes.py | 360 ------------------ .../adexplorersnapshot/parser/structure.py | 130 ------- msldap/external/bloodhoundpy/acls.py | 6 +- msldap/external/bloodhoundpy/resolver.py | 4 +- 9 files changed, 233 insertions(+), 687 deletions(-) rename msldap/{external/adexplorersnapshot/parser/snapshot.py => commons/adexplorer.py} (88%) delete mode 100644 msldap/external/adexplorersnapshot/__init__.py delete mode 100644 msldap/external/adexplorersnapshot/parser/__init__.py delete mode 100644 msldap/external/adexplorersnapshot/parser/classes.py delete mode 100644 msldap/external/adexplorersnapshot/parser/structure.py diff --git a/msldap/bloodhound.py b/msldap/bloodhound.py index 3415c32..d9785eb 100644 --- a/msldap/bloodhound.py +++ b/msldap/bloodhound.py @@ -3,6 +3,7 @@ import json import base64 
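# Simplified sketch (not part of the patch): Snapshot.parseObjectOffsets above only
# records where each object starts instead of fully parsing it, since snapshots can
# be very large. The core of that pass is "remember the position, read the 4-byte
# little-endian record size, skip to the next record"; the header layout is
# simplified here for illustration:
import struct

def index_records(fh, num_objects, first_offset=0x43e):
    offsets = []
    fh.seek(first_offset)
    for _ in range(num_objects):
        pos = fh.tell()
        obj_size = struct.unpack('<I', fh.read(4))[0]  # size covers the whole record
        offsets.append(pos)
        fh.seek(pos + obj_size)  # jump straight to the next record
    return offsets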
import datetime +import asyncio from tqdm import tqdm @@ -12,19 +13,22 @@ from msldap.commons.factory import LDAPConnectionFactory from msldap.connection import MSLDAPClientConnection from msldap.client import MSLDAPClient -from msldap.external.adexplorersnapshot.parser.snapshot import Snapshot +from msldap.commons.adexplorer import Snapshot +from msldap import logger async def dummy_print(msg): print(msg) class MSLDAPDump2Bloodhound: - def __init__(self, url: str or MSLDAPClient or LDAPConnectionFactory or MSLDAPClientConnection, progress = True, output_path = None, print_cb = None): + def __init__(self, url: str or MSLDAPClient or LDAPConnectionFactory or MSLDAPClientConnection, progress = True, output_path = None, use_mp:bool=True, print_cb = None): self.debug = False self.ldap_url = url self.connection: MSLDAPClient = None self.ldapinfo = None self.domainname = None self.domainsid = None + self.use_mp = use_mp + self.mp_sdbatch_length = 5000 self.print_cb = print_cb self.with_progress = progress if self.print_cb is None: @@ -78,6 +82,18 @@ async def close_progress(self, pbar): return if self.with_progress is True: pbar.close() + + def get_json_wrapper(self, enumtype): + return { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': enumtype, + 'version': 5, + 'count': 0 + } + } + def split_json(self, enumtype, data): if data['meta']['count'] <= self.MAX_ENTRIES_PER_FILE: @@ -101,21 +117,19 @@ def split_json(self, enumtype, data): yield jsonstruct - async def write_json_to_zip(self, enumtype, data): - filepart = 0 - for chunk in self.split_json(enumtype, data): - if filepart == 0: - filename = '%s_%s.json' % (self.curdate, enumtype) - else: - filename = '%s_%s_%02d.json' % (self.curdate, enumtype, filepart) - self.zipfile.writestr(filename, json.dumps(chunk)) - filepart += 1 + async def write_json_to_zip(self, enumtype, data, filepart = 0): + if filepart == 0: + filename = '%s_%s.json' % (self.curdate, enumtype) + else: + filename = '%s_%s_%02d.json' % (self.curdate, enumtype, filepart) + self.zipfile.writestr(filename, json.dumps(data)) + async def lookup_dn_children(self, parent_dn): parent_dn = parent_dn.upper() parent_dn_reversed = reverse_dn_components(parent_dn) if parent_dn not in self.DNs: - await self.print('DN not found: %s' % parent_dn_reversed) + logger.debug('[BH] DN not found: %s' % parent_dn_reversed) return [] branch = self.DNs_sorted @@ -123,7 +137,7 @@ async def lookup_dn_children(self, parent_dn): for part in explode_dn(parent_dn_reversed): level += 1 if part not in branch: - await self.print('Part not found: %s Full: %s Branch: %s Level: %s Parts: %s' % (part, parent_dn_reversed, branch.keys(), level, explode_dn(parent_dn_reversed))) + logger.debug('[BH] Part not found: %s Full: %s Branch: %s Level: %s Parts: %s' % (part, parent_dn_reversed, branch.keys(), level, explode_dn(parent_dn_reversed))) return [] branch = branch[part] @@ -136,9 +150,12 @@ async def lookup_dn_children(self, parent_dn): if is_filtered_container_child(tdn): continue if tdn not in self.DNs: - #await self.print('Missing %s' % tdn) - #continue attrs, err = await self.connection.dnattrs(tdn, ['distinguishedName','objectGUID', 'objectClass','sAMAaccountType', 'sAMAccountName', 'objectSid', 'name']) + if err is not None: + raise err + if attrs is None or len(attrs) == 0: + logger.debug('[BH] Missing DN: %s' % tdn) + continue res = self.resolve_entry(attrs) results.append({ 'ObjectIdentifier': res['objectid'].upper(), @@ -155,6 +172,10 @@ async def lookup_dn_children(self, parent_dn): async def 
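# Illustrative sketch (not part of the patch): the collector keeps one BloodHound
# v5-style JSON wrapper per object type and rolls over to a new numbered file part
# as soon as MAX_ENTRIES_PER_FILE entries are buffered, flushing whatever remains at
# the end. The bookkeeping in isolation, with write_part() standing in for
# write_json_to_zip():
MAX_ENTRIES_PER_FILE = 40000

def new_wrapper(enumtype):
    return {'data': [], 'meta': {'methods': 0, 'type': enumtype, 'version': 5, 'count': 0}}

def collect(entries, enumtype, write_part):
    wrapper, part = new_wrapper(enumtype), 0
    for entry in entries:
        wrapper['data'].append(entry)
        wrapper['meta']['count'] += 1
        if wrapper['meta']['count'] == MAX_ENTRIES_PER_FILE:
            write_part(enumtype, wrapper, part)            # flush a full chunk
            wrapper, part = new_wrapper(enumtype), part + 1
    if wrapper['meta']['count'] > 0:                       # flush the remainder
        write_part(enumtype, wrapper, part)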
dump_schema(self): pbar = await self.create_progress('Dumping schema') + # manual stuff here... + # https://learn.microsoft.com/en-us/windows/win32/adschema/c-foreignsecurityprincipal + self.schema['foreignsecurityprincipal'] = '89e31c12-8530-11d0-afda-00c04fd930c9' + async for entry, err in self.connection.get_all_schemaentry(['name', 'schemaIDGUID']): if err is not None: raise err @@ -296,6 +317,9 @@ async def dump_lookuptable(self): async for entry, err in self.connection.get_all_foreignsecurityprincipals(['name','sAMAccountName', 'objectSid', 'objectGUID', 'distinguishedName', 'objectClass']): bhentry = {} entry = entry['attributes'] + if 'container' in entry.get('objectClass', []) is True: + continue + if entry['objectSid'] in WELLKNOWN_SIDS: bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) bhentry['principal'] = self.domainname.upper() @@ -316,8 +340,6 @@ async def dump_lookuptable(self): 'ObjectType' : bhentry['type'], } self.DNs[entry['distinguishedName'].upper()] = bhentry['objectid'] - - #await self.print(entry) await self.close_progress(pbar) @@ -336,6 +358,8 @@ async def dump_lookuptable(self): json.dump(self.DNs_sorted, f, indent=4) async def dump_acls(self): + sdbatch = [] + tasks = [] pbar = await self.create_progress('Dumping SDs', total=len(self.ocache)) for sid in self.ocache: dn = self.ocache[sid]['dn'] @@ -355,9 +379,34 @@ async def dump_acls(self): if otype == 'ou': otype = 'organizational-unit' if dn.upper() not in self.aces: - aces, relations = parse_binary_acl(oentry, otype.lower(), secdesc, self.schema) - self.aces[dn.upper()] = (aces, relations) + if self.use_mp is True: + from concurrent.futures import ProcessPoolExecutor + sdbatch.append((dn, oentry, otype.lower(), secdesc, self.schema)) + if len(sdbatch) > self.mp_sdbatch_length: + loop = asyncio.get_running_loop() + with ProcessPoolExecutor() as executor: + for sde in sdbatch: + tasks.append(loop.run_in_executor(executor, parse_binary_acl, *sde)) + results = await asyncio.gather(*tasks) + for dn, aces, relations in results: + self.aces[dn.upper()] = (aces, relations) + sdbatch = [] + tasks = [] + else: + dn, aces, relations = parse_binary_acl(dn, oentry, otype.lower(), secdesc, self.schema) + self.aces[dn.upper()] = (aces, relations) await self.update_progress(pbar) + + if len(sdbatch) != 0: + loop = asyncio.get_running_loop() + with ProcessPoolExecutor() as executor: + for sde in sdbatch: + tasks.append(loop.run_in_executor(executor, parse_binary_acl, *sde)) + results = await asyncio.gather(*tasks) + for dn, aces, relations in results: + self.aces[dn.upper()] = (aces, relations) + sdbatch = [] + tasks = [] await self.close_progress(pbar) async def resolve_gplink(self, gplinks): @@ -371,10 +420,17 @@ async def resolve_gplink(self, gplinks): if reverse_dn_components(gplink_dn.upper()) in self.DNs: lguid = self.DNs[reverse_dn_components(gplink_dn.upper())]['ObjectIdentifier'] else: - attrs, err = await self.connection.dnattrs(gplink_dn, ['objectGUID', 'objectSid']) + attrs, err = await self.connection.dnattrs(gplink_dn.upper(), ['objectGUID', 'objectSid']) if err is not None: raise err - lguid = attrs['objectGUID'] + if attrs is None or len(attrs) == 0: + logger.debug('[BH] Missing DN: %s' % gplink_dn) + continue + try: + lguid = attrs['objectGUID'] + except: + logger.debug('[BH] Missing GUID for %s' % gplink_dn) + continue link['GUID'] = lguid.upper() links.append(link) return links @@ -390,16 +446,6 @@ def remove_hidden(self, entry): async def dump_domains(self): pbar = 
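# Simplified sketch (not part of the patch): dump_acls above batches security
# descriptors and hands them to a ProcessPoolExecutor via run_in_executor, so the
# CPU-bound ACL parsing does not block the asyncio event loop. The pattern in
# isolation, with a picklable placeholder standing in for parse_binary_acl:
import asyncio
from concurrent.futures import ProcessPoolExecutor

def parse_sd(dn, secdesc):
    # placeholder for the real parser; must be a top-level function so it pickles
    return dn, len(secdesc)

async def parse_batch(batch):
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as executor:
        tasks = [loop.run_in_executor(executor, parse_sd, dn, sd) for dn, sd in batch]
        return await asyncio.gather(*tasks)

# asyncio.run(parse_batch([('DC=TEST,DC=LOCAL', b'\x01\x00\x04\x80')]))
# (run under an "if __name__ == '__main__':" guard where multiprocessing spawns workers)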
await self.create_progress('Dumping domains', self.totals['domain']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'domains', - 'version': 5, - 'count': 0 - } - } - adinfo, err = await self.connection.get_ad_info() if err is not None: raise err @@ -412,6 +458,8 @@ async def dump_domains(self): domainentry['ChildObjects'] = await self.lookup_dn_children(domainentry['Properties']['distinguishedname']) domainentry['Links'] = await self.resolve_gplink(domainentry['_gPLink']) + jsonstruct = self.get_json_wrapper('domains') + filectr = 0 async for entry, err in self.connection.get_all_trusts(): if err is not None: raise err @@ -420,9 +468,14 @@ async def dump_domains(self): domainentry = self.remove_hidden(domainentry) jsonstruct['data'].append(domainentry) jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('domains', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('domains') + filectr += 1 await self.update_progress(pbar) - await self.write_json_to_zip('domains', jsonstruct) + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('domains', jsonstruct, filectr) await self.close_progress(pbar) if self.debug is True: with open('domains.json', 'w') as f: @@ -430,34 +483,26 @@ async def dump_domains(self): async def dump_users(self): pbar = await self.create_progress('Dumping users', self.totals['user']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'users', - 'version': 5, - 'count': 0 - } - } + jsonstruct = self.get_json_wrapper('users') + filectr = 0 async for ldapentry, err in self.connection.get_all_users(): - try: - entry = ldapentry.to_bh(self.domainname) - except Exception as e: - print(ldapentry) - raise + if err is not None: + raise err + + entry = ldapentry.to_bh(self.domainname) meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] entry['IsACLProtected'] = meta['IsACLProtected'] entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) if entry['_allowerdtodelegateto'] is not None: - seen = [] + seen = {} for host in entry['_allowerdtodelegateto']: try: target = host.split('/')[1] target = target.split(':')[0] except IndexError: - await self.print('[!] 
Invalid delegation target: %s', host) + logger.debug('[BH] Invalid delegation target: %s', host) continue try: sid = self.computer_sidcache[target.lower()] @@ -472,9 +517,14 @@ async def dump_users(self): jsonstruct['data'].append(entry) jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('users', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('users') + filectr += 1 await self.update_progress(pbar) - await self.write_json_to_zip('users', jsonstruct) + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('users', jsonstruct, filectr) await self.close_progress(pbar) if self.debug is True: @@ -483,16 +533,8 @@ async def dump_users(self): async def dump_computers(self): pbar = await self.create_progress('Dumping computers', self.totals['computer']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'computers', - 'version': 5, - 'count': 0 - } - } - + jsonstruct = self.get_json_wrapper('computers') + filectr = 0 async for ldapentry, err in self.connection.get_all_machines(): entry = ldapentry.to_bh(self.domainname) meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] @@ -501,7 +543,7 @@ async def dump_computers(self): if entry['_allowedtoactonbehalfofotheridentity'] is not None: allowedacl = base64.b64decode(entry['_allowedtoactonbehalfofotheridentity']) - entryres, relations = parse_binary_acl(entry, 'computer', allowedacl, self.schema) + _, entryres, relations = parse_binary_acl(entry['Properties']['distinguishedname'].upper(), entry, 'computer', allowedacl, self.schema) for ace in resolve_aces(relations, self.domainname, self.domainsid, self.ocache): if ace['RightName'] == 'Owner': @@ -520,7 +562,7 @@ async def dump_computers(self): target = host.split('/')[1] target = target.split(':')[0] except IndexError: - await self.print('[!] 
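# Illustrative sketch (not part of the patch): the delegation handling above pulls
# the target host out of each allowedtodelegateto SPN ("service/host:port") by taking
# the part after the first '/' and dropping any ':port', then resolves it through the
# computer SID cache. Just the string handling, standalone:
def spn_target(spn: str):
    try:
        target = spn.split('/')[1]            # 'cifs/srv01.corp.local:445' -> 'srv01.corp.local:445'
    except IndexError:
        return None                           # malformed SPN: logged and skipped above
    return target.split(':')[0].lower()       # drop the optional port

# spn_target('cifs/SRV01.corp.local:445') -> 'srv01.corp.local'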
Invalid delegation target: %s', host) + logger.debug('[BH] Invalid delegation target: %s', host) continue try: sid = self.computer_sidcache[target.lower()] @@ -538,9 +580,14 @@ async def dump_computers(self): entry = self.remove_hidden(entry) jsonstruct['data'].append(entry) jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('computers', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('computers') + filectr += 1 await self.update_progress(pbar) - await self.write_json_to_zip('computers', jsonstruct) + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('computers', jsonstruct, filectr) await self.close_progress(pbar) if self.debug is True: @@ -549,16 +596,8 @@ async def dump_computers(self): async def dump_groups(self): pbar = await self.create_progress('Dumping groups', self.totals['group']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'groups', - 'version': 5, - 'count': 0 - } - } - + jsonstruct = self.get_json_wrapper('groups') + filectr = 0 async for ldapentry, err in self.connection.get_all_groups(): entry = ldapentry.to_bh(self.domainname) meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] @@ -580,9 +619,14 @@ async def dump_groups(self): entry = self.remove_hidden(entry) jsonstruct['data'].append(entry) jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('groups', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('groups') + filectr += 1 await self.update_progress(pbar) - await self.write_json_to_zip('groups', jsonstruct) + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('groups', jsonstruct, filectr) await self.close_progress(pbar) if self.debug is True: @@ -591,16 +635,8 @@ async def dump_groups(self): async def dump_gpos(self): pbar = await self.create_progress('Dumping GPOs', self.totals['gpo']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'gpos', - 'version': 5, - 'count': 0 - } - } - + jsonstruct = self.get_json_wrapper('gpos') + filectr = 0 async for ldapentry, err in self.connection.get_all_gpos(): entry = ldapentry.to_bh(self.domainname, self.domainsid) meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] @@ -610,9 +646,14 @@ async def dump_gpos(self): jsonstruct['data'].append(entry) jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('gpos', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('gpos') + filectr += 1 await self.update_progress(pbar) - await self.write_json_to_zip('gpos', jsonstruct) + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('gpos', jsonstruct, filectr) await self.close_progress(pbar) if self.debug is True: @@ -621,15 +662,8 @@ async def dump_gpos(self): async def dump_ous(self): pbar = await self.create_progress('Dumping OUs', self.totals['ou']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'ous', - 'version': 5, - 'count': 0 - } - } + jsonstruct = self.get_json_wrapper('ous') + filectr = 0 async for ldapentry, err in self.connection.get_all_ous(): if err is not None: @@ -644,9 +678,15 @@ async def dump_ous(self): jsonstruct['data'].append(entry) jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('ous', jsonstruct, filectr) + jsonstruct = 
self.get_json_wrapper('ous') + filectr += 1 + await self.update_progress(pbar) - await self.write_json_to_zip('ous', jsonstruct) + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('ous', jsonstruct, filectr) await self.close_progress(pbar) if self.debug is True: @@ -655,15 +695,8 @@ async def dump_ous(self): async def dump_containers(self): pbar = await self.create_progress('Dumping Containers', self.totals['container']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'containers', - 'version': 5, - 'count': 0 - } - } + jsonstruct = self.get_json_wrapper('containers') + filectr = 0 async for ldapentry, err in self.connection.get_all_containers(): if err is not None: raise err @@ -678,9 +711,14 @@ async def dump_containers(self): jsonstruct['data'].append(entry) jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('containers', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('containers') + filectr += 1 await self.update_progress(pbar) - await self.write_json_to_zip('containers', jsonstruct) + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('containers', jsonstruct, filectr) await self.close_progress(pbar) if self.debug is True: @@ -688,13 +726,13 @@ async def dump_containers(self): json.dump(jsonstruct, f) async def dump_ldap(self): - await self.print('[+] Connecting to LDAP server') - if isinstance(self.ldap_url, str): if self.ldap_url.startswith('adexplorer://'): self.ldap_url = self.ldap_url[13:] + await self.print('[+] Parsing ADEXPLORER Snapshot...') self.connection = await Snapshot.from_file(self.ldap_url) self.ldap_url = self.connection + await self.print('[+] Parsing done!') if isinstance(self.ldap_url, Snapshot) is False: if isinstance(self.ldap_url, str): @@ -709,23 +747,19 @@ async def dump_ldap(self): if isinstance(self.ldap_url, MSLDAPClientConnection): self.connection = MSLDAPClient(None, None, connection = self.ldap_url) - + + await self.print('[+] Connecting to LDAP server') self.connection.keepalive = True _, err = await self.connection.connect() if err is not None: raise err + await self.print('[+] Connected to LDAP serrver') self.ldapinfo = self.connection.get_server_info() self.domainname = self.ldapinfo['defaultNamingContext'].upper().replace('DC=','').replace(',','.') else: self.domainname = self.connection.rootdomain.upper().replace('DC=','').replace(',','.') - - - - await self.print('[+] Connected to LDAP serrver') - - await self.dump_schema() await self.dump_lookuptable() await self.dump_acls() diff --git a/msldap/external/adexplorersnapshot/parser/snapshot.py b/msldap/commons/adexplorer.py similarity index 88% rename from msldap/external/adexplorersnapshot/parser/snapshot.py rename to msldap/commons/adexplorer.py index d3c149f..fb0efb6 100644 --- a/msldap/external/adexplorersnapshot/parser/snapshot.py +++ b/msldap/commons/adexplorer.py @@ -1,20 +1,25 @@ + +# The following code was heavily inspired by the +# ADExplorerSnapshot.py project created by c3c +# +# The project was licensed under MIT at the time +# of creating this script, but did not include a +# LICENSE.md file +# https://github.com/c3c/ADExplorerSnapshot.py + import io import struct import enum -import uuid import calendar - +from datetime import datetime from typing import Dict, List -#from msldap.external.adexplorersnapshot.parser.classes import Header -#from msldap.external.adexplorersnapshot.parser.structure import structure -from winacl.dtyp.sid import SID 
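# Illustrative sketch (not part of the patch): dump_ldap above special-cases target
# strings that begin with "adexplorer://" and treats the remainder as the path of an
# offline ADExplorer snapshot; everything else goes through the live-LDAP path. The
# dispatch itself is plain string handling:
def resolve_target(url: str):
    prefix = 'adexplorer://'
    if url.startswith(prefix):
        return 'snapshot', url[len(prefix):]   # offline snapshot file path
    return 'ldap', url                         # live LDAP connection string

# resolve_target('adexplorer://corp_snapshot.dat') -> ('snapshot', 'corp_snapshot.dat')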
-from msldap.external.bloodhoundpy.lib.cstruct import hexdump + +from msldap import logger from msldap.ldap_objects import MSADSchemaEntry, MSADInfo_ATTRS, MSADInfo, MSADContainer, MSADContainer_ATTRS, \ MSADDomainTrust_ATTRS, MSADDomainTrust, MSADOU, MSADOU_ATTRS, MSADUser, MSADUser_ATTRS, MSADGroup, MSADGroup_ATTRS,\ MSADMachine_ATTRS, MSADMachine, MSADGPO_ATTRS, MSADGPO from msldap.protocol.typeconversion import MSLDAP_BUILTIN_ATTRIBUTE_TYPES, LDAP_WELL_KNOWN_ATTRS -from datetime import datetime, tzinfo ENCODER_SPEFIFIC_FULCTIONS = [ 'single_bool', 'single_str', 'multi_str' @@ -330,6 +335,7 @@ def __init__(self): self.fileoffsetHigh = None self.fileoffsetEnd = None self.unk0x43a = None + self.mappingOffset = None @staticmethod def from_bytes(data:bytes): @@ -349,6 +355,7 @@ def from_buffer(buff): header.fileoffsetHigh = struct.unpack(" 0: - return self._dico - - for entry in self.obj.mappingTable: - prop = self.snap.properties[entry.attrIndex] - self._dico[prop.propName] = self.processAttribute(prop, entry.attrOffset, self.raw) - - return self._dico - - def getAttribute(self, attrName, raw=False): - attrIndex = self.snap.propertyDict[attrName] - prop = self.snap.properties[attrIndex] - - for entry in self.obj.mappingTable: - if entry.attrIndex == attrIndex: - return self.processAttribute(prop, entry.attrOffset, raw) - - raise KeyError - - def processAttribute(self, prop, attrOffset, raw): - - attrName = prop.propName.lower() - attrType = prop.adsType - - # at the offset at which the attribute is stored, - # - the first quad indicates how many elements are in the attribute (attributes can be multi-valued), - # - the bytes after depend on what sort of information is stored (e.g. for DN_STRING, the quads after are the offsets at which the element values are stored) - - fileAttrOffset = self.obj.fileOffset + attrOffset - self.fh.seek(fileAttrOffset) - numValues = structure.uint32(self.fh) - - values = [] - - # https://docs.microsoft.com/en-us/windows/win32/api/iads/ns-iads-adsvalue - # https://docs.microsoft.com/en-us/windows/win32/adsi/adsi-simple-data-types - - if attrType in [ADSTYPE_DN_STRING, ADSTYPE_CASE_IGNORE_STRING, ADSTYPE_CASE_IGNORE_STRING, ADSTYPE_PRINTABLE_STRING, ADSTYPE_NUMERIC_STRING, ADSTYPE_OBJECT_CLASS]: - offsets = structure.uint32[numValues](self.fh) - - for v in range(numValues): - self.fh.seek(fileAttrOffset + offsets[v]) # this can also be a negative offset, e.g. 
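# Illustrative sketch (not part of the patch): the snapshot header stores the 64-bit
# offset of the mapping section as two 32-bit halves, which the parser recombines as
# (high << 32) | low. The same idea on raw bytes, assuming the two halves sit next to
# each other little-endian:
import struct

def combine_offset(raw8: bytes) -> int:
    low, high = struct.unpack('<II', raw8)
    return (high << 32) | low

# combine_offset((0x1_0000_0400).to_bytes(8, 'little')) -> 0x100000400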
referencing data in a previous object - val = structure.wchar[None](self.fh) - values.append(val) - - elif attrType == ADSTYPE_OCTET_STRING: - lengths = structure.uint32[numValues](self.fh) - - for v in range(numValues): - octetStr = structure.char[lengths[v]](self.fh) - val = octetStr - - if not raw: - if len(octetStr) == 16 and attrName.endswith("guid"): - val = str(uuid.UUID(bytes_le=octetStr)) - elif attrName == 'objectsid': - val = str(LdapSid(BytesIO(octetStr))) - - values.append(val) - - elif attrType == ADSTYPE_BOOLEAN: - assert numValues == 1, ["Multiple boolean values, verify data size", self.fileOffset, attrName] - - for v in range(numValues): - val = bool(structure.uint32(self.fh)) # not sure if uint32 is correct type here, check against more data sets - values.append(val) - - elif attrType == ADSTYPE_INTEGER: - - for v in range(numValues): - # defined as DWORD, so reading as uint32 (unsigned) - val = structure.uint32(self.fh) - values.append(val) - - elif attrType == ADSTYPE_LARGE_INTEGER: - - for v in range(numValues): - # defined as LARGE_INTEGER, interestingly this is an int64 (signed) according to MS docs - val = structure.int64(self.fh) - values.append(val) - - elif attrType == ADSTYPE_UTC_TIME: # note that date/times can also be returned as Interval type instead (ADSTYPE_LARGE_INTEGER) - actual time units depend on which attribute is using it - - for v in range(numValues): - systime = SystemTime(self.snap) - val = systime.unixtimestamp - values.append(val) - - elif attrType == ADSTYPE_NT_SECURITY_DESCRIPTOR: - - for v in range(numValues): - lenDescriptorBytes = structure.uint32(self.fh) - descriptorBytes = self.fh.read(lenDescriptorBytes) - values.append(descriptorBytes) - - else: - if self.log: - self.log.warn("Unhandled adsType: %s -> %d" % (attrName, attrType)) - - return values - -class Object(WrapStruct): - def __init__(self, snap=None, in_obj=None): - super().__init__(snap, in_obj) - - self.fileOffset = self.fh.tell() - 4 - 4 - (self.tableSize * 8) - self.fh.seek(self.fileOffset + self.objSize) # move file pointer to the next object - - self.attributes = AttributeDict(self, raw=False) - self.raw_attributes = AttributeDict(self, raw=True) - - self.getObjectClasses = functools.lru_cache()(self.getObjectClasses) - self.getObjectCategory = functools.lru_cache()(self.getObjectCategory) - - def getObjectClasses(self): - return list(map(str.casefold, self.attributes.get('objectClass', []))) - - def getObjectCategory(self): - catDN = self.attributes.get('objectCategory', None) - if catDN is None: - return None - - catDN = catDN[0] - catObj = self.snap.classes.get(catDN, None) - if catObj: - return catObj.className.lower() - else: - return None - - classes = property(getObjectClasses) - category = property(getObjectCategory) - - # for easy compatibility with the bloodhound lib - def __getitem__(self, key): - if key == "attributes": - return self.attributes - elif key == "raw_attributes": - return self.raw_attributes - else: - return None - -class Property(WrapStruct): - def __init__(self, snap=None, in_obj=None): - super().__init__(snap, in_obj) - - self.propName = self.propName.rstrip('\x00') - self.DN = self.DN.rstrip('\x00') - self.schemaIDGUID = uuid.UUID(bytes_le=self.schemaIDGUID) - -class Class(WrapStruct): - def __init__(self, snap=None, in_obj=None): - super().__init__(snap, in_obj) - - self.className = self.className.rstrip('\x00') - self.DN = self.DN.rstrip('\x00') - self.schemaIDGUID = uuid.UUID(bytes_le=self.schemaIDGUID) - -class Header(WrapStruct): - def 
__init__(self, snap, in_obj=None): - super().__init__(snap, in_obj) - - self.server = self.server.rstrip('\x00') - self.mappingOffset = (self.fileoffsetHigh << 32) | self.fileoffsetLow - self.filetimeUnix = bh_dt_convert(self.filetime) #ADUtils.win_timestamp_to_unix(self.filetime) - -class Snapshot(object): - def __init__(self, fh, log=None): - self.fh = fh - self.log = log - self.objectOffsets = {} - - # the order in which we're parsing matters, due to the file handle's position - # typically, you would call as follows: - - # self.parseHeader() - # self.parseObjectOffsets() - # self.parseProperties() - # self.parseClasses() - # self.parseRights() - - def parseHeader(self): - self.fh.seek(0) - self.header = Header(self) - - def parseObjectOffsets(self): - self.fh.seek(0x43e) - - # we are only keeping offsets at this stage, as some databases grow very big - - if self.log: - prog = self.log.progress(f"Parsing object offsets", rate=0.1) - - self.objectOffsets = [] - for i in range(self.header.numObjects): - pos = self.fh.tell() - objSize = struct.unpack(" Date: Thu, 7 Dec 2023 10:13:12 +0100 Subject: [PATCH 3/6] some more fix --- msldap/external/bloodhoundpy/acls.py | 56 ++++++++++++--------- msldap/external/bloodhoundpy/lib/cstruct.py | 2 +- 2 files changed, 32 insertions(+), 26 deletions(-) diff --git a/msldap/external/bloodhoundpy/acls.py b/msldap/external/bloodhoundpy/acls.py index 33b4bf8..e101a3b 100644 --- a/msldap/external/bloodhoundpy/acls.py +++ b/msldap/external/bloodhoundpy/acls.py @@ -21,15 +21,15 @@ # SOFTWARE. # #################### -from __future__ import unicode_literals -import logging -from multiprocessing import Pool +#from __future__ import unicode_literals +import re +import binascii +from msldap import logger from msldap.external.bloodhoundpy.lib.cstruct import * from io import BytesIO -from future.utils import iteritems, native_str +#from future.utils import iteritems, native_str from struct import unpack, pack -import re -import binascii + def bin_to_string(uuid): uuid1, uuid2, uuid3 = unpack('" % (self.mask, ' | '.join(out)) @@ -593,7 +598,8 @@ def __init__(self, fh): def __repr__(self): out = [] - for name, value in iteritems(vars(ACE)): + #for name, value in iteritems(vars(ACE)): + for name, value in vars(ACE): if not name.startswith('_') and type(value) is int and self.has_flag(value): out.append(name) return "" % (self.ace.AceType, ' | '.join(out), self.ace.AceFlags, str(self.acedata)) diff --git a/msldap/external/bloodhoundpy/lib/cstruct.py b/msldap/external/bloodhoundpy/lib/cstruct.py index 3314e40..0de861d 100644 --- a/msldap/external/bloodhoundpy/lib/cstruct.py +++ b/msldap/external/bloodhoundpy/lib/cstruct.py @@ -22,7 +22,7 @@ # - Rework definition parsing, maybe pycparser? # - Change expression implementation # - Lazy reading? 
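# Note on the vars(...) change in acls.py above: replacing iteritems(vars(...)) with a
# bare vars(...) iterates only the dictionary keys, so unpacking into (name, value)
# will fail with ValueError the first time __repr__ is evaluated. The drop-in
# Python 3 idiom is vars(...).items(), e.g.:
class FLAGS:
    READ = 0x01
    WRITE = 0x02

mask = 0x03
print([name for name, value in vars(FLAGS).items()
       if not name.startswith('_') and isinstance(value, int) and mask & value == value])
# -> ['READ', 'WRITE']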
-from __future__ import print_function + import re import sys import ast From 2ce4b4a382e8f580e495543fa5f74ce092eae1c8 Mon Sep 17 00:00:00 2001 From: SkelSec Date: Fri, 8 Dec 2023 01:48:18 +0100 Subject: [PATCH 4/6] more bh fixes --- msldap/bloodhound.py | 5 +++-- msldap/ldap_objects/adcomp.py | 7 +++++-- msldap/ldap_objects/adgroup.py | 2 +- msldap/ldap_objects/adou.py | 2 +- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/msldap/bloodhound.py b/msldap/bloodhound.py index d9785eb..7c8a680 100644 --- a/msldap/bloodhound.py +++ b/msldap/bloodhound.py @@ -417,8 +417,9 @@ async def resolve_gplink(self, gplinks): for gplink_dn, options in parse_gplink_string(gplinks): link = {} link['IsEnforced'] = options == 2 - if reverse_dn_components(gplink_dn.upper()) in self.DNs: - lguid = self.DNs[reverse_dn_components(gplink_dn.upper())]['ObjectIdentifier'] + gplink_dn = gplink_dn.upper() + if gplink_dn in self.DNs: + lguid = self.ocache[self.DNs[gplink_dn]]['ObjectIdentifier'] else: attrs, err = await self.connection.dnattrs(gplink_dn.upper(), ['objectGUID', 'objectSid']) if err is not None: diff --git a/msldap/ldap_objects/adcomp.py b/msldap/ldap_objects/adcomp.py index 841e601..9ee0c74 100644 --- a/msldap/ldap_objects/adcomp.py +++ b/msldap/ldap_objects/adcomp.py @@ -262,6 +262,9 @@ def to_bh(self, domain): alloweddeleg = self.allowedtodelegateto if alloweddeleg is None: alloweddeleg = [] + compname = self.dNSHostName + if compname is None or compname == '': + compname = '%s.%s' % (self.sAMAccountName[:-1].upper(), domain.upper()) return { '_allowedtoactonbehalfofotheridentity' : actonbehalf, '_dns' : self.dNSHostName, @@ -278,12 +281,12 @@ def to_bh(self, domain): "Status": None, # no idea what this is "DumpSMSAPassword" : [], 'Properties' : { - 'name' : self.name, + 'name' : compname, 'domain' : domain, 'domainsid' : str(self.objectSid).rsplit('-',1)[0] , 'distinguishedname' : str(self.distinguishedName).upper(), 'unconstraineddelegation' : self.uac_to_textflag('UAC_TRUSTED_FOR_DELEGATION'), - 'enabled' : MSLDAP_UAC.ACCOUNTDISABLE in uac, + 'enabled' : MSLDAP_UAC.ACCOUNTDISABLE not in uac, 'trustedtoauth' : MSLDAP_UAC.TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION in uac, 'samaccountname' : self.sAMAccountName , 'haslaps' : self.ms_Mcs_AdmPwdExpirationTime is not None, diff --git a/msldap/ldap_objects/adgroup.py b/msldap/ldap_objects/adgroup.py index 8039686..1f6d832 100644 --- a/msldap/ldap_objects/adgroup.py +++ b/msldap/ldap_objects/adgroup.py @@ -123,7 +123,7 @@ def is_highvalue(sid:str): "IsDeleted": bool(self.isDeleted), "IsACLProtected": False , # Post processing 'Properties' : { - 'name' : self.name, + 'name' : '%s@%s' % (self.name.upper(), domain.upper()), 'domain' : domain, 'domainsid' : str(self.objectSid).rsplit('-',1)[0] , 'distinguishedname' : str(self.distinguishedName).upper(), diff --git a/msldap/ldap_objects/adou.py b/msldap/ldap_objects/adou.py index 8972f54..14f5e27 100644 --- a/msldap/ldap_objects/adou.py +++ b/msldap/ldap_objects/adou.py @@ -108,7 +108,7 @@ def to_bh(self, domain, domainsid): "IsDeleted": bool(self.isDeleted), "IsACLProtected": False , # Post processing 'Properties' : { - 'name' : self.name, + 'name' : '%s@%s' % (self.name.upper(), domain.upper()), 'domain' : domain, 'domainsid' : domainsid, 'distinguishedname' : str(self.distinguishedName).upper(), From 91bc549b295c17d5603367b5ee7f0c521ddc2ab8 Mon Sep 17 00:00:00 2001 From: SkelSec Date: Fri, 8 Dec 2023 20:59:42 +0100 Subject: [PATCH 5/6] compname fix, adding new sample to setup.py --- 
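# Illustrative sketch (not part of the patch): the adcomp.py fix above derives
# BloodHound's "enabled" property from userAccountControl - an account is enabled
# when the ACCOUNTDISABLE bit (0x2) is NOT set; the earlier code had the test
# inverted. The check on a raw UAC value:
ACCOUNTDISABLE = 0x0002

def is_enabled(uac: int) -> bool:
    return not (uac & ACCOUNTDISABLE)

print(is_enabled(0x1000))         # WORKSTATION_TRUST_ACCOUNT only -> True (enabled)
print(is_enabled(0x1000 | 0x2))   # same account with the disabled bit set -> False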
.github/workflows/python-app.yml | 66 -------------------------------- msldap/ldap_objects/adcomp.py | 5 +-- setup.py | 1 + 3 files changed, 2 insertions(+), 70 deletions(-) delete mode 100644 .github/workflows/python-app.yml diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml deleted file mode 100644 index 497135e..0000000 --- a/.github/workflows/python-app.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Build Windows Executable - PyInstaller -# Description: -# Most of my projects come with a build.bat script that uses PyInstaller to freeze the examples -# to an executable file. This Action will set up the envrionment and run this build.bat script, -# then upload the resulting executables to a google cloud bucket. -# Additionally the executables will be compressed and encrypted using 7z - -on: - push: - branches: - - master # Trigger on push to master branch - -jobs: - build: - runs-on: windows-latest # Use a Windows runner - permissions: - contents: 'read' - id-token: 'write' - - steps: - - uses: 'actions/checkout@v4' - with: - fetch-depth: 0 - - id: 'auth' - uses: 'google-github-actions/auth@v1' - with: - credentials_json: '${{ secrets.GCLOUD_BUCKET_SERVICE_USER_SECRET }}' - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: '3.9' - - - name: Determine Version - id: version - run: | - $VERSION = python -c "import sys; sys.path.append('${{ github.event.repository.name }}'); import _version; print(_version.__version__)" - echo "PROJVERSION=$VERSION" >> $GITHUB_STATE - shell: powershell - - - name: Install Dependencies - run: | - python -m pip install --upgrade pip - pip install pyinstaller virtualenv - - - name: Run Batch File to Build Executable - run: builder\pyinstaller\build.bat - - - name: Compress executables - run: | - "C:\Program Files\7-Zip\7z.exe a secure.7z builder\pyinstaller\*.exe -pprotected" - - - name: Upload Executable - uses: actions/upload-artifact@v2 - with: - name: executable - path: builder\pyinstaller\*.exe - - - name: 'Set up Cloud SDK' - uses: 'google-github-actions/setup-gcloud@v1' - with: - version: '>= 390.0.0' - - - name: Upload Executables to GCS - run: | - gsutil cp builder\pyinstaller\*.exe gs://skelsec-github-foss/${{ github.event.repository.name }}/${{ github.core.saveState.PROJVERSION }}/ diff --git a/msldap/ldap_objects/adcomp.py b/msldap/ldap_objects/adcomp.py index 9ee0c74..cab3cdb 100644 --- a/msldap/ldap_objects/adcomp.py +++ b/msldap/ldap_objects/adcomp.py @@ -262,9 +262,6 @@ def to_bh(self, domain): alloweddeleg = self.allowedtodelegateto if alloweddeleg is None: alloweddeleg = [] - compname = self.dNSHostName - if compname is None or compname == '': - compname = '%s.%s' % (self.sAMAccountName[:-1].upper(), domain.upper()) return { '_allowedtoactonbehalfofotheridentity' : actonbehalf, '_dns' : self.dNSHostName, @@ -281,7 +278,7 @@ def to_bh(self, domain): "Status": None, # no idea what this is "DumpSMSAPassword" : [], 'Properties' : { - 'name' : compname, + 'name' : '%s@%s' % (self.sAMAccountName.upper(), domain.upper()), 'domain' : domain, 'domainsid' : str(self.objectSid).rsplit('-',1)[0] , 'distinguishedname' : str(self.distinguishedName).upper(), diff --git a/setup.py b/setup.py index 7e341b3..85ca6bd 100644 --- a/setup.py +++ b/setup.py @@ -60,6 +60,7 @@ entry_points={ 'console_scripts': [ 'msldap = msldap.examples.msldapclient:main', + 'msldap-bloodhound = msldap.examples.msldapbloodhound:main', ], } ) From 3c1b2905475974c754af483c793cc5621622d22a Mon Sep 17 00:00:00 2001 From: Skelsec 
Date: Sat, 9 Dec 2023 15:32:41 +0100 Subject: [PATCH 6/6] upping reqs --- setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 85ca6bd..53170a2 100644 --- a/setup.py +++ b/setup.py @@ -48,10 +48,10 @@ ], install_requires=[ 'unicrypto>=0.0.10', - 'asyauth>=0.0.16', - 'asysocks>=0.2.9', + 'asyauth>=0.0.18', + 'asysocks>=0.2.11', 'asn1crypto>=1.3.0', - 'winacl>=0.1.7', + 'winacl>=0.1.8', 'prompt-toolkit>=3.0.2', 'tqdm', 'wcwidth',
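# Illustrative sketch (not part of the patch): the console_scripts entry added to
# setup.py earlier in this series ('msldap-bloodhound = msldap.examples.msldapbloodhound:main')
# makes setuptools generate a shell command roughly equivalent to this stub - it
# imports the module and calls its main() function:
import sys
from msldap.examples.msldapbloodhound import main

if __name__ == '__main__':
    sys.exit(main())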