diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml deleted file mode 100644 index 497135e..0000000 --- a/.github/workflows/python-app.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Build Windows Executable - PyInstaller -# Description: -# Most of my projects come with a build.bat script that uses PyInstaller to freeze the examples -# to an executable file. This Action will set up the envrionment and run this build.bat script, -# then upload the resulting executables to a google cloud bucket. -# Additionally the executables will be compressed and encrypted using 7z - -on: - push: - branches: - - master # Trigger on push to master branch - -jobs: - build: - runs-on: windows-latest # Use a Windows runner - permissions: - contents: 'read' - id-token: 'write' - - steps: - - uses: 'actions/checkout@v4' - with: - fetch-depth: 0 - - id: 'auth' - uses: 'google-github-actions/auth@v1' - with: - credentials_json: '${{ secrets.GCLOUD_BUCKET_SERVICE_USER_SECRET }}' - - - name: Set up Python - uses: actions/setup-python@v2 - with: - python-version: '3.9' - - - name: Determine Version - id: version - run: | - $VERSION = python -c "import sys; sys.path.append('${{ github.event.repository.name }}'); import _version; print(_version.__version__)" - echo "PROJVERSION=$VERSION" >> $GITHUB_STATE - shell: powershell - - - name: Install Dependencies - run: | - python -m pip install --upgrade pip - pip install pyinstaller virtualenv - - - name: Run Batch File to Build Executable - run: builder\pyinstaller\build.bat - - - name: Compress executables - run: | - "C:\Program Files\7-Zip\7z.exe a secure.7z builder\pyinstaller\*.exe -pprotected" - - - name: Upload Executable - uses: actions/upload-artifact@v2 - with: - name: executable - path: builder\pyinstaller\*.exe - - - name: 'Set up Cloud SDK' - uses: 'google-github-actions/setup-gcloud@v1' - with: - version: '>= 390.0.0' - - - name: Upload Executables to GCS - run: | - gsutil cp builder\pyinstaller\*.exe gs://skelsec-github-foss/${{ github.event.repository.name }}/${{ github.core.saveState.PROJVERSION }}/ diff --git a/msldap/bloodhound.py b/msldap/bloodhound.py new file mode 100644 index 0000000..7c8a680 --- /dev/null +++ b/msldap/bloodhound.py @@ -0,0 +1,779 @@ +import os +import zipfile +import json +import base64 +import datetime +import asyncio + +from tqdm import tqdm + +from msldap.external.bloodhoundpy.acls import parse_binary_acl +from msldap.external.bloodhoundpy.resolver import resolve_aces, WELLKNOWN_SIDS +from msldap.external.bloodhoundpy.utils import parse_gplink_string, is_filtered_container, is_filtered_container_child, reverse_dn_components, explode_dn +from msldap.commons.factory import LDAPConnectionFactory +from msldap.connection import MSLDAPClientConnection +from msldap.client import MSLDAPClient +from msldap.commons.adexplorer import Snapshot +from msldap import logger + +async def dummy_print(msg): + print(msg) + +class MSLDAPDump2Bloodhound: + def __init__(self, url: str or MSLDAPClient or LDAPConnectionFactory or MSLDAPClientConnection, progress = True, output_path = None, use_mp:bool=True, print_cb = None): + self.debug = False + self.ldap_url = url + self.connection: MSLDAPClient = None + self.ldapinfo = None + self.domainname = None + self.domainsid = None + self.use_mp = use_mp + self.mp_sdbatch_length = 5000 + self.print_cb = print_cb + self.with_progress = progress + if self.print_cb is None: + self.print_cb = dummy_print + + self.DNs = {} + self.DNs_sorted = {} + self.ocache = {} + self.schema = {} + 
self.aces = {} + self.computer_sidcache = {} + self.token_map = {} + + self.curdate = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S') + self.zipfilepath = '%s_Bloodhound.zip' % self.curdate + if output_path is not None: + self.zipfilepath = os.path.join(output_path, self.zipfilepath) + self.zipfile = None + self.MAX_ENTRIES_PER_FILE = 40000 + + self.totals = { + 'user' : 0, + 'computer' : 0, + 'group' : 0, + 'ou' : 0, + 'gpo' : 0, + 'container' : 0, + 'domain' : 0, + 'trust' : 0 + } + + async def print(self, msg:str): + await self.print_cb(msg) + + + async def create_progress(self, label, total = None): + if self.with_progress is True: + return tqdm(desc = label, total=total) + else: + await self.print('[+] %s' % label) + return None + + async def update_progress(self, pbar, value = 1): + if pbar is None: + return + if self.with_progress is True: + pbar.update(value) + + async def close_progress(self, pbar): + if pbar is None: + return + if self.with_progress is True: + pbar.close() + + def get_json_wrapper(self, enumtype): + return { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': enumtype, + 'version': 5, + 'count': 0 + } + } + + + def split_json(self, enumtype, data): + if data['meta']['count'] <= self.MAX_ENTRIES_PER_FILE: + yield data + return + + #split the data + for i in range(0, data['meta']['count'], self.MAX_ENTRIES_PER_FILE): + jsonstruct = { + 'data' : [], + 'meta': { + 'methods' : 0, + 'type': enumtype, + 'version': 5, + 'count': 0 + } + } + for entry in data['data'][i:i+self.MAX_ENTRIES_PER_FILE]: + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + yield jsonstruct + + + async def write_json_to_zip(self, enumtype, data, filepart = 0): + if filepart == 0: + filename = '%s_%s.json' % (self.curdate, enumtype) + else: + filename = '%s_%s_%02d.json' % (self.curdate, enumtype, filepart) + self.zipfile.writestr(filename, json.dumps(data)) + + + async def lookup_dn_children(self, parent_dn): + parent_dn = parent_dn.upper() + parent_dn_reversed = reverse_dn_components(parent_dn) + if parent_dn not in self.DNs: + logger.debug('[BH] DN not found: %s' % parent_dn_reversed) + return [] + + branch = self.DNs_sorted + level = 0 + for part in explode_dn(parent_dn_reversed): + level += 1 + if part not in branch: + logger.debug('[BH] Part not found: %s Full: %s Branch: %s Level: %s Parts: %s' % (part, parent_dn_reversed, branch.keys(), level, explode_dn(parent_dn_reversed))) + return [] + branch = branch[part] + + res_dns = [] + for dnpart in branch: + res_dns.append(dnpart + ',' + parent_dn) + + results = [] + for tdn in res_dns: + if is_filtered_container_child(tdn): + continue + if tdn not in self.DNs: + attrs, err = await self.connection.dnattrs(tdn, ['distinguishedName','objectGUID', 'objectClass','sAMAaccountType', 'sAMAccountName', 'objectSid', 'name']) + if err is not None: + raise err + if attrs is None or len(attrs) == 0: + logger.debug('[BH] Missing DN: %s' % tdn) + continue + res = self.resolve_entry(attrs) + results.append({ + 'ObjectIdentifier': res['objectid'].upper(), + 'ObjectType': res['type'].capitalize(), + }) + continue + entry = self.ocache[self.DNs[tdn]] + results.append({ + 'ObjectIdentifier': entry['ObjectIdentifier'].upper(), + 'ObjectType': entry['ObjectType'].capitalize() if entry['ObjectType'].lower() != 'ou' else 'OU', + }) + + return results + + async def dump_schema(self): + pbar = await self.create_progress('Dumping schema') + # manual stuff here... 
+ # https://learn.microsoft.com/en-us/windows/win32/adschema/c-foreignsecurityprincipal + self.schema['foreignsecurityprincipal'] = '89e31c12-8530-11d0-afda-00c04fd930c9' + + async for entry, err in self.connection.get_all_schemaentry(['name', 'schemaIDGUID']): + if err is not None: + raise err + await self.update_progress(pbar) + self.schema[entry.name.lower()] = str(entry.schemaIDGUID) + await self.close_progress(pbar) + + def add_ocache(self, dn, objectid, principal, otype, dns = '', spns = None): + self.totals[otype] += 1 + if objectid in WELLKNOWN_SIDS: + objectid = '%s-%s' % (self.domainname.upper(), objectid.upper()) + self.ocache[objectid] = { + 'dn' : dn.upper(), + 'ObjectIdentifier' : objectid, + 'principal' : principal, + 'ObjectType' : otype, + } + self.DNs[dn.upper()] = objectid + if otype == 'computer': + entry = { + 'ObjectIdentifier' : objectid, + 'ObjectType' : otype + } + if dns is None: + dns = '' + self.computer_sidcache[dns.lower()] = entry + if spns is not None: + for spn in spns: + target = spn.split('/')[1] + target = target.split(':')[0] + self.computer_sidcache[target.lower()] = entry + + def resolve_entry(self, entry): + # I really REALLY did not want to implement this + resolved = {} + account = entry.get('sAMAccountName', '') + dn = entry.get('distinguishedName', '') + resolved['objectid'] = entry.get('objectSid', '') + resolved['principal'] = ('%s@%s' % (account, self.domainname)).upper() + if 'sAMAaccountName' in entry: + accountType = entry['sAMAccountType'] + object_class = entry['objectClass'] + if accountType in [268435456, 268435457, 536870912, 536870913]: + resolved['type'] = 'Group' + elif accountType in [805306368] or \ + 'msDS-GroupManagedServiceAccount' in object_class or \ + 'msDS-ManagedServiceAccount' in object_class: + resolved['type'] = 'User' + elif accountType in [805306369]: + resolved['type'] = 'Computer' + short_name = account.rstrip('$') + resolved['principal'] = ('%s.%s' % (short_name, self.domainname)).upper() + elif accountType in [805306370]: + resolved['type'] = 'trustaccount' + else: + resolved['type'] = 'Domain' + return resolved + + if 'objectGUID' in entry: + resolved['objectid'] = entry['objectGUID'] + resolved['principal'] = ('%s@%s' % (entry.get('name', ''), self.domainname)).upper() + object_class = entry.get('objectClass', []) + if 'organizationalUnit' in object_class: + resolved['type'] = 'OU' + elif 'container' in object_class: + resolved['type'] = 'Container' + else: + resolved['type'] = 'Base' + return resolved + + async def dump_lookuptable(self): + pbar = await self.create_progress('Generating lookuptable') + # domains + adinfo, err = await self.connection.get_ad_info() + if err is not None: + raise err + self.domainsid = adinfo.objectSid + self.add_ocache(adinfo.distinguishedName, adinfo.objectSid, '', 'domain') + await self.update_progress(pbar) + + #trusts + async for entry, err in self.connection.get_all_trusts(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'trust') + await self.update_progress(pbar) + + #users + async for entry, err in self.connection.get_all_users(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName']): + if err is not None: + raise err + short_name = entry.sAMAccountName + self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'user') + await self.update_progress(pbar) + + #machines + async for entry, err in 
self.connection.get_all_machines(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName', 'dNSHostName', 'servicePrincipalName']): + if err is not None: + raise err + short_name = entry.sAMAccountName + dns = entry.dNSHostName + if dns is None: + dns = '' + + self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'computer', dns, entry.servicePrincipalName) + await self.update_progress(pbar) + + #groups + async for entry, err in self.connection.get_all_groups(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectSid, '', 'group') + await self.update_progress(pbar) + + #ous + async for entry, err in self.connection.get_all_ous(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'ou') + await self.update_progress(pbar) + + #containers + async for entry, err in self.connection.get_all_containers(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + if is_filtered_container(entry.distinguishedName): + continue + self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'container') + await self.update_progress(pbar) + + #gpos + async for entry, err in self.connection.get_all_gpos(['distinguishedName', 'objectSid', 'objectGUID']): + if err is not None: + raise err + self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'gpo') + await self.update_progress(pbar) + + #foreignsecurityprincipal + async for entry, err in self.connection.get_all_foreignsecurityprincipals(['name','sAMAccountName', 'objectSid', 'objectGUID', 'distinguishedName', 'objectClass']): + if err is not None: + raise err + bhentry = {} + entry = entry['attributes'] + if 'container' in entry.get('objectClass', []): + continue + + if entry['objectSid'] in WELLKNOWN_SIDS: + bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) + bhentry['principal'] = self.domainname.upper() + bhentry['type'] = 'foreignsecurityprincipal' + if 'name' in entry: + if entry['name'] in WELLKNOWN_SIDS: + gname, sidtype = WELLKNOWN_SIDS[entry['name']] + bhentry['type'] = sidtype.capitalize() + bhentry['principal'] = '%s@%s' % (gname.upper(), self.domainname.upper()) + bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) + else: + bhentry['objectid'] = entry['name'] + + self.ocache[bhentry['objectid']] = { + 'dn' : entry['distinguishedName'].upper(), + 'ObjectIdentifier' : bhentry['objectid'], + 'principal' : bhentry['principal'], + 'ObjectType' : bhentry['type'], + } + self.DNs[entry['distinguishedName'].upper()] = bhentry['objectid'] + + await self.close_progress(pbar) + + for dn in [reverse_dn_components(dn) for dn in self.DNs]: + branch = self.DNs_sorted + for part in explode_dn(dn): + if part not in branch: + branch[part.upper()] = {} + branch = branch[part.upper()] + + if self.debug is True: + with open('dn.json', 'w') as f: + json.dump(self.DNs, f, indent=4) + + with open('dntree.json', 'w') as f: + json.dump(self.DNs_sorted, f, indent=4) + + async def dump_acls(self): + sdbatch = [] + tasks = [] + pbar = await self.create_progress('Dumping SDs', total=len(self.ocache)) + for sid in self.ocache: + dn = self.ocache[sid]['dn'] + secdesc, err = await self.connection.get_objectacl_by_dn(dn) + if err is not None: + raise err + dn = dn.upper() + oentry = { + 'IsACLProtected' : None, + 'Properties' : { + 'haslaps' : 
'ms-mcs-admpwd' in self.schema + } + } + otype = self.ocache[sid]['ObjectType'] + if otype == 'trust': + continue + if otype == 'ou': + otype = 'organizational-unit' + if dn.upper() not in self.aces: + if self.use_mp is True: + from concurrent.futures import ProcessPoolExecutor + sdbatch.append((dn, oentry, otype.lower(), secdesc, self.schema)) + if len(sdbatch) > self.mp_sdbatch_length: + loop = asyncio.get_running_loop() + with ProcessPoolExecutor() as executor: + for sde in sdbatch: + tasks.append(loop.run_in_executor(executor, parse_binary_acl, *sde)) + results = await asyncio.gather(*tasks) + for dn, aces, relations in results: + self.aces[dn.upper()] = (aces, relations) + sdbatch = [] + tasks = [] + else: + dn, aces, relations = parse_binary_acl(dn, oentry, otype.lower(), secdesc, self.schema) + self.aces[dn.upper()] = (aces, relations) + await self.update_progress(pbar) + + if len(sdbatch) != 0: + loop = asyncio.get_running_loop() + with ProcessPoolExecutor() as executor: + for sde in sdbatch: + tasks.append(loop.run_in_executor(executor, parse_binary_acl, *sde)) + results = await asyncio.gather(*tasks) + for dn, aces, relations in results: + self.aces[dn.upper()] = (aces, relations) + sdbatch = [] + tasks = [] + await self.close_progress(pbar) + + async def resolve_gplink(self, gplinks): + if gplinks is None: + return [] + + links = [] + for gplink_dn, options in parse_gplink_string(gplinks): + link = {} + link['IsEnforced'] = options == 2 + gplink_dn = gplink_dn.upper() + if gplink_dn in self.DNs: + lguid = self.ocache[self.DNs[gplink_dn]]['ObjectIdentifier'] + else: + attrs, err = await self.connection.dnattrs(gplink_dn.upper(), ['objectGUID', 'objectSid']) + if err is not None: + raise err + if attrs is None or len(attrs) == 0: + logger.debug('[BH] Missing DN: %s' % gplink_dn) + continue + try: + lguid = attrs['objectGUID'] + except: + logger.debug('[BH] Missing GUID for %s' % gplink_dn) + continue + link['GUID'] = lguid.upper() + links.append(link) + return links + + def remove_hidden(self, entry): + to_del = [] + for k in entry: + if k.startswith('_'): + to_del.append(k) + for k in to_del: + del entry[k] + return entry + + async def dump_domains(self): + pbar = await self.create_progress('Dumping domains', self.totals['domain']) + adinfo, err = await self.connection.get_ad_info() + if err is not None: + raise err + + domainentry = adinfo.to_bh(self.domainname) + + meta, relations = self.aces[domainentry['Properties']['distinguishedname'].upper()] + domainentry['IsACLProtected'] = meta['IsACLProtected'] + domainentry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + domainentry['ChildObjects'] = await self.lookup_dn_children(domainentry['Properties']['distinguishedname']) + domainentry['Links'] = await self.resolve_gplink(domainentry['_gPLink']) + + jsonstruct = self.get_json_wrapper('domains') + filectr = 0 + async for entry, err in self.connection.get_all_trusts(): + if err is not None: + raise err + domainentry['Trusts'].append(entry.to_bh()) + + domainentry = self.remove_hidden(domainentry) + jsonstruct['data'].append(domainentry) + jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('domains', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('domains') + filectr += 1 + await self.update_progress(pbar) + + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('domains', jsonstruct, filectr) + await self.close_progress(pbar) + if self.debug is 
True: + with open('domains.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_users(self): + pbar = await self.create_progress('Dumping users', self.totals['user']) + + jsonstruct = self.get_json_wrapper('users') + filectr = 0 + async for ldapentry, err in self.connection.get_all_users(): + if err is not None: + raise err + + entry = ldapentry.to_bh(self.domainname) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + + if entry['_allowerdtodelegateto'] is not None: + seen = {} + for host in entry['_allowerdtodelegateto']: + try: + target = host.split('/')[1] + target = target.split(':')[0] + except IndexError: + logger.debug('[BH] Invalid delegation target: %s', host) + continue + try: + sid = self.computer_sidcache[target.lower()] + if sid['ObjectIdentifier'] in seen: + continue + seen[sid['ObjectIdentifier']] = 1 + entry['AllowedToDelegate'].append(sid) + except KeyError: + if '.' in target: + entry['AllowedToDelegate'].append(target.upper()) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('users', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('users') + filectr += 1 + await self.update_progress(pbar) + + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('users', jsonstruct, filectr) + await self.close_progress(pbar) + + if self.debug is True: + with open('users.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_computers(self): + pbar = await self.create_progress('Dumping computers', self.totals['computer']) + jsonstruct = self.get_json_wrapper('computers') + filectr = 0 + async for ldapentry, err in self.connection.get_all_machines(): + entry = ldapentry.to_bh(self.domainname) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + + if entry['_allowedtoactonbehalfofotheridentity'] is not None: + allowedacl = base64.b64decode(entry['_allowedtoactonbehalfofotheridentity']) + _, entryres, relations = parse_binary_acl(entry['Properties']['distinguishedname'].upper(), entry, 'computer', allowedacl, self.schema) + + for ace in resolve_aces(relations, self.domainname, self.domainsid, self.ocache): + if ace['RightName'] == 'Owner': + continue + if ace['RightName'] == 'GenericAll': + entryres['AllowedToAct'].append({ + 'ObjectIdentifier': ace['PrincipalSID'], + 'ObjectType': ace['PrincipalType'].capitalize() + }) + + del entry['_allowedtoactonbehalfofotheridentity'] + if entry['Properties']['allowedtodelegate'] is not None: + seen = {} + for host in entry['Properties']['allowedtodelegate']: + try: + target = host.split('/')[1] + target = target.split(':')[0] + except IndexError: + logger.debug('[BH] Invalid delegation target: %s', host) + continue + try: + sid = self.computer_sidcache[target.lower()] + if sid['ObjectIdentifier'] in seen: + continue + seen[sid['ObjectIdentifier']] = 1 + entry['AllowedToDelegate'].append(sid) + except KeyError: + if '.' 
in target: + entry['AllowedToDelegate'].append({ + "ObjectIdentifier": target.upper(), + "ObjectType": "Computer" + }) + + entry = self.remove_hidden(entry) + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('computers', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('computers') + filectr += 1 + await self.update_progress(pbar) + + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('computers', jsonstruct, filectr) + await self.close_progress(pbar) + + if self.debug is True: + with open('computers.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_groups(self): + pbar = await self.create_progress('Dumping groups', self.totals['group']) + jsonstruct = self.get_json_wrapper('groups') + filectr = 0 + async for ldapentry, err in self.connection.get_all_groups(): + entry = ldapentry.to_bh(self.domainname) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + + if ldapentry.member is not None: + for member in ldapentry.member: + if member.upper() in self.DNs: + oid = self.DNs[member.upper()] + entry['Members'].append({ + 'ObjectIdentifier' : self.ocache[oid]['ObjectIdentifier'], + 'ObjectType' : self.ocache[oid]['ObjectType'].capitalize() + }) + else: + if member.find('ForeignSecurityPrincipals') != -1: + continue + + entry = self.remove_hidden(entry) + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('groups', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('groups') + filectr += 1 + await self.update_progress(pbar) + + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('groups', jsonstruct, filectr) + await self.close_progress(pbar) + + if self.debug is True: + with open('groups.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_gpos(self): + pbar = await self.create_progress('Dumping GPOs', self.totals['gpo']) + jsonstruct = self.get_json_wrapper('gpos') + filectr = 0 + async for ldapentry, err in self.connection.get_all_gpos(): + entry = ldapentry.to_bh(self.domainname, self.domainsid) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('gpos', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('gpos') + filectr += 1 + await self.update_progress(pbar) + + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('gpos', jsonstruct, filectr) + await self.close_progress(pbar) + + if self.debug is True: + with open('gpos.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_ous(self): + pbar = await self.create_progress('Dumping OUs', self.totals['ou']) + jsonstruct = self.get_json_wrapper('ous') + filectr = 0 + + async for ldapentry, err in self.connection.get_all_ous(): + if err is not None: + raise err + entry = ldapentry.to_bh(self.domainname, self.domainsid) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + 
entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) + entry['Links'] = await self.resolve_gplink(entry['_gPLink']) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('ous', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('ous') + filectr += 1 + + await self.update_progress(pbar) + + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('ous', jsonstruct, filectr) + await self.close_progress(pbar) + + if self.debug is True: + with open('ous.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_containers(self): + pbar = await self.create_progress('Dumping Containers', self.totals['container']) + jsonstruct = self.get_json_wrapper('containers') + filectr = 0 + async for ldapentry, err in self.connection.get_all_containers(): + if err is not None: + raise err + if is_filtered_container(ldapentry.distinguishedName): + continue + entry = ldapentry.to_bh(self.domainname, self.domainsid) + meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] + entry['IsACLProtected'] = meta['IsACLProtected'] + entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) + entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) + entry = self.remove_hidden(entry) + + jsonstruct['data'].append(entry) + jsonstruct['meta']['count'] += 1 + if jsonstruct['meta']['count'] == self.MAX_ENTRIES_PER_FILE: + await self.write_json_to_zip('containers', jsonstruct, filectr) + jsonstruct = self.get_json_wrapper('containers') + filectr += 1 + await self.update_progress(pbar) + + if jsonstruct['meta']['count'] > 0: + await self.write_json_to_zip('containers', jsonstruct, filectr) + await self.close_progress(pbar) + + if self.debug is True: + with open('containers.json', 'w') as f: + json.dump(jsonstruct, f) + + async def dump_ldap(self): + if isinstance(self.ldap_url, str): + if self.ldap_url.startswith('adexplorer://'): + self.ldap_url = self.ldap_url[13:] + await self.print('[+] Parsing ADEXPLORER Snapshot...') + self.connection = await Snapshot.from_file(self.ldap_url) + self.ldap_url = self.connection + await self.print('[+] Parsing done!') + + if isinstance(self.ldap_url, Snapshot) is False: + if isinstance(self.ldap_url, str): + factory = LDAPConnectionFactory.from_url(self.ldap_url) + self.connection = factory.get_client() + self.connection.keepalive = True + if isinstance(self.ldap_url, LDAPConnectionFactory): + self.connection = self.ldap_url.get_client() + self.connection.keepalive = True + if isinstance(self.ldap_url, MSLDAPClient): + self.connection = self.ldap_url + + if isinstance(self.ldap_url, MSLDAPClientConnection): + self.connection = MSLDAPClient(None, None, connection = self.ldap_url) + + await self.print('[+] Connecting to LDAP server') + self.connection.keepalive = True + _, err = await self.connection.connect() + if err is not None: + raise err + await self.print('[+] Connected to LDAP server') + + self.ldapinfo = self.connection.get_server_info() + self.domainname = self.ldapinfo['defaultNamingContext'].upper().replace('DC=','').replace(',','.') + else: + self.domainname = self.connection.rootdomain.upper().replace('DC=','').replace(',','.') + + await 
self.dump_schema() + await self.dump_lookuptable() + await self.dump_acls() + with zipfile.ZipFile(self.zipfilepath, 'w', zipfile.ZIP_DEFLATED) as self.zipfile: + await self.dump_domains() + await self.dump_users() + await self.dump_computers() + await self.dump_groups() + await self.dump_gpos() + await self.dump_ous() + await self.dump_containers() + + + + async def run(self): + await self.dump_ldap() \ No newline at end of file diff --git a/msldap/client.py b/msldap/client.py index c718b56..8f76b5e 100644 --- a/msldap/client.py +++ b/msldap/client.py @@ -309,6 +309,14 @@ async def get_all_containers(self, attrs:List[str] = MSADContainer_ATTRS): return yield MSADContainer.from_ldap(entry), None #self._ldapinfo logger.debug('Finished polling for entries!') + + async def get_all_foreignsecurityprincipals(self, attrs:List[str]): + ldap_filter = r'(&(objectClass=foreignSecurityPrincipal)(objectCategory=foreignSecurityPrincipal))' + async for entry, err in self.pagedsearch(ldap_filter, attrs): + if err is not None: + yield None, err + return + yield entry, None async def get_all_laps(self): """ diff --git a/msldap/commons/adexplorer.py b/msldap/commons/adexplorer.py new file mode 100644 index 0000000..fb0efb6 --- /dev/null +++ b/msldap/commons/adexplorer.py @@ -0,0 +1,833 @@ + +# The following code was heavily inspired by the +# ADExplorerSnapshot.py project created by c3c +# +# The project was licensed under MIT at the time +# of creating this script, but did not include a +# LICENSE.md file +# https://github.com/c3c/ADExplorerSnapshot.py + +import io +import struct +import enum +import calendar +from datetime import datetime +from typing import Dict, List + +from msldap import logger +from msldap.ldap_objects import MSADSchemaEntry, MSADInfo_ATTRS, MSADInfo, MSADContainer, MSADContainer_ATTRS, \ + MSADDomainTrust_ATTRS, MSADDomainTrust, MSADOU, MSADOU_ATTRS, MSADUser, MSADUser_ATTRS, MSADGroup, MSADGroup_ATTRS,\ + MSADMachine_ATTRS, MSADMachine, MSADGPO_ATTRS, MSADGPO + +from msldap.protocol.typeconversion import MSLDAP_BUILTIN_ATTRIBUTE_TYPES, LDAP_WELL_KNOWN_ATTRS + +ENCODER_SPEFIFIC_FULCTIONS = [ + 'single_bool', 'single_str', 'multi_str' +] + +class ADSTYPE(enum.Enum): + INVALID = 0 + DN_STRING = 1 + CASE_EXACT_STRING = 2 + CASE_IGNORE_STRING = 3 + PRINTABLE_STRING = 4 + NUMERIC_STRING = 5 + BOOLEAN = 6 + INTEGER = 7 + OCTET_STRING = 8 + UTC_TIME = 9 + LARGE_INTEGER = 10 + PROV_SPECIFIC = 11 + OBJECT_CLASS = 12 + CASEIGNORE_LIST = 13 + OCTET_LIST = 14 + PATH = 15 + POSTALADDRESS = 16 + TIMESTAMP = 17 + BACKLINK = 18 + TYPEDNAME = 19 + HOLD = 20 + NETADDRESS = 21 + REPLICAPOINTER = 22 + FAXNUMBER = 23 + EMAIL = 24 + NT_SECURITY_DESCRIPTOR = 25 + UNKNOWN = 26 + DN_WITH_BINARY = 27 + DN_WITH_STRING = 28 + +class SystemTime: + def __init__(self): + self.year = None + self.month = None + self.dayOfWeek = None + self.day = None + self.hour = None + self.minute = None + self.second = None + self.milliseconds = None + + @staticmethod + def from_bytes(data:bytes): + return SystemTime.from_buffer(io.BytesIO(data)) + + @staticmethod + def from_buffer(buff): + st = SystemTime() + st.year = struct.unpack(" self.max_cache_size: + self.dncache.popitem() + + def parse_entry(self, raw_entry:List[str]): + ed = {} + for line in raw_entry: + line = line.strip() + if line == '': + continue + if line.startswith('#'): + continue + + key, value = line.split(':', 1) + key = key.strip() + value = value.strip() + + if line.split(':', 1)[0].endswith('::'): + value = base64.b64decode(value) + + if key not in ed: + 
ed[key] = [] + + ed[key].append(value) + return ed + + async def parse(self): + await self.build_index() + \ No newline at end of file diff --git a/msldap/commons/utils.py b/msldap/commons/utils.py index 6f83ec7..7475964 100644 --- a/msldap/commons/utils.py +++ b/msldap/commons/utils.py @@ -49,22 +49,7 @@ def bh_dt_convert(dt:datetime.datetime): return -1 ts = max(0,int(dt.timestamp())) return ts - - -#taken from bloodhound.py -def is_filtered_container(containerdn): - if "CN=DOMAINUPDATES,CN=SYSTEM,DC=" in containerdn.upper(): - return True - if "CN=POLICIES,CN=SYSTEM,DC=" in containerdn.upper() and (containerdn.upper().startswith('CN=USER') or containerdn.upper().startswith('CN=MACHINE')): - return True - return False -def is_filtered_container_child(containerdn): - if "CN=PROGRAM DATA,DC=" in containerdn.upper(): - return True - if "CN=SYSTEM,DC=" in containerdn.upper(): - return True - return False FUNCTIONAL_LEVELS = { 0: "2000 Mixed/Native", diff --git a/msldap/examples/msldapbloodhound.py b/msldap/examples/msldapbloodhound.py index 8c646fa..5fad719 100644 --- a/msldap/examples/msldapbloodhound.py +++ b/msldap/examples/msldapbloodhound.py @@ -1,651 +1,5 @@ -import zipfile -import json -import base64 import asyncio -import datetime - -from tqdm import tqdm - -from msldap.external.bloodhoundpy.acls import parse_binary_acl -from msldap.external.bloodhoundpy.resolver import resolve_aces, WELLKNOWN_SIDS -from msldap.commons.utils import is_filtered_container, is_filtered_container_child -from msldap.commons.factory import LDAPConnectionFactory - - - -def reverse_dn_components(dn:str): - rdns = ','.join(reversed(dn.split(','))) - return rdns.upper() - -def explode_dn(dn): - parts = [] - esc = False - part = '' - - for char in dn: - if esc: - part += char - esc = False - elif char == '\\': - esc = True - part += char - elif char == ',': - if part: - parts.append(part) - part = '' - else: - part += char - - if part: - parts.append(part) - - return parts - - -def parse_gplink_string(linkstr): - if not linkstr: - return - for links in linkstr.split('[LDAP://')[1:]: - dn, options = links.rstrip('][').split(';') - yield dn, int(options) - - -class MSLDAPDump2Bloodhound: - def __init__(self, url): - self.ldap_url = url - self.connection = None - self.ldapinfo = None - self.domainname = None - self.domainsid = None - self.with_progress = True - - self.DNs = {} - self.DNs_sorted = {} - self.ocache = {} - self.schema = {} - self.aces = {} - self.computer_sidcache = {} - self.token_map = {} - - self.curdate = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S') - self.zipfilepath = '%s_Bloodhound.zip' % self.curdate - self.zipfile = None - - self.totals = { - 'user' : 0, - 'computer' : 0, - 'group' : 0, - 'ou' : 0, - 'gpo' : 0, - 'container' : 0, - 'domain' : 0, - 'trust' : 0 - } - - - def create_progress(self, label, total = None): - if self.with_progress is True: - return tqdm(desc = label, total=total) - else: - print('[+] %s' % label) - return None - - def update_progress(self, pbar, value = 1): - if pbar is None: - return - if self.with_progress is True: - pbar.update(value) - - def close_progress(self, pbar): - if pbar is None: - return - if self.with_progress is True: - pbar.close() - - async def lookup_dn_children(self, parent_dn): - parent_dn = parent_dn.upper() - parent_dn_reversed = reverse_dn_components(parent_dn) - if parent_dn not in self.DNs: - print('DN not found: %s' % parent_dn_reversed) - return [] - - branch = self.DNs_sorted - level = 0 - for part in 
explode_dn(parent_dn_reversed): - level += 1 - if part not in branch: - print('Part not found: %s Full: %s Branch: %s Level: %s Parts: %s' % (part, parent_dn_reversed, branch.keys(), level, explode_dn(parent_dn_reversed))) - return [] - branch = branch[part] - - res_dns = [] - for dnpart in branch: - res_dns.append(dnpart + ',' + parent_dn) - - results = [] - for tdn in res_dns: - if is_filtered_container_child(tdn): - continue - if tdn not in self.DNs: - print('Missing %s' % tdn) - continue - #attrs, err = await self.connection.dnattrs(tdn, ['objectGUID', 'objectClass','sAMAaccountType', 'sAMAccountName', 'objectSid']) - #print(attrs) - entry = self.ocache[self.DNs[tdn]] - results.append({ - 'ObjectIdentifier': entry['ObjectIdentifier'].upper(), - 'ObjectType': entry['ObjectType'].capitalize() if entry['ObjectType'].lower() != 'ou' else 'OU', - }) - - return results - - async def dump_schema(self): - pbar = self.create_progress('Dumping schema') - async for entry, err in self.connection.get_all_schemaentry(['name', 'schemaIDGUID']): - if err is not None: - raise err - self.update_progress(pbar) - self.schema[entry.name.lower()] = str(entry.schemaIDGUID) - self.close_progress(pbar) - - def add_ocache(self, dn, objectid, principal, otype, dns = '', spns = None): - self.totals[otype] += 1 - if objectid in WELLKNOWN_SIDS: - objectid = '%s-%s' % (self.domainname.upper(), objectid.upper()) - self.ocache[objectid] = { - 'dn' : dn.upper(), - 'ObjectIdentifier' : objectid, - 'principal' : principal, - 'ObjectType' : otype, - } - self.DNs[dn.upper()] = objectid - if otype == 'computer': - entry = { - 'ObjectIdentifier' : objectid, - 'ObjectType' : otype - } - if dns is None: - dns = '' - self.computer_sidcache[dns.lower()] = entry - if spns is not None: - for spn in spns: - target = spn.split('/')[1] - target = target.split(':')[0] - self.computer_sidcache[target.lower()] = entry - - async def dump_lookuptable(self): - pbar = self.create_progress('Generating lookuptable') - # domains - adinfo, err = await self.connection.get_ad_info() - if err is not None: - raise err - self.domainsid = adinfo.objectSid - self.add_ocache(adinfo.distinguishedName, adinfo.objectSid, '', 'domain') - self.update_progress(pbar) - - #trusts - async for entry, err in self.connection.get_all_trusts(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'trust') - self.update_progress(pbar) - - #users - async for entry, err in self.connection.get_all_users(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName']): - if err is not None: - raise err - short_name = entry.sAMAccountName - self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'user') - self.update_progress(pbar) - - #machines - async for entry, err in self.connection.get_all_machines(['distinguishedName', 'objectSid', 'objectGUID', 'sAMAccountName', 'dNSHostName', 'servicePrincipalName']): - if err is not None: - raise err - short_name = entry.sAMAccountName - dns = entry.dNSHostName - if dns is None: - dns = '' - self.add_ocache(entry.distinguishedName, entry.objectSid, ('%s@%s' % (short_name, self.domainname)).upper(), 'computer', dns, entry.servicePrincipalName) - self.update_progress(pbar) - - #groups - async for entry, err in self.connection.get_all_groups(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectSid, 
'', 'group') - self.update_progress(pbar) - - #ous - async for entry, err in self.connection.get_all_ous(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'ou') - self.update_progress(pbar) - - #containers - async for entry, err in self.connection.get_all_containers(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - if is_filtered_container(entry.distinguishedName): - continue - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'container') - self.update_progress(pbar) - - #gpos - async for entry, err in self.connection.get_all_gpos(['distinguishedName', 'objectSid', 'objectGUID']): - if err is not None: - raise err - self.add_ocache(entry.distinguishedName, entry.objectGUID, '', 'gpo') - self.update_progress(pbar) - - #foreignsecurityprincipal - query = '(&(objectClass=foreignSecurityPrincipal)(objectCategory=foreignSecurityPrincipal))' - async for entry, err in self.connection.pagedsearch(query, ['name','sAMAccountName', 'objectSid', 'objectGUID', 'distinguishedName', 'objectClass']): - bhentry = {} - entry = entry['attributes'] - if entry['objectSid'] in WELLKNOWN_SIDS: - bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) - bhentry['principal'] = self.domainname.upper() - bhentry['type'] = 'foreignsecurityprincipal' - if 'name' in entry: - if entry['name'] in WELLKNOWN_SIDS: - gname, sidtype = WELLKNOWN_SIDS[entry['name']] - bhentry['type'] = sidtype.capitalize() - bhentry['principal'] = '%s@%s' % (gname.upper(), self.domainname.upper()) - bhentry['objectid'] = '%s-%s' % (self.domainname.upper(), entry['objectSid'].upper()) - else: - bhentry['objectid'] = entry['name'] - - self.ocache[bhentry['objectid']] = { - 'dn' : entry['distinguishedName'].upper(), - 'ObjectIdentifier' : bhentry['objectid'], - 'principal' : bhentry['principal'], - 'ObjectType' : bhentry['type'], - } - self.DNs[entry['distinguishedName'].upper()] = bhentry['objectid'] - - print(entry) - - self.close_progress(pbar) - - for dn in [reverse_dn_components(dn) for dn in self.DNs]: - branch = self.DNs_sorted - for part in explode_dn(dn): - if part not in branch: - branch[part.upper()] = {} - branch = branch[part.upper()] - - with open('dn.json', 'w') as f: - json.dump(self.DNs, f, indent=4) - - with open('dntree.json', 'w') as f: - json.dump(self.DNs_sorted, f, indent=4) - - async def dump_acls(self): - pbar = self.create_progress('Dumping SDs', total=len(self.ocache)) - for sid in self.ocache: - dn = self.ocache[sid]['dn'] - secdesc, err = await self.connection.get_objectacl_by_dn(dn) - if err is not None: - raise err - dn = dn.upper() - oentry = { - 'IsACLProtected' : None, - 'Properties' : { - 'haslaps' : 'ms-mcs-admpwd' in self.schema - } - } - otype = self.ocache[sid]['ObjectType'] - if otype == 'trust': - continue - if otype == 'ou': - otype = 'organizational-unit' - if dn.upper() not in self.aces: - aces, relations = parse_binary_acl(oentry, otype.lower(), secdesc, self.schema) - self.aces[dn.upper()] = (aces, relations) - self.update_progress(pbar) - self.close_progress(pbar) - - async def resolve_gplink(self, gplinks): - if gplinks is None: - return [] - - links = [] - for gplink_dn, options in parse_gplink_string(gplinks): - link = {} - link['IsEnforced'] = options == 2 - if reverse_dn_components(gplink_dn.upper()) in self.DNs: - lguid = self.DNs[reverse_dn_components(gplink_dn.upper())]['ObjectIdentifier'] - else: - attrs, err = 
await self.connection.dnattrs(gplink_dn, ['objectGUID', 'objectSid']) - if err is not None: - raise err - lguid = attrs['objectGUID'] - link['GUID'] = lguid.upper() - links.append(link) - return links - - def remove_hidden(self, entry): - to_del = [] - for k in entry: - if k.startswith('_'): - to_del.append(k) - for k in to_del: - del entry[k] - return entry - - async def dump_domains(self): - pbar = self.create_progress('Dumping domains', self.totals['domain']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'domains', - 'version': 5, - 'count': 0 - } - } - - adinfo, err = await self.connection.get_ad_info() - if err is not None: - raise err - - domainentry = adinfo.to_bh(self.domainname) - meta, relations = self.aces[domainentry['Properties']['distinguishedname'].upper()] - domainentry['IsACLProtected'] = meta['IsACLProtected'] - domainentry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - domainentry['ChildObjects'] = await self.lookup_dn_children(domainentry['Properties']['distinguishedname']) - domainentry['Links'] = await self.resolve_gplink(domainentry['_gPLink']) - - async for entry, err in self.connection.get_all_trusts(): - if err is not None: - raise err - domainentry['Trusts'].append(entry.to_bh()) - - domainentry = self.remove_hidden(domainentry) - jsonstruct['data'].append(domainentry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_domains.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('domains.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_users(self): - pbar = self.create_progress('Dumping users', self.totals['user']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'users', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_users(): - entry = ldapentry.to_bh(self.domainname) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - - if entry['_allowerdtodelegateto'] is not None: - seen = [] - for host in entry['_allowerdtodelegateto']: - try: - target = host.split('/')[1] - target = target.split(':')[0] - except IndexError: - print('[!] Invalid delegation target: %s', host) - continue - try: - sid = self.computer_sidcache[target.lower()] - if sid['ObjectIdentifier'] in seen: - continue - seen[sid['ObjectIdentifier']] = 1 - entry['AllowedToDelegate'].append(sid) - except KeyError: - if '.' 
in target: - entry['AllowedToDelegate'].append(target.upper()) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_users.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('users.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_computers(self): - pbar = self.create_progress('Dumping computers', self.totals['computer']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'computers', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_machines(): - entry = ldapentry.to_bh(self.domainname) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - - if entry['_allowedtoactonbehalfofotheridentity'] is not None: - allowedacl = base64.b64decode(entry['_allowedtoactonbehalfofotheridentity']) - entryres, relations = parse_binary_acl(entry, 'computer', allowedacl, self.schema) - - for ace in resolve_aces(relations, self.domainname, self.domainsid, self.ocache): - if ace['RightName'] == 'Owner': - continue - if ace['RightName'] == 'GenericAll': - entryres['AllowedToAct'].append({ - 'ObjectIdentifier': ace['PrincipalSID'], - 'ObjectType': ace['PrincipalType'].capitalize() - }) - - del entry['_allowedtoactonbehalfofotheridentity'] - if entry['Properties']['allowedtodelegate'] is not None: - seen = {} - for host in entry['Properties']['allowedtodelegate']: - try: - target = host.split('/')[1] - target = target.split(':')[0] - except IndexError: - print('[!] Invalid delegation target: %s', host) - continue - try: - sid = self.computer_sidcache[target.lower()] - if sid['ObjectIdentifier'] in seen: - continue - seen[sid['ObjectIdentifier']] = 1 - entry['AllowedToDelegate'].append(sid) - except KeyError: - if '.' 
in target: - entry['AllowedToDelegate'].append({ - "ObjectIdentifier": target.upper(), - "ObjectType": "Computer" - }) - - entry = self.remove_hidden(entry) - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_computers.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('computers.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_groups(self): - pbar = self.create_progress('Dumping groups', self.totals['group']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'groups', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_groups(): - entry = ldapentry.to_bh(self.domainname) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - - if ldapentry.member is not None: - for member in ldapentry.member: - if member.upper() in self.DNs: - oid = self.DNs[member.upper()] - entry['Members'].append({ - 'ObjectIdentifier' : self.ocache[oid]['ObjectIdentifier'], - 'ObjectType' : self.ocache[oid]['ObjectType'].capitalize() - }) - else: - if member.find('ForeignSecurityPrincipals') != -1: - continue - - entry = self.remove_hidden(entry) - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_groups.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('groups.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_gpos(self): - pbar = self.create_progress('Dumping GPOs', self.totals['gpo']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'gpos', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_gpos(): - entry = ldapentry.to_bh(self.domainname, self.domainsid) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_gpos.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('gpos.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_ous(self): - pbar = self.create_progress('Dumping OUs', self.totals['ou']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'ous', - 'version': 5, - 'count': 0 - } - } - - async for ldapentry, err in self.connection.get_all_ous(): - if err is not None: - raise err - entry = ldapentry.to_bh(self.domainname, self.domainsid) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) - entry['Links'] = await self.resolve_gplink(entry['_gPLink']) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_ous.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('ous.json', 'w') as f: - 
json.dump(jsonstruct, f) - - async def dump_containers(self): - pbar = self.create_progress('Dumping Containers', self.totals['container']) - jsonstruct = { - 'data' : [], - 'meta': { - 'methods' : 0, - 'type': 'containers', - 'version': 5, - 'count': 0 - } - } - async for ldapentry, err in self.connection.get_all_containers(): - if err is not None: - raise err - if is_filtered_container(ldapentry.distinguishedName): - continue - entry = ldapentry.to_bh(self.domainname, self.domainsid) - meta, relations = self.aces[entry['Properties']['distinguishedname'].upper()] - entry['IsACLProtected'] = meta['IsACLProtected'] - entry['Aces'] = resolve_aces(relations, self.domainname, self.domainsid, self.ocache) - entry['ChildObjects'] = await self.lookup_dn_children(entry['Properties']['distinguishedname']) - entry = self.remove_hidden(entry) - - jsonstruct['data'].append(entry) - jsonstruct['meta']['count'] += 1 - self.update_progress(pbar) - - self.zipfile.writestr('%s_containers.json' % self.curdate, json.dumps(jsonstruct)) - self.close_progress(pbar) - with open('containers.json', 'w') as f: - json.dump(jsonstruct, f) - - async def dump_ldap(self): - print('[+] Connecting to LDAP server') - self.conn_url = LDAPConnectionFactory.from_url(self.ldap_url) - self.connection = self.conn_url.get_client() - self.connection.keepalive = True - _, err = await self.connection.connect() - if err is not None: - raise err - self.ldapinfo = self.connection.get_server_info() - self.domainname = self.ldapinfo['defaultNamingContext'].upper().replace('DC=','').replace(',','.') - - print('[+] Connected to LDAP serrver') - - - await self.dump_schema() - await self.dump_lookuptable() - await self.dump_acls() - with zipfile.ZipFile(self.zipfilepath, 'w', zipfile.ZIP_DEFLATED) as self.zipfile: - await self.dump_domains() - await self.dump_users() - await self.dump_computers() - await self.dump_groups() - await self.dump_gpos() - await self.dump_ous() - await self.dump_containers() - - - - async def run(self): - await self.dump_ldap() +from msldap.bloodhound import MSLDAPDump2Bloodhound async def amain(): args = parser.parse_args() @@ -655,7 +9,7 @@ async def amain(): if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Bloodhound collector for MSLDAP') - parser.add_argument('url', help='LDAP connection URL') + parser.add_argument('url', help='LDAP connection URL, or ADEXPLORER dat file path in the form adexplorer://') print(""" WARNING: This script is still in development. It is not guaranteed to provide the same results as the original Bloodhound collector. 
""") diff --git a/msldap/examples/msldapclient.py b/msldap/examples/msldapclient.py index 8d14c31..8267ccd 100644 --- a/msldap/examples/msldapclient.py +++ b/msldap/examples/msldapclient.py @@ -45,7 +45,6 @@ from msldap.wintypes.asn1.sdflagsrequest import SDFlagsRequest from tabulate import tabulate from msldap.commons.exceptions import LDAPSearchException -from msldap.commons.utils import is_filtered_container class MSLDAPClientConsole(aiocmd.PromptToolkitCmd): def __init__(self, url = None): @@ -170,346 +169,237 @@ async def do_computeraddr(self): async def do_dump(self): """Fetches ALL user and machine accounts from the domain with a LOT of attributes""" zip_filename = 'dump_%s.zip' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - sdtempname = 'sdtemp_%s.txt' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") try: await self.do_adinfo(False) await self.do_ldapinfo(False) tname = 'domain_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname users_filename = '%s.tsv' % tname - with open(sdtempname, 'w', newline = '') as sdtemp: + try: + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADInfo_ATTRS)+'\r\n') + adinfo, err = await self.connection.get_ad_info() + if err is not None: + raise err + f.write('\t'.join(adinfo.get_row(MSADInfo_ATTRS))+'\r\n') + print('Adinfo was written to %s' % users_filename) + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADInfo_ATTRS)+'\r\n') - adinfo, err = await self.connection.get_ad_info() - if err is not None: - raise err - f.write('\t'.join(adinfo.get_row(MSADInfo_ATTRS))+'\r\n') - sdtemp.write('domain:'+adinfo.distinguishedName + '\r\n') - b.write(json.dumps(adinfo.to_bh(self.adinfo.name))+'\r\n') - print('Adinfo was written to %s' % users_filename) - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'schema_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - users_filename = '%s.json' % tname + tname = 'schema_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.json' % tname - try: - pbar = tqdm(desc = 'Writing schema to file %s' % users_filename) - schema = {} - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - async for user, err in self.connection.get_all_schemaentry(['name', 'schemaidguid']): - if err is not None: - raise err - pbar.update() - schema[user.name.lower()] = str(user.schemaIDGUID) + try: + pbar = tqdm(desc = 'Writing schema to file %s' % users_filename) + schema = {} + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + async for user, err in self.connection.get_all_schemaentry(['name', 'schemaidguid']): + if err is not None: + raise err + pbar.update() + schema[user.name.lower()] = str(user.schemaIDGUID) - json.dump(schema, f) + json.dump(schema, f) print('Schema dump was written to %s' % users_filename) pbar.close() with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: dz.write(users_filename, arcname = users_filename) - finally: - try: - 
os.remove(users_filename) - except: - pass + finally: + try: + os.remove(users_filename) + except: + pass - tname = 'trusts_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'trusts_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + + try: + pbar = tqdm(desc = 'Writing trusts to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADDomainTrust_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_trusts(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADDomainTrust_ATTRS))+'\r\n') + print('Trust dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing trusts to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADDomainTrust_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_trusts(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADDomainTrust_ATTRS))+'\r\n') - sdtemp.write('trust:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh())+'\r\n') - - print('Computer dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'users_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'users_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname - try: - pbar = tqdm(desc = 'Writing users to file %s' % users_filename) + try: + pbar = tqdm(desc = 'Writing users to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADUser_TSV_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_users(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADUser_TSV_ATTRS))+'\r\n') - sdtemp.write('user:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name))+'\r\n') - print('Users dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADUser_TSV_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_users(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADUser_TSV_ATTRS))+'\r\n') + print('Users dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, 
arcname = users_filename) + finally: + try: + os.remove(users_filename) + except: + pass - tname = 'computers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'computers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing computers to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADMachine_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_machines(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADMachine_ATTRS))+'\r\n') + print('Computer dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing computers to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADMachine_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_machines(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADMachine_ATTRS))+'\r\n') - sdtemp.write('computer:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name))+'\r\n') - print('Computer dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'groups_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'groups_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing groups to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADGroup_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_groups(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADGroup_ATTRS))+'\r\n') + print('Group dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing groups to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADGroup_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_groups(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADGroup_ATTRS))+'\r\n') - sdtemp.write('group:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name))+'\r\n') - print('Group dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - 
os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'ous_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'ous_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing OUs to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADOU_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_ous(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADOU_ATTRS))+'\r\n') + print('OU dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing OUs to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADOU_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_ous(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADOU_ATTRS))+'\r\n') - sdtemp.write('ou:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name, str(self.adinfo.objectSid)))+'\r\n') - print('OU dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass - tname = 'containers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname - try: - pbar = tqdm(desc = 'Writing Containers to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADContainer_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_containers(): - if err is not None: - raise err - if is_filtered_container(user.distinguishedName): - continue - pbar.update() - f.write('\t'.join(user.get_row(MSADContainer_ATTRS))+'\r\n') - sdtemp.write('container:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name, str(self.adinfo.objectSid)))+'\r\n') - print('Container dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass - - tname = 'gpos_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - bh_filename = 'bh_%s.json' % tname - users_filename = '%s.tsv' % tname + tname = 'containers_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing Containers to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADContainer_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_containers(): + if err is not None: + raise err + pbar.update() + 
f.write('\t'.join(user.get_row(MSADContainer_ATTRS))+'\r\n') + print('Container dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - pbar = tqdm(desc = 'Writing GPOs to file %s' % users_filename) - with open(bh_filename, 'w', newline='', encoding = 'utf8') as b: - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - f.write('\t'.join(MSADOU_ATTRS)+'\r\n') - async for user, err in self.connection.get_all_gpos(): - if err is not None: - raise err - pbar.update() - f.write('\t'.join(user.get_row(MSADGPO_ATTRS))+'\r\n') - sdtemp.write('gpo:'+user.distinguishedName + '\r\n') - b.write(json.dumps(user.to_bh(self.adinfo.name, str(self.adinfo.objectSid)))+'\r\n') - print('GPO dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - dz.write(bh_filename, arcname = bh_filename) - finally: - try: - os.remove(users_filename) - os.remove(bh_filename) - except: - pass + os.remove(users_filename) + except: + pass + tname = 'gpos_%s' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + users_filename = '%s.tsv' % tname + try: + pbar = tqdm(desc = 'Writing GPOs to file %s' % users_filename) + with open(users_filename, 'w', newline='', encoding = 'utf8') as f: + f.write('\t'.join(MSADOU_ATTRS)+'\r\n') + async for user, err in self.connection.get_all_gpos(): + if err is not None: + raise err + pbar.update() + f.write('\t'.join(user.get_row(MSADGPO_ATTRS))+'\r\n') + print('GPO dump was written to %s' % users_filename) + pbar.close() + with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: + dz.write(users_filename, arcname = users_filename) + finally: try: - users_filename = 'tokens_%s.json' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - pbar = tqdm(desc = 'Writing tokens to file %s' % users_filename) - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - async for res, err in self.connection.get_all_tokengroups(): - if err is not None: - raise err - pbar.update() - f.write(json.dumps(res)+'\r\n') - print('Token dump was written to %s' % users_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) - finally: - try: - os.remove(users_filename) - except: - pass + os.remove(users_filename) + except: + pass - try: - dns_filename = 'dns_%s.tsv' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - pbar = tqdm(desc = 'Writing DNS records to file %s' % dns_filename) - with open(dns_filename, 'w', newline='', encoding = 'utf8') as f: - async for zonedn, name, dnsrecod, err in self.connection.dnsentries(): - if err is not None: - raise err - - dnsdataobj = dnsrecod.get_formatted() - line = '\t'.join([zonedn, name, dnsrecod.Type.name, dnsdataobj.to_line()]) - - f.write(line + '\r\n') - pbar.update(1) - print('DNS dump was written to %s' % dns_filename) - pbar.close() - with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(dns_filename, arcname = dns_filename) - finally: - try: - os.remove(dns_filename) - except: - pass - - total = 0 - with open(sdtempname, 'r', newline = '') as sdtemp: - for line in sdtemp: - total += 1 - try: - with open(sdtempname, 'r', newline = '') as sdtemp: - users_filename = 'sds_%s.json' % 
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - pbar = tqdm(desc = 'Writing SDs to file %s' % users_filename, total=total) - with open(users_filename, 'w', newline='', encoding = 'utf8') as f: - for line in sdtemp: - line = line.strip() - line = line.split(':',1) - if len(line) < 2: - continue - dn = line[1].strip() - adsec, err = await self.connection.get_objectacl_by_dn(dn) - if err is not None: - raise err - pbar.update() - f.write(json.dumps({'dn' : dn, 'otype': line[0], 'sd': base64.b64encode(adsec).decode()})+'\r\n') - print('SD dump was written to %s' % users_filename) - pbar.close() - + dns_filename = 'dns_%s.tsv' % datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + pbar = tqdm(desc = 'Writing DNS records to file %s' % dns_filename) + with open(dns_filename, 'w', newline='', encoding = 'utf8') as f: + async for zonedn, name, dnsrecod, err in self.connection.dnsentries(): + if err is not None: + raise err + dnsdataobj = dnsrecod.get_formatted() + line = '\t'.join([zonedn, name, dnsrecod.Type.name, dnsdataobj.to_line()]) + f.write(line + '\r\n') + pbar.update(1) + print('DNS dump was written to %s' % dns_filename) + pbar.close() with zipfile.ZipFile(zip_filename, 'a', compression=zipfile.ZIP_LZMA) as dz: - dz.write(users_filename, arcname = users_filename) + dz.write(dns_filename, arcname = dns_filename) finally: try: - os.remove(users_filename) - os.remove(sdtempname) + os.remove(dns_filename) except: pass + + print('All dumps were written to %s' % zip_filename) return True - except: - try: - os.remove(sdtempname) - except: - pass - + except: traceback.print_exc() return False diff --git a/msldap/external/bloodhoundpy/acls.py b/msldap/external/bloodhoundpy/acls.py index 9d7d43b..e101a3b 100644 --- a/msldap/external/bloodhoundpy/acls.py +++ b/msldap/external/bloodhoundpy/acls.py @@ -21,15 +21,15 @@ # SOFTWARE. # #################### -from __future__ import unicode_literals -import logging -from multiprocessing import Pool +#from __future__ import unicode_literals +import re +import binascii +from msldap import logger from msldap.external.bloodhoundpy.lib.cstruct import * from io import BytesIO -from future.utils import iteritems, native_str +#from future.utils import iteritems, native_str from struct import unpack, pack -import re -import binascii + def bin_to_string(uuid): uuid1, uuid2, uuid3 = unpack('" % (self.mask, ' | '.join(out)) @@ -593,7 +598,8 @@ def __init__(self, fh): def __repr__(self): out = [] - for name, value in iteritems(vars(ACE)): + #for name, value in iteritems(vars(ACE)): + for name, value in vars(ACE): if not name.startswith('_') and type(value) is int and self.has_flag(value): out.append(name) return "" % (self.ace.AceType, ' | '.join(out), self.ace.AceFlags, str(self.acedata)) diff --git a/msldap/external/bloodhoundpy/lib/cstruct.py b/msldap/external/bloodhoundpy/lib/cstruct.py index 3314e40..0de861d 100644 --- a/msldap/external/bloodhoundpy/lib/cstruct.py +++ b/msldap/external/bloodhoundpy/lib/cstruct.py @@ -22,7 +22,7 @@ # - Rework definition parsing, maybe pycparser? # - Change expression implementation # - Lazy reading? 
-from __future__ import print_function + import re import sys import ast diff --git a/msldap/external/bloodhoundpy/resolver.py b/msldap/external/bloodhoundpy/resolver.py index 9ace839..ba28fbb 100644 --- a/msldap/external/bloodhoundpy/resolver.py +++ b/msldap/external/bloodhoundpy/resolver.py @@ -1,3 +1,5 @@ +from msldap import logger + WELLKNOWN_SIDS = { "S-1-0": ("Null Authority", "USER"), "S-1-0-0": ("Nobody", "USER"), @@ -78,7 +80,7 @@ def resolve_aces(aces, domainname, domainsid, sidcache): else: linkitem = sidcache.get(ace['sid']) if linkitem is None: - print('Cache miss for %s' % ace['sid']) + logger.debug('[EXT-BH] Cache miss for %s' % ace['sid']) entry = { 'type': 'Base', 'objectid': ace['sid'] diff --git a/msldap/external/bloodhoundpy/utils.py b/msldap/external/bloodhoundpy/utils.py new file mode 100644 index 0000000..0d4e63f --- /dev/null +++ b/msldap/external/bloodhoundpy/utils.py @@ -0,0 +1,55 @@ + + + +def reverse_dn_components(dn:str): + rdns = ','.join(reversed(dn.split(','))) + return rdns.upper() + +def explode_dn(dn): + parts = [] + esc = False + part = '' + + for char in dn: + if esc: + part += char + esc = False + elif char == '\\': + esc = True + part += char + elif char == ',': + if part: + parts.append(part) + part = '' + else: + part += char + + if part: + parts.append(part) + + return parts + + +def parse_gplink_string(linkstr): + if not linkstr: + return + for links in linkstr.split('[LDAP://')[1:]: + dn, options = links.rstrip('][').split(';') + yield dn, int(options) + + + +#taken from bloodhound.py +def is_filtered_container(containerdn): + if "CN=DOMAINUPDATES,CN=SYSTEM,DC=" in containerdn.upper(): + return True + if "CN=POLICIES,CN=SYSTEM,DC=" in containerdn.upper() and (containerdn.upper().startswith('CN=USER') or containerdn.upper().startswith('CN=MACHINE')): + return True + return False + +def is_filtered_container_child(containerdn): + if "CN=PROGRAM DATA,DC=" in containerdn.upper(): + return True + if "CN=SYSTEM,DC=" in containerdn.upper(): + return True + return False \ No newline at end of file diff --git a/msldap/ldap_objects/adcomp.py b/msldap/ldap_objects/adcomp.py index 841e601..cab3cdb 100644 --- a/msldap/ldap_objects/adcomp.py +++ b/msldap/ldap_objects/adcomp.py @@ -278,12 +278,12 @@ def to_bh(self, domain): "Status": None, # no idea what this is "DumpSMSAPassword" : [], 'Properties' : { - 'name' : self.name, + 'name' : '%s@%s' % (self.sAMAccountName.upper(), domain.upper()), 'domain' : domain, 'domainsid' : str(self.objectSid).rsplit('-',1)[0] , 'distinguishedname' : str(self.distinguishedName).upper(), 'unconstraineddelegation' : self.uac_to_textflag('UAC_TRUSTED_FOR_DELEGATION'), - 'enabled' : MSLDAP_UAC.ACCOUNTDISABLE in uac, + 'enabled' : MSLDAP_UAC.ACCOUNTDISABLE not in uac, 'trustedtoauth' : MSLDAP_UAC.TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION in uac, 'samaccountname' : self.sAMAccountName , 'haslaps' : self.ms_Mcs_AdmPwdExpirationTime is not None, diff --git a/msldap/ldap_objects/adcontainer.py b/msldap/ldap_objects/adcontainer.py index e99f47a..dbb8f5c 100644 --- a/msldap/ldap_objects/adcontainer.py +++ b/msldap/ldap_objects/adcontainer.py @@ -54,7 +54,12 @@ def to_dict(self): def get_row(self, attrs): t = self.to_dict() - return [str(t.get(x)) if x !='nTSecurityDescriptor' else base64.b64encode(t.get(x, b'')).decode() for x in attrs] + if 'nTSecurityDescriptor' in attrs: + if t['nTSecurityDescriptor'] is not None: + t['nTSecurityDescriptor'] = base64.b64encode(t['nTSecurityDescriptor']).decode() + else: + t['nTSecurityDescriptor'] = b'' + 
return [str(t.get(x)) for x in attrs] def __str__(self): t = 'MSADContainer\r\n' diff --git a/msldap/ldap_objects/adgroup.py b/msldap/ldap_objects/adgroup.py index 8039686..1f6d832 100644 --- a/msldap/ldap_objects/adgroup.py +++ b/msldap/ldap_objects/adgroup.py @@ -123,7 +123,7 @@ def is_highvalue(sid:str): "IsDeleted": bool(self.isDeleted), "IsACLProtected": False , # Post processing 'Properties' : { - 'name' : self.name, + 'name' : '%s@%s' % (self.name.upper(), domain.upper()), 'domain' : domain, 'domainsid' : str(self.objectSid).rsplit('-',1)[0] , 'distinguishedname' : str(self.distinguishedName).upper(), diff --git a/msldap/ldap_objects/adou.py b/msldap/ldap_objects/adou.py index fc460ee..14f5e27 100644 --- a/msldap/ldap_objects/adou.py +++ b/msldap/ldap_objects/adou.py @@ -86,7 +86,12 @@ def to_dict(self): def get_row(self, attrs): t = self.to_dict() - return [str(t.get(x)) if x !='nTSecurityDescriptor' else base64.b64encode(t.get(x, b'')).decode() for x in attrs] + if 'nTSecurityDescriptor' in attrs: + if t['nTSecurityDescriptor'] is not None: + t['nTSecurityDescriptor'] = base64.b64encode(t['nTSecurityDescriptor']).decode() + else: + t['nTSecurityDescriptor'] = b'' + return [str(t.get(x)) for x in attrs] def __str__(self): t = 'MSADOU\r\n' @@ -103,7 +108,7 @@ def to_bh(self, domain, domainsid): "IsDeleted": bool(self.isDeleted), "IsACLProtected": False , # Post processing 'Properties' : { - 'name' : self.name, + 'name' : '%s@%s' % (self.name.upper(), domain.upper()), 'domain' : domain, 'domainsid' : domainsid, 'distinguishedname' : str(self.distinguishedName).upper(), diff --git a/msldap/protocol/typeconversion.py b/msldap/protocol/typeconversion.py index c0bcf59..e0de7f7 100644 --- a/msldap/protocol/typeconversion.py +++ b/msldap/protocol/typeconversion.py @@ -81,10 +81,10 @@ def single_sid(x, encode = False): return str(SID.from_bytes(x[0])) return [SID.from_string(x).to_bytes()] -def single_str(x, encode = False): +def single_str(x, encode = False, encoding = 'utf-8'): if encode is False: - return x[0].decode() - return [x.encode()] + return x[0].decode(encoding) + return [x.encode(encoding)] def single_bytes(x, encode = False): if encode is False: @@ -96,9 +96,9 @@ def single_int(x, encode = False): return int(x[0]) return [str(x).encode()] -def single_bool(x, encode = False): +def single_bool(x, encode = False, encoding = 'utf-8'): if encode is False: - x = x[0].decode() + x = x[0].decode(encoding) if x == 'FALSE': return False return True @@ -114,10 +114,10 @@ def single_sd(x, encode = False): return SECURITY_DESCRIPTOR.from_bytes(x[0]) return [x.to_bytes()] -def single_date(x, encode = False): +def single_date(x, encode = False, encoding = 'utf-8'): if encode is False: try: - x = x[0].decode() + x = x[0].decode(encoding) match = time_format.fullmatch(x) if match is None: return x @@ -186,12 +186,12 @@ def multi_int(x, encode = False): x = [x] return [str(k).encode() for k in x] -def multi_str(x, encode = False): +def multi_str(x, encode = False, encoding = 'utf-8'): if encode is False: - return [e.decode() for e in x ] + return [e.decode(encoding) for e in x ] if isinstance(x, list) is False: x = [x] - return [line.encode() for line in x] + return [line.encode(encoding) for line in x] def multi_sid(x, encode = False): if encode is False: diff --git a/setup.py b/setup.py index 7e341b3..53170a2 100644 --- a/setup.py +++ b/setup.py @@ -48,10 +48,10 @@ ], install_requires=[ 'unicrypto>=0.0.10', - 'asyauth>=0.0.16', - 'asysocks>=0.2.9', + 'asyauth>=0.0.18', + 'asysocks>=0.2.11', 
'asn1crypto>=1.3.0', - 'winacl>=0.1.7', + 'winacl>=0.1.8', 'prompt-toolkit>=3.0.2', 'tqdm', 'wcwidth', @@ -60,6 +60,7 @@ entry_points={ 'console_scripts': [ 'msldap = msldap.examples.msldapclient:main', + 'msldap-bloodhound = msldap.examples.msldapbloodhound:main', ], } )
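
The new msldap/external/bloodhoundpy/utils.py introduced above carries the DN bookkeeping used by the Bloodhound collector (RDN splitting with escape handling, DN reversal for tree keys, gPLink parsing, container filtering). A minimal sketch of how those helpers behave, assuming they are importable from msldap.external.bloodhoundpy.utils exactly as added in this patch; the DN and gPLink values below are made-up examples, not taken from the patch:

    # Sketch: exercising the DN helpers added in msldap/external/bloodhoundpy/utils.py.
    # All DN/gPLink values here are hypothetical examples for illustration only.
    from msldap.external.bloodhoundpy.utils import (
        reverse_dn_components,
        explode_dn,
        parse_gplink_string,
        is_filtered_container_child,
    )

    dn = 'CN=Smith\\, John,OU=Sales,DC=corp,DC=local'

    # explode_dn keeps the escaped comma inside the first RDN instead of splitting on it
    print(explode_dn(dn))
    # ['CN=Smith\\, John', 'OU=Sales', 'DC=corp', 'DC=local']

    # reverse_dn_components flips the RDN order and upper-cases the result,
    # which is how the collector keys its DN tree (note: plain split(','),
    # so it is only used on DNs without escaped commas)
    print(reverse_dn_components('OU=Sales,DC=corp,DC=local'))
    # 'DC=LOCAL,DC=CORP,OU=SALES'

    # parse_gplink_string yields (dn, options) pairs from a raw gPLink attribute value
    gplink = '[LDAP://cn={31B2F340-016D-11D2-945F-00C04FB984F9},cn=policies,cn=system,DC=corp,DC=local;0]'
    for gpo_dn, options in parse_gplink_string(gplink):
        print(gpo_dn, options)

    # children of CN=System and CN=Program Data are skipped during collection
    print(is_filtered_container_child('CN=RID Set,CN=System,DC=corp,DC=local'))  # True

This mirrors how lookup_dn_children and the OU/container collection in msldap/bloodhound.py consume these helpers: DNs are reversed and exploded to walk the tree, and filtered children are dropped before resolution.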