Skip to content

Commit

Permalink
Merge pull request #173 from obsidianforensics/add-indexeddb
Browse files Browse the repository at this point in the history
Add parsing of IndexedDB records. Update included ccl_chrome_indexeddb
  • Loading branch information
obsidianforensics authored May 1, 2024
2 parents 20c8514 + 945b95e commit 34bc9fa
Show file tree
Hide file tree
Showing 12 changed files with 793 additions and 178 deletions.
50 changes: 31 additions & 19 deletions pyhindsight/analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -905,8 +905,8 @@ def generate_excel(self, output_object):
s = workbook.add_worksheet('Storage')
# Title bar
s.merge_range('A1:G1', f'Hindsight Internet History Forensics (v{__version__})', title_header_format)
s.merge_range('H1:J1', 'Backing Database Specific', center_header_format)
s.merge_range('K1:M1', 'FileSystem Specific', center_header_format)
s.merge_range('H1:K1', 'Backing Database Specific', center_header_format)
s.merge_range('L1:N1', 'FileSystem Specific', center_header_format)

# Write column headers
s.write(1, 0, 'Type', header_format)
Expand All @@ -917,11 +917,12 @@ def generate_excel(self, output_object):
s.write(1, 5, 'Interpretation', header_format)
s.write(1, 6, 'Profile', header_format)
s.write(1, 7, 'Source Path', header_format)
s.write(1, 8, 'Sequence', header_format)
s.write(1, 9, 'State', header_format)
s.write(1, 10, 'File Exists?', header_format)
s.write(1, 11, 'File Size (bytes)', header_format)
s.write(1, 12, 'File Type (Confidence %)', header_format)
s.write(1, 8, 'Database', header_format)
s.write(1, 9, 'Sequence', header_format)
s.write(1, 10, 'State', header_format)
s.write(1, 11, 'File Exists?', header_format)
s.write(1, 12, 'File Size (bytes)', header_format)
s.write(1, 13, 'File Type (Confidence %)', header_format)

# Set column widths
s.set_column('A:A', 16) # Type
Expand All @@ -932,11 +933,12 @@ def generate_excel(self, output_object):
s.set_column('F:F', 50) # Interpretation
s.set_column('G:G', 50) # Profile
s.set_column('H:H', 50) # Source Path
s.set_column('I:I', 8) # Seq
s.set_column('J:J', 8) # State
s.set_column('K:K', 8) # Exists
s.set_column('L:L', 16) # Size
s.set_column('M:M', 25) # Type
s.set_column('I:I', 16) # Database
s.set_column('J:J', 8) # Seq
s.set_column('K:K', 8) # State
s.set_column('L:L', 8) # Exists
s.set_column('M:M', 16) # Size
s.set_column('N:N', 25) # Type

# Start at the row after the headers, and begin writing out the items in parsed_artifacts
row_number = 2
Expand All @@ -951,11 +953,11 @@ def generate_excel(self, output_object):
s.write(row_number, 5, item.interpretation, black_value_format)
s.write(row_number, 6, item.profile, black_value_format)
s.write(row_number, 7, item.source_path, black_value_format)
s.write_number(row_number, 8, item.seq, black_value_format)
s.write_string(row_number, 9, item.state, black_value_format)
s.write(row_number, 10, item.file_exists, black_value_format)
s.write(row_number, 11, item.file_size, black_value_format)
s.write(row_number, 12, item.magic_results, black_value_format)
s.write_number(row_number, 9, item.seq, black_value_format)
s.write_string(row_number, 10, item.state, black_value_format)
s.write(row_number, 11, item.file_exists, black_value_format)
s.write(row_number, 12, item.file_size, black_value_format)
s.write(row_number, 13, item.magic_results, black_value_format)

elif item.row_type.startswith(("local storage", "session storage")):
s.write_string(row_number, 0, item.row_type, black_type_format)
Expand All @@ -966,9 +968,19 @@ def generate_excel(self, output_object):
s.write(row_number, 5, item.interpretation, black_value_format)
s.write(row_number, 6, item.profile, black_value_format)
s.write(row_number, 7, item.source_path, black_value_format)
s.write_number(row_number, 8, item.seq, black_value_format)
s.write_string(row_number, 9, item.state, black_value_format)
s.write_number(row_number, 9, item.seq, black_value_format)
s.write_string(row_number, 10, item.state, black_value_format)

elif item.row_type.startswith("indexeddb"):
s.write_string(row_number, 0, item.row_type, black_type_format)
s.write_string(row_number, 1, item.origin, black_url_format)
s.write_string(row_number, 2, item.key, black_field_format)
s.write_string(row_number, 3, item.value, black_value_format)
s.write(row_number, 5, item.interpretation, black_value_format)
s.write(row_number, 6, item.profile, black_value_format)
s.write(row_number, 7, item.source_path, black_value_format)
s.write(row_number, 8, item.database, black_value_format)
s.write_number(row_number, 9, item.seq, black_value_format)
except Exception as e:
log.error(f'Failed to write row to XLSX: {e}')

Expand Down
70 changes: 67 additions & 3 deletions pyhindsight/browsers/chrome.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
import urllib
import base64

import pyhindsight.lib.ccl_chrome_indexeddb.ccl_blink_value_deserializer
from pyhindsight.lib.ccl_chrome_indexeddb import ccl_chromium_indexeddb
from pyhindsight.browsers.webbrowser import WebBrowser
from pyhindsight import utils

Expand Down Expand Up @@ -1105,7 +1105,7 @@ def get_session_storage(self, path, dir_name):
try:
ss_ldb_records = ccl_chromium_sessionstorage.SessionStoreDb(pathlib.Path(ss_path))
except ValueError as e:
log.warning(f' - Error reading records; possible LevelDB corruption')
log.warning(f' - Error reading records ({e}); possible LevelDB corruption')
self.artifacts_counts['Session Storage'] = 'Failed'

if ss_ldb_records:
Expand All @@ -1128,6 +1128,63 @@ def get_session_storage(self, path, dir_name):

log.info(f' - Parsed {len(results)} Session Storage items')
self.parsed_storage.extend(results)
def get_indexeddb(self, path, dir_name):
    """Parse IndexedDB records from a profile's 'IndexedDB' directory.

    Opens each per-origin ``<origin>.indexeddb.leveldb`` directory with
    ccl_chromium_indexeddb (paired with its ``.blob`` sibling directory when
    one exists), walks every object store of every database inside it, and
    appends one Chrome.IndexedDBItem per record to self.parsed_storage.

    :param path: Path of the profile directory containing *dir_name*.
    :param dir_name: Name of the IndexedDB directory inside *path*.
    """
    items = []

    idb_path = os.path.join(path, dir_name)
    log.info('IndexedDB:')
    log.info(f' - Reading from {idb_path}')

    idb_storage_listing = os.listdir(idb_path)
    log.debug(f' - {len(idb_storage_listing)} files in IndexedDB directory')

    for storage_directory in idb_storage_listing:
        # Only the per-origin '.leveldb' directories hold IndexedDB records.
        if not storage_directory.endswith('.leveldb'):
            continue

        # The Ghostery extension has 1M+ records in it; skip for now.
        if storage_directory == 'chrome-extension_mlomiejdfkolichcflejclcbmpeaniij_0.indexeddb.leveldb':
            continue

        origin = storage_directory.split('.indexeddb')[0]
        # Large ("external") values are kept in a sibling '.blob' directory.
        blob_path = os.path.join(idb_path, f'{origin}.indexeddb.blob')
        blob_directory = blob_path if os.path.exists(blob_path) else None

        try:
            origin_idb = ccl_chromium_indexeddb.WrappedIndexDB(
                leveldb_dir=os.path.join(idb_path, f'{origin}.indexeddb.leveldb'),
                leveldb_blob_dir=blob_directory)
        except ValueError as e:
            log.error(f' - {e} when processing {storage_directory}')
            continue
        except Exception as e:
            log.error(f' - Unexpected Exception ({e}) when processing {storage_directory}')
            continue

        for database_id in origin_idb.database_ids:
            database = origin_idb[database_id.dbid_no]
            for obj_store_name in database.object_store_names:
                obj_store = database.get_object_store_by_name(obj_store_name)
                try:
                    for record in obj_store.iterate_records():
                        items.append(Chrome.IndexedDBItem(
                            self.profile_path, origin, str(record.key.value), str(record.value),
                            int(record.sequence_number), str(database.name), storage_directory))
                except FileNotFoundError as e:
                    log.error(f' - File ({e}) not found while processing {database}')
                except ValueError as e:
                    log.error(f' - ValueError ({e}) when processing {database}')
                except Exception as e:
                    log.error(f' - Unexpected Exception: {e}')

    self.artifacts_counts['IndexedDB'] = len(items)
    log.info(f' - Parsed {len(items)} items from {len(idb_storage_listing)} files')
    self.parsed_storage.extend(items)

def get_extensions(self, path, dir_name):
results = []
Expand Down Expand Up @@ -1575,7 +1632,7 @@ def zoom_level_to_zoom_factor(zoom_level):
if prefs.get('password_manager'):
if prefs['password_manager'].get('profile_store_date_last_used_for_filling'):
timestamped_preference_item = Chrome.SiteSetting(
self.profile_path, url=None,
self.profile_path, url='',
timestamp=utils.to_datetime(
prefs['password_manager']['profile_store_date_last_used_for_filling'], self.timezone),
key=f'profile_store_date_last_used_for_filling [in {preferences_file}.password_manager]',
Expand Down Expand Up @@ -2482,6 +2539,13 @@ def process(self):
self.artifacts_display['Archived History'],
self.artifacts_counts.get('Archived History', '0')))

if 'IndexedDB' in input_listing:
self.get_indexeddb(self.profile_path, 'IndexedDB')
self.artifacts_display['IndexedDB'] = 'IndexedDB records'
print(self.format_processing_output(
self.artifacts_display['IndexedDB'],
self.artifacts_counts.get('IndexedDB', '0')))

if 'Media History' in input_listing:
self.get_media_history(self.profile_path, 'Media History', self.version, 'media (playback end)')
self.artifacts_display['Media History'] = "Media History records"
Expand Down
23 changes: 23 additions & 0 deletions pyhindsight/browsers/webbrowser.py
Original file line number Diff line number Diff line change
Expand Up @@ -384,6 +384,29 @@ def __init__(self, profile, origin, key, value, seq, state, source_path):
self.state = state
self.source_path = source_path

class IndexedDBItem(StorageItem):
    def __init__(self, profile, origin, key, value, seq, database, source_path):
        """A single record recovered from a Chromium IndexedDB store.

        :param profile: Path to the browser profile this record belongs to.
        :param origin: Web origin that owns the IndexedDB store.
        :param key: Key of the record.
        :param value: Value of the record.
        :param seq: Sequence number of the record.
        :param database: Name of the database (within the origin's IndexedDB
            file) that contains the record.
        :param source_path: Path to the source file the record was read from.
        """
        super(WebBrowser.IndexedDBItem, self).__init__(
            'indexeddb', profile=profile, origin=origin, key=key, value=value, seq=seq,
            source_path=source_path)
        # 'database' is the only field specific to this subclass; the rest are
        # (re)assigned here, matching the style of the sibling item classes.
        self.database = database
        self.profile = profile
        self.origin = origin
        self.key = key
        self.value = value
        self.seq = seq
        self.source_path = source_path

class FileSystemItem(StorageItem):
def __init__(self, profile, origin, key, value, seq, state, source_path, last_modified=None,
file_exists=None, file_size=None, magic_results=None):
Expand Down
Loading

0 comments on commit 34bc9fa

Please sign in to comment.