@comment{forensics_v1.bib — references on multimedia forensics: video/image file carving and PRNU-based source camera attribution.}
@article{altinisik_automatic_2021,
	title = {Automatic {Generation} of {H}.264 {Parameter} {Sets} to {Recover} {Video} {File} {Fragments}},
	url = {http://arxiv.org/abs/2104.14522},
	abstract = {We address the problem of decoding video file fragments when the necessary encoding parameters are missing. With this objective, we propose a method that automatically generates H.264 video headers containing these parameters and extracts coded pictures in the partially available compressed video data. To accomplish this, we examined a very large corpus of videos to learn patterns of encoding settings commonly used by encoders and created a parameter dictionary. Further, to facilitate a more efficient search our method identifies characteristics of a coded bitstream to discriminate the entropy coding mode. It also utilizes the application logs created by the decoder to identify correct parameter values. Evaluation of the effectiveness of the proposed method on more than 55K videos with diverse provenance shows that it can generate valid headers on average in 11.3 decoding trials per video. This result represents an improvement by more than a factor of 10 over the conventional approach of video header stitching to recover video file fragments.},
	urldate = {2021-09-29},
	journal = {arXiv:2104.14522 [cs]},
	author = {Altinisik, Enes and Sencar, Hüsrev Taha},
	month = sep,
	year = {2021},
	eprint = {2104.14522},
	archiveprefix = {arXiv},
	primaryclass = {cs.MM},
	keywords = {Computer Science - Multimedia},
}
@article{uzun_jpg_2020,
	title = {{JpgScraper}: {An} {Advanced} {Carver} for {JPEG} {Files}},
	volume = {15},
	issn = {1556-6021},
	shorttitle = {{JpgScraper}},
	doi = {10.1109/TIFS.2019.2953382},
	abstract = {Orphaned file fragment carving is concerned with recovering contents of encoded data in the absence of any coding metadata. Constructing an orphaned file carver requires addressing three challenges: a specialized decoder to interpret partial file data; the ability to discriminate a specific type of encoded data from all other types of data; and comprehensive prior knowledge on possible encoding settings. In this work, we build on the ability to render a partial image contained within a segment of JPEG coded data to introduce a new carving tool that addresses all these challenges. Towards this goal, we first propose a new method that discriminates JPEG file data from among 993 file data types with 97.7\% accuracy. We also introduce a method for robustly delimiting entropy coded data segments of JPEG files. This in turn allows us to identify partial JPEG file headers with zero false rejection and 0.1\% of false alarm rate. Secondly, we examine a very diverse image set comprising more than 7 million images. This ensures comprehensive coverage of coding parameters used by 3,269 camera models and a wide variety of image editing tools. Further, we assess the potential impact of the developed tool on practice in terms of the amount of new evidence that it can recover. Recovery results on a set of used SD cards purchased online show that our carver is able to recover 24\% more image data as compared to existing file carving tools. Evaluations performed on a standard dataset also show that JpgScraper improves the state-of-the-art significantly in carving JPEG file data.},
	journal = {IEEE Transactions on Information Forensics and Security},
	author = {Uzun, Erkam and Sencar, Hüsrev Taha},
	year = {2020},
	keywords = {Transform coding, Tools, Decoding, Encoding, Image coding, Metadata, Entropy, Orphaned file fragment, file carving, JPEG, file type identification},
	pages = {1846--1857},
}
@article{altinisik_mitigation_2020,
	title = {Mitigation of {H}.264 and {H}.265 {Video} {Compression} for {Reliable} {PRNU} {Estimation}},
	volume = {15},
	issn = {1556-6021},
	doi = {10.1109/TIFS.2019.2945190},
	abstract = {The photo-response non-uniformity (PRNU) is a distinctive image sensor characteristic, and an imaging device inadvertently introduces its sensor's PRNU into all media it captures. Therefore, the PRNU can be regarded as a camera fingerprint and used for source attribution. The imaging pipeline in a camera, however, involves various processing steps that are detrimental to PRNU estimation. In the context of photographic images, these challenges are successfully addressed and the method for estimating a sensor's PRNU pattern is well established. However, various additional challenges related to generation of videos remain largely untackled. With this perspective, this work introduces methods to mitigate disruptive effects of widely deployed H.264 and H.265 video compression standards on PRNU estimation. Our approach involves an intervention in the decoding process to eliminate a filtering procedure applied at the decoder to reduce blockiness. It also utilizes decoding parameters to develop a weighting scheme and adjust the contribution of video frames at the macroblock level to PRNU estimation process. Results obtained on videos captured by 28 cameras show that our approach increases the PRNU matching metric up to more than five times over the conventional estimation method tailored for photos. Tests on a public dataset also verify that the proposed method improves the attribution performance by increasing the accuracy and allowing the use of smaller length videos to perform attribution.},
	journal = {IEEE Transactions on Information Forensics and Security},
	author = {Altinisik, Enes and Tasdemir, Kasim and Sencar, Hüsrev Taha},
	year = {2020},
	keywords = {Estimation, Cameras, Video compression, Reliability, Pipelines, Standards, Photo-response non-uniformity (PRNU), video source attribution, H.264/H.265 encoding \& decoding},
	pages = {1557--1571},
}
@article{altinisik_source_2021,
	author   = {Altinisik, Enes and Sencar, Hüsrev Taha},
	title    = {Source {Camera} {Verification} for {Strongly} {Stabilized} {Videos}},
	journal  = {IEEE Transactions on Information Forensics and Security},
	volume   = {16},
	year     = {2021},
	pages    = {643--657},
	doi      = {10.1109/TIFS.2020.3016830},
	issn     = {1556-6021},
	abstract = {Image stabilization performed during imaging and/or post-processing poses one of the most significant challenges to photo-response non-uniformity based source camera attribution from videos. When performed digitally, stabilization involves cropping, warping, and inpainting of video frames to eliminate unwanted camera motion. Hence, successful attribution requires inversion of these transformations in a blind manner. To address this challenge, we introduce a source camera verification method for videos that takes into account spatially variant nature of stabilization transformations and assumes a larger degree of freedom in their search. Our method identifies transformations at a sub-frame level, incorporates a number of constraints to validate their correctness, and offers computational flexibility in the search for the correct transformation. The method also adopts a holistic approach in countering disruptive effects of other video generation steps, such as video coding and downsizing, for more reliable attribution. Tests performed on one public and two custom datasets show that the proposed method is able to verify the source of 23-30\% of all videos that underwent stronger stabilization, depending on computation load, without a significant impact on false attribution.},
	keywords = {Videos, Cameras, Media, Estimation, Reliability, Transforms, Source camera verification, stabilized video, photo-response non-uniformity (PRNU), stabilization transformation inversion},
}
@article{altinisik_prnu_2021,
	title = {{PRNU} {Estimation} from {Encoded} {Videos} {Using} {Block}-{Based} {Weighting}},
	volume = {2021},
	issn = {2470-1173},
	url = {http://arxiv.org/abs/2008.08138},
	doi = {10.2352/ISSN.2470-1173.2021.4.MWSF-338},
	abstract = {Estimating the photo-response non-uniformity (PRNU) of an imaging sensor from videos is a challenging task due to complications created by several processing steps in the camera imaging pipeline. Among these steps, video coding is one of the most disruptive to PRNU estimation because of its lossy nature. Since videos are always stored in a compressed format, the ability to cope with the disruptive effects of encoding is central to reliable attribution. In this work, by focusing on the block-based operation of widely used video coding standards, we present an improved approach to PRNU estimation that exploits this behavior. To this purpose, several PRNU weighting schemes that utilize block-level parameters, such as encoding block type, quantization strength, and rate-distortion value, are proposed and compared. Our results show that the use of the coding rate of a block serves as a better estimator for the strength of PRNU with almost three times improvement in the matching statistic at low to medium coding bitrates as compared to the basic estimation method developed for photos.},
	number = {4},
	urldate = {2021-09-29},
	journal = {Electronic Imaging},
	author = {Altinisik, Enes and Tasdemir, Kasim and Sencar, Hüsrev Taha},
	month = jan,
	year = {2021},
	eprint = {2008.08138},
	archiveprefix = {arXiv},
	primaryclass = {eess.IV},
	keywords = {Electrical Engineering and Systems Science - Image and Video Processing, Computer Science - Cryptography and Security},
	pages = {338-1--338-7},
}