Publications
2021

Mirzaalian, Hengameh; Hussein, Mohamed E; Spinoulas, Leonidas; May, Jonathan; Abd-Almageed, Wael
Explaining Face Presentation Attack Detection Using Natural Language Conference
Proceedings of the 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG), 2021.
@conference{Mirzaalian2021,
title = {Explaining Face Presentation Attack Detection Using Natural Language},
author = {Hengameh Mirzaalian and Mohamed E Hussein and Leonidas Spinoulas and Jonathan May and Wael Abd-Almageed},
url = {https://ieeexplore.ieee.org/document/9667024
https://arxiv.org/abs/2111.04862
/hussein-2021-explaining
https://github.com/isicv/padisi_usc_dataset},
eprint = {2111.04862},
archiveprefix = {arXiv},
year = {2021},
date = {2021-12-15},
urldate = {2021-12-15},
booktitle = {Proceedings of the 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG)},
abstract = {A large number of deep neural network based techniques have been developed to address the challenging problem of face presentation attack detection (PAD). Whereas such techniques' focus has been on improving PAD performance in terms of classification accuracy and robustness against unseen attacks and environmental conditions, there exists little attention on the explainability of PAD predictions. In this paper, we tackle the problem of explaining PAD predictions through natural language. Our approach passes feature representations of a deep layer of the PAD model to a language model to generate text describing the reasoning behind the PAD prediction. Due to the limited amount of annotated data in our study, we apply a light-weight LSTM network as our natural language generation model. We investigate how the quality of the generated explanations is affected by different loss functions, including the commonly used word-wise cross entropy loss, a sentence discriminative loss, and a sentence semantic loss. We perform our experiments using face images from a dataset consisting of 1,105 bona-fide and 924 presentation attack samples. Our quantitative and qualitative results show the effectiveness of our model for generating proper PAD explanations through text as well as the power of the sentence-wise losses. To the best of our knowledge, this is the first introduction of a joint biometrics-NLP task. Our dataset can be obtained through our GitHub page.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Li, Jiazhi; Abd-Almageed, Wael
Information-Theoretic Bias Assessment Of Learned Representations Of Pretrained Face Recognition Conference
Proceedings of the 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG), 2021.
@conference{Li2021,
title = {Information-Theoretic Bias Assessment Of Learned Representations Of Pretrained Face Recognition},
author = {Jiazhi Li and Wael Abd-Almageed},
url = {https://arxiv.org/abs/2111.04673
/jiazhi-2021-information},
eprint = {2111.04673},
archiveprefix = {arXiv},
year = {2021},
date = {2021-11-08},
urldate = {2021-11-08},
booktitle = {Proceedings of the 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG)},
abstract = {As equality issues in the use of face recognition have garnered a lot of attention lately, greater efforts have been made to de-biased deep learning models to improve fairness to minorities. However, there is still no clear definition nor sufficient analysis for bias assessment metrics. We propose an information-theoretic, independent bias assessment metric to identify degree of bias against protected demographic attributes from learned representations of pre-trained facial recognition systems. Our metric differs from other methods that rely on classification accuracy or examine the differences between ground truth and predicted labels of protected attributes predicted using a shallow network. Also, we argue, theoretically and experimentally, that logits-level loss is not adequate to explain bias since predictors based on neural networks will always find correlations. Further, we present a synthetic dataset that mitigates the issue of insufficient samples in certain cohorts. Lastly, we establish a benchmark metric by presenting advantages in clear discrimination and small variation comparing with other metrics, and evaluate the performance of different de-biased models with the proposed metric.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Hussein, Mohamed E; AbdAlmageed, Wael
Introducing the DOME Activation Functions Journal Article
In: arXiv preprint arXiv:2109.14798, 2021.
@article{Hussein2021dome,
title = {Introducing the {DOME} Activation Functions},
author = {Mohamed E Hussein and Wael AbdAlmageed},
url = {https://arxiv.org/abs/2109.14798},
eprint = {2109.14798},
archiveprefix = {arXiv},
year = {2021},
date = {2021-09-30},
urldate = {2021-09-30},
journal = {arXiv preprint arXiv:2109.14798},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Cao, Huaigu; AbdAlmageed, W.
Customizable Camera Verification for Media Forensic Conference
Proceedings of the 16th International Conference on Document Analysis and Recognition (ICDAR), Lecture Notes in Computer Science, vol. 12823, Springer, 2021.
@conference{Cao2021,
title = {Customizable Camera Verification for Media Forensic},
author = {Huaigu Cao and W. AbdAlmageed},
editor = {Lladós, J. and Lopresti, D. and Uchida, S.},
url = {https://link.springer.com/chapter/10.1007/978-3-030-86334-0_24},
doi = {10.1007/978-3-030-86334-0_24},
year = {2021},
date = {2021-09-05},
urldate = {2021-09-05},
booktitle = {Proceedings of the 16th International Conference on Document Analysis and Recognition (ICDAR)},
series = {Lecture Notes in Computer Science},
volume = {12823},
pages = {369--379},
publisher = {Springer},
abstract = {This paper presents our research work in camera verification. We expanded a convolutional network-based feature extraction/verification network to a multi-patch input and addressed the concerns over memory limitation and overfitting issue. We have also made careful consideration for custom model training and provided strong results showing promising potential for real-world application of detecting scene text repurposing.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Jati, Arindam; Hsu, Chin-Cheng; Pal, Monisankha; Peri, Raghuveer; AbdAlmageed, Wael; Narayanan, Shrikanth
Adversarial attack and defense strategies for deep speaker recognition systems Journal Article
In: Computer Speech & Language, vol. 68, pp. 101199, 2021.
@article{Jati2021,
title = {Adversarial attack and defense strategies for deep speaker recognition systems},
author = {Arindam Jati and Chin-Cheng Hsu and Monisankha Pal and Raghuveer Peri and Wael AbdAlmageed and Shrikanth Narayanan},
url = {https://www.sciencedirect.com/science/article/abs/pii/S0885230821000061},
doi = {10.1016/j.csl.2021.101199},
year = {2021},
date = {2021-07-01},
urldate = {2021-07-01},
journal = {Computer Speech & Language},
volume = {68},
pages = {101199},
abstract = {Robust speaker recognition, including in the presence of malicious attacks, is becoming increasingly important and essential, especially due to the proliferation of smart speakers and personal agents that interact with an individual’s voice commands to perform diverse and even sensitive tasks. Adversarial attack is a recently revived domain which is shown to be effective in breaking deep neural network-based classifiers, specifically, by forcing them to change their posterior distribution by only perturbing the input samples by a very small amount. Although, significant progress in this realm has been made in the computer vision domain, advances within speaker recognition is still limited. We present an expository paper that considers several adversarial attacks to a deep speaker recognition system, employs strong defense methods as countermeasures, and reports a comprehensive set of ablation studies to better understand the problem. The experiments show that the speaker recognition systems are vulnerable to adversarial attacks, and the strongest attacks can reduce the accuracy of the system from 94% to even 0%. The study also compares the performances of the employed defense methods in detail, and finds adversarial training based on Projected Gradient Descent (PGD) to be the best defense method in our setting. We hope that the experiments presented in this paper provide baselines that can be useful for the research community interested in further studying adversarial robustness of speaker recognition systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Pal, Monisankha; Jati, Arindam; Peri, Raghuveer; Hsu, Chin-Cheng; AbdAlmageed, Wael; Narayanan, Shrikanth
Adversarial defense for deep speaker recognition using hybrid adversarial training Conference
Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6164-6168, 2021.
@conference{Pal2021,
title = {Adversarial defense for deep speaker recognition using hybrid adversarial training},
author = {Pal, Monisankha and Jati, Arindam and Peri, Raghuveer and Hsu, Chin-Cheng and AbdAlmageed, Wael and Narayanan, Shrikanth},
url = {https://ieeexplore.ieee.org/abstract/document/9414843},
doi = {10.1109/ICASSP39728.2021.9414843},
year = {2021},
date = {2021-06-06},
urldate = {2021-06-06},
booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {6164--6168},
abstract = {Deep neural network based speaker recognition systems can easily be deceived by an adversary using minuscule imperceptible perturbations to the input speech samples. These adversarial attacks pose serious security threats to the speaker recognition systems that use speech biometric. To address this concern, in this work, we propose a new defense mechanism based on a hybrid adversarial training (HAT) setup. In contrast to existing works on countermeasures against adversarial attacks in deep speaker recognition that only use class-boundary information by supervised cross-entropy (CE) loss, we propose to exploit additional information from supervised and unsupervised cues to craft diverse and stronger perturbations for adversarial training. Specifically, we employ multi-task objectives using CE, feature-scattering (FS), and margin losses to create adversarial perturbations and include them for adversarial training to enhance the robustness of the model. We conduct speaker recognition experiments on the Librispeech dataset, and compare the performance with state-of-the-art projected gradient descent (PGD)-based adversarial training which employs only CE objective. The proposed HAT improves adversarial accuracy by absolute 3.29% and 3.18% for PGD and Carlini-Wagner (CW) attacks respectively, while retaining high accuracy on benign examples.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Spinoulas, Leonidas; Hussein, Mohamed; Geissbuhler, David; Mathai, Joe; Almeida, Oswin G.; Clivaz, Guillaume; Marcel, Sébastien; AbdAlmageed, Wael
Multispectral Biometrics System Framework: Application to Presentation Attack Detection Journal Article
In: IEEE Sensors Journal (JSEN), vol. 21, no. 13, pp. 15022-15041, 2021.
@article{Spinoulas2021,
title = {Multispectral Biometrics System Framework: Application to Presentation Attack Detection},
author = {Leonidas Spinoulas and Mohamed Hussein and David Geissbuhler and Joe Mathai and Oswin G. Almeida and Guillaume Clivaz and Sébastien Marcel and Wael AbdAlmageed},
url = {https://ieeexplore.ieee.org/document/9409166
https://arxiv.org/abs/2006.07489
/spinoulas-2021-multispectral},
doi = {10.1109/JSEN.2021.3074406},
year = {2021},
date = {2021-04-20},
urldate = {2021-04-20},
journal = {IEEE Sensors Journal (JSEN)},
volume = {21},
number = {13},
pages = {15022--15041},
abstract = {In this work, we present a general framework for building a biometrics system capable of capturing multispectral data from a series of sensors synchronized with active illumination sources. The framework unifies the system design for different biometric modalities and its realization on face, finger and iris data is described in detail. To the best of our knowledge, the presented design is the first to employ such a diverse set of electromagnetic spectrum bands, ranging from visible to long-wave-infrared wavelengths, and is capable of acquiring large volumes of data in seconds, which enabled us to successfully conduct a series of data collection events. We also present a comprehensive analysis on the captured data using a deep-learning classifier for presentation attack detection. Our analysis follows a data-centric approach attempting to highlight the strengths and weaknesses of each spectral band at distinguishing live from fake samples.},
type = {BATL},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Ma, J.; Xie, H.; Han, G.; Chang, S. F.; Galstyan, A.; Abd-Almageed, W.
PAL: Partner-Assisted Learning for Few-Shot Image Classification Conference
Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021.
@conference{J.Ma2021,
title = {{PAL}: Partner-Assisted Learning for Few-Shot Image Classification},
author = {Ma, J. and Xie, H. and Han, G. and Chang, S. F. and Galstyan, A. and Abd-Almageed, W.},
url = {https://arxiv.org/pdf/2109.07607.pdf
/hanchen-2021-pal},
year = {2021},
date = {2021-04-15},
urldate = {2021-04-15},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {10573--10582},
abstract = {Few-shot Learning has been studied to mimic human visual capabilities and learn effective models without the need of exhaustive human annotation. Even though the idea of meta-learning for adaptation has dominated the few-shot learning methods, how to train a feature extractor is still a challenge. In this paper, we focus on the design of training strategy to obtain an elemental representation such that the prototype of each novel class can be estimated from a few labeled samples. We propose a two-stage training scheme, Partner-Assisted Learning (PAL), which first trains a partner encoder to model pair-wise similarities and extract features serving as soft-anchors, and then trains a main encoder by aligning its outputs with soft-anchors while attempting to maximize classification performance. Two alignment constraints from logit-level and feature-level are designed individually. For each few-shot task, we perform prototype classification. Our method consistently outperforms the state-of-the-art method on four benchmarks. Detailed ablation studies of PAL are provided to justify the selection of each component involved in training.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Spinoulas, Leonidas; Mirzaalian, Hengameh; Hussein, Mohamed; AbdAlmageed, Wael
Multi-Modal Fingerprint Presentation Attack Detection: Evaluation on A New Dataset Journal Article
In: IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM), vol. 3, pp. 347-364, 2021, ISSN: 2637-6407.
@article{Spinoulas2021b,
title = {Multi-Modal Fingerprint Presentation Attack Detection: Evaluation on A New Dataset},
author = {Leonidas Spinoulas and Hengameh Mirzaalian and Mohamed Hussein and Wael AbdAlmageed},
url = {https://ieeexplore.ieee.org/document/9399674
https://arxiv.org/abs/2006.07498
https://github.com/isicv/padisi_usc_dataset
/spinoulas-2021-multispectral},
issn = {2637-6407},
year = {2021},
date = {2021-04-09},
urldate = {2021-04-09},
journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science (TBIOM)},
volume = {3},
pages = {347--364},
abstract = {Fingerprint presentation attack detection is becoming an increasingly challenging problem due to the continuous advancement of attack preparation techniques, which generate realistic-looking fake fingerprint presentations. In this work, rather than relying on legacy fingerprint images, which are widely used in the community, we study the usefulness of multiple recently introduced sensing modalities. Our study covers front-illumination imaging using short-wave-infrared, near-infrared, and laser illumination; and back-illumination imaging using near-infrared light. Toward studying the effectiveness of each of these unconventional sensing modalities and their fusion for liveness detection, we conducted a comprehensive analysis using a fully convolutional deep neural network framework. Our evaluation compares different combinations of the new sensing modalities to legacy data from one of our collections, showing the superiority of the new sensing modalities. It also covers the cases of known and unknown attacks and the cases of intra-dataset and inter-dataset evaluations. Our results indicate that the power of our approach stems from the nature of the captured data rather than the employed classification framework, which justifies the extra cost for hardware-based (or hybrid) solutions. The portion of the dataset that is under the control of the authors is publicly released, along with the associated code.},
key = {BATL},
type = {1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Abd-Almageed, Wael; Nevatia, Ram; Verdoliva, Annalisa; Riess, Christian
Digital, Semantic and Physical Analysis of Media Integrity Technical Report
2021.
@techreport{Abd-Almageed2021,
title = {Digital, Semantic and Physical Analysis of Media Integrity},
author = {Wael Abd-Almageed and Ram Nevatia and Annalisa Verdoliva and Christian Riess},
url = {https://apps.dtic.mil/sti/pdfs/AD1125696.pdf},
institution = {University of Southern California Information Sciences Institute},
year = {2021},
date = {2021-03-24},
urldate = {2021-03-24},
abstract = {In this report, we present summary of the digital, physical and semantic image forensics and integrity methods developed by the DiSPARITY team, lead by the University of Southern California Information Sciences Institute, under DARPA’s Media Forensics program between 2016 and 2020. The team also included University of Naples, Italy and University of Erlangen-Nuremberg, Germany. The DiSparity team has developed various state of the digital integrity methods (e.g. NoisePrint, GAN fingerprint and ManTra-Net), physical integrity (e.g. segmentation-free light direction estimation and analysis of incident light direction). },
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}

Xie, Hanchen; Hussein, Mohamed E; Galstyan, Aram; Abd-Almageed, Wael
MUSCLE: Strengthening Semi-Supervised Learning Via Concurrent Unsupervised Learning Using Mutual Information Maximization Conference
Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), 2021.
@conference{Xie2021,
title = {{MUSCLE}: Strengthening Semi-Supervised Learning Via Concurrent Unsupervised Learning Using Mutual Information Maximization},
author = {Hanchen Xie and Mohamed E Hussein and Aram Galstyan and Wael Abd-Almageed},
url = {https://arxiv.org/pdf/2012.00150.pdf
/hanchen-2021-muscle},
year = {2021},
date = {2021-01-30},
urldate = {2021-01-30},
booktitle = {Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
pages = {2586--2595},
abstract = {Deep neural networks are powerful, massively parameterized machine learning models that have been shown to perform well in supervised learning tasks. However, very large amounts of labeled data are usually needed to train deep neural networks. Several semi-supervised learning approaches have been proposed to train neural networks using smaller amounts of labeled data with a large amount of unlabeled data. The performance of these semisupervised methods significantly degrades as the size of labeled data decreases. We introduce Mutual-informationbased Unsupervised & Semi-supervised Concurrent LEarning (MUSCLE), a hybrid learning approach that uses mutual information to combine both unsupervised and semisupervised learning. MUSCLE can be used as a standalone training scheme for neural networks, and can also be incorporated into other learning approaches. We show that the proposed hybrid model outperforms state of the art on several standard benchmarks, including CIFAR-10, CIFAR100, and Mini-Imagenet. Furthermore, the performance gain consistently increases with the reduction in the amount of labeled data, as well as in the presence of bias. We also show that MUSCLE has the potential to boost the classification performance when used in the fine-tuning phase for a model pre-trained only on unlabeled data.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Cheng, Jiaxin; Nandi, Soumyaroop; Natarajan, Prem; Abd-Almageed, Wael
Sign: Spatial-information incorporated generative network for generalized zero-shot semantic segmentation Conference
Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021.
@conference{Cheng2021,
title = {Sign: Spatial-information incorporated generative network for generalized zero-shot semantic segmentation},
author = {Jiaxin Cheng and Soumyaroop Nandi and Prem Natarajan and Wael Abd-Almageed},
url = {https://arxiv.org/pdf/2108.12517.pdf},
year = {2021},
date = {2021-01-21},
urldate = {2021-01-21},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {9556--9566},
abstract = {Unlike conventional zero-shot classification, zero-shot semantic segmentation predicts a class label at the pixel level instead of the image level. When solving zero-shot semantic segmentation problems, the need for pixel-level prediction with surrounding context motivates us to incorporate spatial information using positional encoding. We improve standard positional encoding by introducing the concept of Relative Positional Encoding, which integrates spatial information at the feature level and can handle arbitrary image sizes. Furthermore, while self-training is widely used in zero-shot semantic segmentation to generate pseudo-labels, we propose a new knowledge-distillation-inspired self-training strategy, namely Annealed Self-Training, which can automatically assign different importance to pseudo-labels to improve performance. We systematically study the proposed Relative Positional Encoding and Annealed Self-Training in a comprehensive experimental evaluation, and our empirical results confirm the effectiveness of our method on three benchmark datasets.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Rostami, Mohammad; Spinoulas, Leonidas; Hussein, Mohamed; Mathai, Joe; Abd-Almageed, Wael
Detection and continual learning of novel face presentation attacks Conference
Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021.
@conference{Rostami2021,
title = {Detection and continual learning of novel face presentation attacks},
author = {Mohammad Rostami and Leonidas Spinoulas and Mohamed Hussein and Joe Mathai and Wael Abd-Almageed},
url = {https://openaccess.thecvf.com/content/ICCV2021/html/Rostami_Detection_and_Continual_Learning_of_Novel_Face_Presentation_Attacks_ICCV_2021_paper.html
/spinoulas-2021-detection},
year = {2021},
date = {2021-01-20},
urldate = {2021-01-20},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {14851--14860},
abstract = {Advances in deep learning, combined with availability of large datasets, have led to impressive improvements in face presentation attack detection research. However, state of the art face antispoofing systems are still vulnerable to novel types of attacks that are never seen during training. Moreover, even if such attacks are correctly detected, these systems lack the ability to adapt to newly encountered attacks. The post-training ability of continually detecting new types of attacks and self-adaptation to identify these attack types, after the initial detection phase, is highly appealing. In this paper, we enable a deep neural network to detect anomalies in the observed input data points as potential new types of attacks by suppressing the confidence-level of the network outside the training samples' distribution. We then use experience replay to update the model to incorporate knowledge about new types of attacks without forgetting the past learned attack types. Experimental results are provided to demonstrate the effectiveness of the proposed method on the OULU and Idiap datasets as well as a newly introduced dataset, all of which exhibit a variety of attack types.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Sabir, Ekraam; Nandi, Soumyaroop; Abd-Almageed, Wael; Natarajan, Prem
BioFors: A Large Biomedical Image Forensics Dataset Conference
Proceedings of the 2021 IEEE/CVF International Conference on Computer Vision (ICCV), 2021.
@conference{Sabir2021,
title = {{BioFors}: A Large Biomedical Image Forensics Dataset},
author = {Ekraam Sabir and Soumyaroop Nandi and Wael Abd-Almageed and Prem Natarajan},
url = {https://openaccess.thecvf.com/content/ICCV2021/html/Sabir_BioFors_A_Large_Biomedical_Image_Forensics_Dataset_ICCV_2021_paper.html
/ekraam-2021-biofors/},
year = {2021},
date = {2021-01-15},
urldate = {2021-01-15},
booktitle = {Proceedings of the 2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {10963--10973},
abstract = {Research in media forensics has gained traction to combat the spread of misinformation. However, most of this research has been directed towards content generated on social media. Biomedical image forensics is a related problem, where manipulation or misuse of images reported in biomedical research documents is of serious concern. The problem has failed to gain momentum beyond an academic discussion due to an absence of benchmark datasets and standardized tasks. In this paper we present BioFors -- the first dataset for benchmarking common biomedical image manipulations. BioFors comprises 47,805 images extracted from 1,031 open-source research papers. Images in BioFors are divided into four categories -- Microscopy, Blot/Gel, FACS and Macroscopy. We also propose three tasks for forensic analysis -- external duplication detection, internal duplication detection and cut/sharp-transition detection. We benchmark BioFors on all tasks with suitable state-of-the-art algorithms. Our results and analysis show that existing algorithms developed on common computer vision datasets are not robust when applied to biomedical images, validating that more research is required to address the unique challenges of biomedical image forensics.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Sabir, Ekraam; Jaiswal, Ayush; AbdAlmageed, Wael; Natarajan, Prem
MEG: Multi-Evidence GNN for Multimodal Semantic Forensics Conference
Proceedings of the 25th International Conference on Pattern Recognition (ICPR), 2021.
@conference{Sabir2021b,
title = {{MEG}: Multi-Evidence {GNN} for Multimodal Semantic Forensics},
author = {Ekraam Sabir and Ayush Jaiswal and Wael AbdAlmageed and Prem Natarajan},
url = {https://ieeexplore.ieee.org/abstract/document/9413053
/ekraam-2021-meg},
doi = {10.1109/ICPR48806.2021.9413053},
year = {2021},
date = {2021-01-10},
urldate = {2021-01-10},
booktitle = {Proceedings of the 25th International Conference on Pattern Recognition (ICPR)},
pages = {9804--9811},
abstract = {Fake news often involves semantic manipulations across modalities such as image, text, location etc and requires the development of multimodal semantic forensics for its detection. Recent research has centered the problem around images, calling it image repurposing - where a digitally unmanipulated image is semantically misrepresented by means of its accompanying multimodal metadata such as captions, location, etc. The image and metadata together comprise a multimedia package. The problem setup requires algorithms to perform multimodal semantic forensics to authenticate a query multimedia package using a reference dataset of potentially related packages as evidences. Existing methods are limited to using a single evidence (retrieved package), which ignores potential performance improvement from the use of multiple evidences. In this work, we introduce a novel graph neural network based model for multimodal semantic forensics, which effectively utilizes multiple retrieved packages as evidences and is scalable with the number of evidences. We compare the scalability and performance of our model against existing methods. Experimental results show that the proposed model outperforms existing state-of-the-art algorithms with an error reduction of up to 25 %.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Hsu, I; Guo, Xiao; AbdAlmageed, Wael; Natarajan, Premkumar; Peng, Nanyun
MrGCN: Mirror Graph Convolution Network for Relation Extraction with Long-Term Dependencies Journal Article
In: arXiv preprint arXiv:2101.00124, 2021.
@article{Hsu2021,
title = {{MrGCN}: Mirror Graph Convolution Network for Relation Extraction with Long-Term Dependencies},
author = {I Hsu and Xiao Guo and Wael AbdAlmageed and Premkumar Natarajan and Nanyun Peng},
url = {https://arxiv.org/abs/2101.00124},
eprint = {2101.00124},
archiveprefix = {arXiv},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {arXiv preprint arXiv:2101.00124},
abstract = {The ability to capture complex linguistic structures and long-term dependencies among words in the passage is essential for discourse-level relation extraction (DRE) tasks. Graph neural networks (GNNs), one of the methods to encode dependency graphs, have been shown effective in prior works for DRE. However, relatively little attention has been paid to receptive fields of GNNs, which can be crucial for cases with extremely long text that requires discourse understanding. In this work, we leverage the idea of graph pooling and propose to use pooling-unpooling framework on DRE tasks. The pooling branch reduces the graph size and enables the GNNs to obtain larger receptive fields within fewer layers; the unpooling branch restores the pooled graph to its original resolution so that representations for entity mention can be extracted. We propose Clause Matching (CM), a novel linguistically inspired graph pooling method for NLP tasks. Experiments on two DRE datasets demonstrate that our models significantly improve over baselines when modeling long-term dependencies is required, which shows the effectiveness of the pooling-unpooling framework and our CM pooling method.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2020

AbdAlmageed, Wael; Mirzaalian, Hengameh; Guo, Xiao; Randolph, Linda M; Tanawattanacharoen, Veeraya K; Geffner, Mitchell E; Ross, Heather M; Kim, Mimi S
Assessment of Facial Morphologic Features in Patients With Congenital Adrenal Hyperplasia Using Deep Learning Journal Article
In: JAMA network open, vol. 3, no. 11, pp. e2022199-e2022199, 2020.
@article{AbdAlmageed2020,
title = {Assessment of Facial Morphologic Features in Patients With Congenital Adrenal Hyperplasia Using Deep Learning},
author = {Wael AbdAlmageed and Hengameh Mirzaalian and Xiao Guo and Linda M Randolph and Veeraya K Tanawattanacharoen and Mitchell E Geffner and Heather M Ross and Mimi S Kim},
url = {https://jamanetwork.com/journals/jamanetworkopen/fullarticle/2773071
/abdalmageed-2020-assessment},
doi = {10.1001/jamanetworkopen.2020.22199},
year = {2020},
date = {2020-11-02},
urldate = {2020-11-02},
journal = {JAMA Network Open},
volume = {3},
number = {11},
pages = {e2022199--e2022199},
abstract = {Importance Congenital adrenal hyperplasia (CAH) is the most common primary adrenal insufficiency in children, involving excess androgens secondary to disrupted steroidogenesis as early as the seventh gestational week of life. Although structural brain abnormalities are seen in CAH, little is known about facial morphology.
Objective To investigate differences in facial morphologic features between patients with CAH and control individuals with use of machine learning.
Design, Setting, and Participants This cross-sectional study was performed at a pediatric tertiary center in Southern California, from November 2017 to December 2019. Patients younger than 30 years with a biochemical diagnosis of classical CAH due to 21-hydroxylase deficiency and otherwise healthy controls were recruited from the clinic, and face images were acquired. Additional controls were selected from public face image data sets.
Main Outcomes and Measures The main outcome was prediction of CAH, as performed by machine learning (linear discriminant analysis, random forests, deep neural networks). Handcrafted features and learned representations were studied for CAH score prediction, and deformation analysis of facial landmarks and regionwise analyses were performed. A 6-fold cross-validation strategy was used to avoid overfitting and bias.
Results The study included 102 patients with CAH (62 [60.8%] female; mean [SD] age, 11.6 [7.1] years) and 59 controls (30 [50.8%] female; mean [SD] age, 9.0 [5.2] years) from the clinic and 85 controls (48 [60%] female; age, <29 years) from face databases. With use of deep neural networks, a mean (SD) AUC of 92% (3%) was found for accurately predicting CAH over 6 folds. With use of classical machine learning and handcrafted facial features, mean (SD) AUCs of 86% (5%) in linear discriminant analysis and 83% (3%) in random forests were obtained for predicting CAH over 6 folds. There was a deviation of facial features between groups using deformation fields generated from facial landmark templates. Regionwise analysis and class activation maps (deep learning of regions) revealed that the nose and upper face were most contributory (mean [SD] AUC: 69% [17%] and 71% [13%], respectively).
Conclusions and Relevance The findings suggest that facial morphologic features in patients with CAH is distinct and that deep learning can discover subtle facial features to predict CAH. Longitudinal study of facial morphology as a phenotypic biomarker may help expand understanding of adverse lifespan outcomes for patients with CAH.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Objective To investigate differences in facial morphologic features between patients with CAH and control individuals with use of machine learning.
Design, Setting, and Participants This cross-sectional study was performed at a pediatric tertiary center in Southern California, from November 2017 to December 2019. Patients younger than 30 years with a biochemical diagnosis of classical CAH due to 21-hydroxylase deficiency and otherwise healthy controls were recruited from the clinic, and face images were acquired. Additional controls were selected from public face image data sets.
Main Outcomes and Measures The main outcome was prediction of CAH, as performed by machine learning (linear discriminant analysis, random forests, deep neural networks). Handcrafted features and learned representations were studied for CAH score prediction, and deformation analysis of facial landmarks and regionwise analyses were performed. A 6-fold cross-validation strategy was used to avoid overfitting and bias.
Results The study included 102 patients with CAH (62 [60.8%] female; mean [SD] age, 11.6 [7.1] years) and 59 controls (30 [50.8%] female; mean [SD] age, 9.0 [5.2] years) from the clinic and 85 controls (48 [60%] female; age, <29 years) from face databases. With use of deep neural networks, a mean (SD) AUC of 92% (3%) was found for accurately predicting CAH over 6 folds. With use of classical machine learning and handcrafted facial features, mean (SD) AUCs of 86% (5%) in linear discriminant analysis and 83% (3%) in random forests were obtained for predicting CAH over 6 folds. There was a deviation of facial features between groups using deformation fields generated from facial landmark templates. Regionwise analysis and class activation maps (deep learning of regions) revealed that the nose and upper face were most contributory (mean [SD] AUC: 69% [17%] and 71% [13%], respectively).
Conclusions and Relevance The findings suggest that facial morphologic features in patients with CAH is distinct and that deep learning can discover subtle facial features to predict CAH. Longitudinal study of facial morphology as a phenotypic biomarker may help expand understanding of adverse lifespan outcomes for patients with CAH.

Arab, Mohammad Amin; Moghadam, Puria Azadi; Hussein, Mohamed; Abd-Almageed, Wael; Hefeeda, Mohamed
Revealing True Identity: Detecting Makeup Attacks in Face-based Biometric Systems Conference
Proceedings of the 28th ACM International Conference on Multimedia (ACM MM) Seattle, WA USA, 2020.
@conference{Arab2020,
title = {Revealing True Identity: Detecting Makeup Attacks in Face-based Biometric Systems},
author = {Mohammad Amin Arab and Puria Azadi Moghadam and Mohamed Hussein and Wael Abd-Almageed and Mohamed Hefeeda},
url = {https://dl.acm.org/doi/pdf/10.1145/3394171.3413606
/hussein-2020-revealing},
doi = {10.1145/3394171.3413606},
year = {2020},
date = {2020-10-20},
urldate = {2020-10-20},
booktitle = {Proceedings of the 28th ACM International Conference on Multimedia (ACM MM) Seattle, WA USA},
pages = {3568--3576},
abstract = {Face-based authentication systems are among the most commonly used biometric systems, because of the ease of capturing face images at a distance and in non-intrusive way. These systems are, however, susceptible to various presentation attacks, including printed faces, artificial masks, and makeup attacks. In this paper, we propose a novel solution to address makeup attacks, which are the hardest to detect in such systems because makeup can substantially alter the facial features of a person, including making them appear older/younger by adding/hiding wrinkles, modifying the shape of eyebrows, beard, and moustache, and changing the color of lips and cheeks. In our solution, we design a generative adversarial network for removing the makeup from face images while retaining their essential facial features and then compare the face images before and after removing makeup. We collect a large dataset of various types of makeup, especially malicious makeup that can be used to break into remote unattended security systems. This dataset is quite different from existing makeup datasets that mostly focus on cosmetic aspects. We conduct an extensive experimental study to evaluate our method and compare it against the state-of-the art using standard objective metrics commonly used in biometric systems as well as subjective metrics collected through a user study. Our results show that the proposed solution produces high accuracy and substantially outperforms the closest works in the literature.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Masi, Iacopo; Killekar, Aditya; Mascarenhas, Royston Marian; Gurudatt, Shenoy Pratik; AbdAlmageed, Wael
Two-branch recurrent network for isolating deepfakes in videos Conference
Proceedings of the 16th European Conference on Computer Vision – ECCV. Lecture Notes in Computer Science, vol. 12352, Springer, 2020.
@conference{Masi2020,
title = {Two-branch recurrent network for isolating deepfakes in videos},
author = {Iacopo Masi and Aditya Killekar and Royston Marian Mascarenhas and Shenoy Pratik Gurudatt and Wael AbdAlmageed},
editor = {Vedaldi, A. and Bischof, H. and Brox, T. and Frahm, J. M.},
url = {https://link.springer.com/chapter/10.1007/978-3-030-58571-6_39
/masi-2020-two},
doi = {10.1007/978-3-030-58571-6_39},
year = {2020},
date = {2020-08-23},
urldate = {2020-08-23},
booktitle = {Proceedings of the 16th European Conference on Computer Vision – ECCV. Lecture Notes in Computer Science},
volume = {12352},
pages = {667--684},
publisher = {Springer},
abstract = {The current spike of hyper-realistic faces artificially generated using deepfakes calls for media forensics solutions that are tailored to video streams and work reliably with a low false alarm rate at the video level. We present a method for deepfake detection based on a two-branch network structure that isolates digitally manipulated faces by learning to amplify artifacts while suppressing the high-level face content. Unlike current methods that extract spatial frequencies as a preprocessing step, we propose a two-branch structure: one branch propagates the original information, while the other branch suppresses the face content yet amplifies multi-band frequencies using a Laplacian of Gaussian (LoG) as a bottleneck layer. To better isolate manipulated faces, we derive a novel cost function that, unlike regular classification, compresses the variability of natural faces and pushes away the unrealistic facial samples in the feature space. Our two novel components show promising results on the FaceForensics+ +, Celeb-DF, and Facebook’s DFDC preview benchmarks, when compared to prior work. We then offer a full, detailed ablation study of our network architecture and cost function. Finally, although the bar is still high to get very remarkable figures at a very low false alarm rate, our study shows that we can achieve good video-level performance when cross-testing in terms of video-level AUC.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Masi, Iacopo; Mathai, Joe; AbdAlmageed, Wael
Towards Learning Structure via Consensus for Face Segmentation and Parsing Journal Article
In: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Seattle, 16 – 18 June 2020 IEEE Xplore, 2020.
@article{masi2020towards,
title = {Towards Learning Structure via Consensus for Face Segmentation and Parsing},
author = {Iacopo Masi and Joe Mathai and Wael AbdAlmageed},
url = {https://openaccess.thecvf.com/content_CVPR_2020/papers/Masi_Towards_Learning_Structure_via_Consensus_for_Face_Segmentation_and_Parsing_CVPR_2020_paper.pdf},
doi = {10.1109/CVPR42600.2020.00555},
year = {2020},
date = {2020-08-05},
urldate = {2020-08-05},
journal = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Seattle, 16 – 18 June 2020 IEEE Xplore},
type = {BATL},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Guo, Xiao; Mirzaalian, Hengameh; Sabir, Ekraam; Jaiswal, Ayush; Abd-Almageed, Wael
Cord19sts: Covid-19 semantic textual similarity dataset Journal Article
In: arXiv preprint arXiv:2007.02461, 2020.
@article{guo2020cord19sts,
title = {{CORD19STS}: {COVID-19} Semantic Textual Similarity Dataset},
author = {Xiao Guo and Hengameh Mirzaalian and Ekraam Sabir and Ayush Jaiswal and Wael Abd-Almageed},
url = {https://arxiv.org/abs/2007.02461},
year = {2020},
date = {2020-07-05},
urldate = {2020-07-05},
journal = {arXiv preprint arXiv:2007.02461},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Abati, D.; AbdAlmageed, W.; Adeli, E.
2019 Index IEEE Transactions on Pattern Analysis and Machine Intelligence Vol. 41 Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 42, 2020.
@article{tpami2019index,
title = {2019 Index {IEEE} Transactions on Pattern Analysis and Machine Intelligence Vol. 41},
author = {D Abati and W AbdAlmageed and E Adeli and M Aertsen and A Agudo and S Aguinaga and C Ahuja and N Ahuja and T Ajanthan and Z Akata and LT Alemu and M Algarni and L An and SE Anthony and S Anwar and R Arandjelovic and S Ardeshir and RT Arn and F Arrigoni and IB Ayed and RV Babu and AD Bagdanov and M Bahri and S Bai and X Bai and C Bailer and T Baltrusaitis and S Banerjee and SA Bargal and B Barz and D Batra and D Bau and E Beck and E Belilovsky and CF Benitez-Quiroz and A Bera and J Bian and A Birk and M Blaschko and L Blouvshtein},
url = {https://www.computer.org/csdl/journal/tp/2020/01},
year = {2020},
date = {2020-04-07},
urldate = {2020-04-07},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = {42},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Jaiswal, Ayush; Moyer, Daniel; Steeg, Greg Ver; AbdAlmageed, Wael; Natarajan, Premkumar
Invariant representations through adversarial forgetting Journal Article
In: Proceedings of the AAAI Conference on Artificial Intelligence, 2020.
@article{jaiswal2020invariant,
title = {Invariant representations through adversarial forgetting},
author = {Ayush Jaiswal and Daniel Moyer and Greg Ver Steeg and Wael AbdAlmageed and Premkumar Natarajan},
url = {https://ojs.aaai.org/index.php/AAAI/article/view/5850},
year = {2020},
date = {2020-04-03},
urldate = {2020-04-03},
journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Guo, Xiao; Mirzaalian, Hengameh; Abd-Almageed, Wael; Randolph, Linda M; Tanawattanacharoen, Veeraya K; Geffner, Mitchell E; Ross, Heather M; Kim, Mimi S
SAT-087 Identifying Distinct Facial Dysmorphology in Youth with Congenital Adrenal Hyperplasia Using Deep Learning Techniques Journal Article
In: Journal of the Endocrine Society, 2020.
@article{guo2020sat087,
title = {{SAT-087} Identifying Distinct Facial Dysmorphology in Youth with Congenital Adrenal Hyperplasia Using Deep Learning Techniques},
author = {Xiao Guo and Hengameh Mirzaalian and Wael Abd-Almageed and Linda M Randolph and Veeraya K Tanawattanacharoen and Mitchell E Geffner and Heather M Ross and Mimi S Kim},
url = {https://academic.oup.com/jes/article/4/Supplement_1/SAT-087/5833219?login=true},
year = {2020},
date = {2020-04-01},
urldate = {2020-04-01},
journal = {Journal of the Endocrine Society},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Mathai, J.; Masi, I.; AbdAlmageed, W.
Does Generative Face Completion Help Face Recognition? Journal Article
In: Proceedings of 12th IAPR International Conference on Biometrics (ICB) Crete, Greece, 4-7 June 2019 IEEE Xplore, 2020.
@article{mathai2020does,
title = {Does Generative Face Completion Help Face Recognition?},
author = {Mathai, J. and Masi, I. and AbdAlmageed, W.},
url = {https://ieeexplore.ieee.org/abstract/document/8987388
/mathai-2021-does},
doi = {10.1109/ICB45273.2019.8987388},
year = {2020},
date = {2020-02-10},
urldate = {2020-02-10},
journal = {Proceedings of 12th IAPR International Conference on Biometrics (ICB) Crete, Greece, 4-7 June 2019 IEEE Xplore},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2019

Jaiswal, Ayush; Brekelmans, Rob; Moyer, Daniel; Steeg, Greg Ver; AbdAlmageed, Wael; Natarajan, Premkumar
Discovery and separation of features for invariant representation learning Journal Article
In: arXiv preprint arXiv:1912.00646, 2019.
@article{jaiswal2019discovery,
title = {Discovery and separation of features for invariant representation learning},
author = {Ayush Jaiswal and Rob Brekelmans and Daniel Moyer and Greg Ver Steeg and Wael AbdAlmageed and Premkumar Natarajan},
url = {https://arxiv.org/abs/1912.00646},
year = {2019},
date = {2019-12-02},
urldate = {2019-12-02},
journal = {arXiv preprint arXiv:1912.00646},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Xiong, Fei; AbdAlmageed, Wael
Unknown Presentation Attack Detection with Face RGB Images Journal Article
In: Proceedings of 9th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS) Los Angeles, CA/USA, 22-26 October 2018 IEEE Xplore, 2019.
@article{xiong2019unknown,
title = {Unknown Presentation Attack Detection with Face {RGB} Images},
author = {Fei Xiong and Wael AbdAlmageed},
url = {https://ieeexplore.ieee.org/abstract/document/8698574},
doi = {10.1109/BTAS.2018.8698574},
year = {2019},
date = {2019-07-25},
urldate = {2019-07-25},
journal = {Proceedings of 9th IEEE International Conference on Biometrics: Theory, Applications, and Systems (BTAS) Los Angeles, CA/USA, 22-26 October 2018 IEEE Xplore},
type = {BATL},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

AbdAlmageed, Wael
Anti-spoofing's state-of-the-art: deep learning meets super sensors Journal Article
In: Biometric Technology Today, 2019.
@article{abdalmageed2019antispoofing,
title = {Anti-spoofing's state-of-the-art: deep learning meets super sensors},
author = {Wael AbdAlmageed},
url = {https://www.sciencedirect.com/science/article/abs/pii/S0969476519300992},
year = {2019},
date = {2019-07-01},
urldate = {2019-07-01},
journal = {Biometric Technology Today},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Jaiswal, A.; Xia, S.; Masi, I.; AbdAlmageed, W.
RoPAD: Robust Presentation Attack Detection through Unsupervised Adversarial Invariance Journal Article
In: Proceedings of 12th IAPR International Conference on Biometrics (ICB) Crete, Greece, 2019.
@article{Jaiswal2019,
title = {{RoPAD}: Robust Presentation Attack Detection through Unsupervised Adversarial Invariance},
author = {A. Jaiswal and S. Xia and I. Masi and W. AbdAlmageed},
url = {https://ieeexplore.ieee.org/abstract/document/8987276},
doi = {10.1109/ICB45273.2019.8987276},
year = {2019},
date = {2019-06-02},
urldate = {2019-06-02},
journal = {Proceedings of 12th IAPR International Conference on Biometrics (ICB) Crete, Greece},
type = {BATL},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Mirzaalian, Hengameh; Hussein, Mohamed; Abd-Almageed, Wael
On the Effectiveness of Laser Speckle Contrast Imaging and Deep Neural Networks for Detecting Known and Unknown Fingerprint Presentation Attacks Journal Article
In: Proceedings of 12th IAPR International Conference on Biometrics (ICB) Crete, Greece, 4-7 June 2019 , 2019.
@article{Mirzaalian2019,
title = {On the Effectiveness of Laser Speckle Contrast Imaging and Deep Neural Networks for Detecting Known and Unknown Fingerprint Presentation Attacks},
author = {Hengameh Mirzaalian and Mohamed Hussein and Wael Abd-Almageed},
url = {https://ieeexplore.ieee.org/abstract/document/8987428},
doi = {10.1109/ICB45273.2019.8987428},
year = {2019},
date = {2019-06-02},
urldate = {2019-06-02},
journal = {Proceedings of 12th IAPR International Conference on Biometrics (ICB) Crete, Greece, 4-7 June 2019},
type = {BATL},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Jaiswal, Ayush; Wu, Yue; AbdAlmageed, Wael; Natarajan, Premkumar
Unified adversarial invariance Journal Article
In: arXiv preprint arXiv:1905.03629, 2019.
@article{jaiswal2019unified,
title = {Unified adversarial invariance},
author = {Ayush Jaiswal and Yue Wu and Wael AbdAlmageed and Premkumar Natarajan},
url = {https://arxiv.org/abs/1905.03629},
year = {2019},
date = {2019-05-07},
urldate = {2019-05-07},
journal = {arXiv preprint arXiv:1905.03629},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Sun, C.; Jagannathan, A.; Habif, J. L.; Hussein, M.; Spinoulas, L.; Abd-Almageed, W.
Quantitative Laser Speckle Contrast Imaging for Presentation Attack Detection in Biometric Authentication Systems Journal Article
In: Proceedings of SPIE 11020, Smart Biomedical and Physiological Sensor Technology XV, 1102008 SPIE Digital Library , 2019.
@article{sun2019quantitative,
title = {Quantitative Laser Speckle Contrast Imaging for Presentation Attack Detection in Biometric Authentication Systems},
author = {C. Sun and A. Jagannathan and J. L. Habif and M. Hussein and L. Spinoulas and W. Abd-Almageed},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11020/1102008/Quantitative-laser-speckle-contrast-imaging-for-presentation-attack-detection-in/10.1117/12.2518268.full?SSO=1},
year = {2019},
date = {2019-05-02},
urldate = {2019-05-02},
journal = {Proceedings of SPIE 11020, Smart Biomedical and Physiological Sensor Technology XV, 1102008 SPIE Digital Library},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Hussein, Mohamed; Spinoulas, Leonidas; Xiong, Fei; AbdAlmageed, Wael
Fingerprint Presentation Attack Detection Using a Novel Multi-Spectral Capture Device and Patch-Based Convolutional Neural Networks Journal Article
In: Proceedings of IEEE International Workshop on Information Forensics and Security (WIFS), 2019.
@article{Hussein2019,
title = {Fingerprint Presentation Attack Detection Using a Novel Multi-Spectral Capture Device and Patch-Based Convolutional Neural Networks},
author = {Mohamed Hussein and Leonidas Spinoulas and Fei Xiong and Wael AbdAlmageed},
url = {https://ieeexplore.ieee.org/abstract/document/8630773},
doi = {10.1109/WIFS.2018.8630773},
year = {2019},
date = {2019-01-31},
urldate = {2019-01-31},
journal = {Proceedings of IEEE International Workshop on Information Forensics and Security (WIFS)},
type = {BATL},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

X., Shi; Wu, Y.; Cao, H.; Burns, G.
Layout-aware Subfigure Decomposition for Complex Figures in the Biomedical Literature Journal Article
In: International Conference on Acoustics, Speech, and Signal Processing (ICASSP), 2019, pp. 1343-1347, 2019.
@article{shi2019layout,
title = {Layout-aware Subfigure Decomposition for Complex Figures in the Biomedical Literature},
author = {Shi, X. and Wu, Y. and Cao, H. and Burns, G.},
year = {2019},
date = {2019-01-24},
urldate = {2019-01-24},
journal = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP), 2019},
pages = {1343--1347},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Y., Wu; Abd-Almageed, W.; Natarajan, P.
ManTra-Net: Manipulation Tracing Network for Detection and Localization of Image Forgeries with Anomalous Features Journal Article
In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 9543-9552, 2019.
@article{wu2019mantranet,
title = {{ManTra-Net}: Manipulation Tracing Network for Detection and Localization of Image Forgeries with Anomalous Features},
author = {Wu, Y. and Abd-Almageed, W. and Natarajan, P.},
url = {https://openaccess.thecvf.com/content_CVPR_2019/papers/Wu_ManTra-Net_Manipulation_Tracing_Network_for_Detection_and_Localization_of_Image_CVPR_2019_paper.pdf
/zhiqi-2019-mantra},
year = {2019},
date = {2019-01-17},
urldate = {2019-01-17},
journal = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019},
pages = {9543--9552},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Jaiswal, Ayush; Wu, Yue; AbdAlmageed, Wael; Masi, Iacopo; Natarajan, Premkumar
Aird: Adversarial learning framework for image repurposing detection Journal Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019.
@article{jaiswal2019aird,
title = {{AIRD}: Adversarial learning framework for image repurposing detection},
author = {Ayush Jaiswal and Yue Wu and Wael AbdAlmageed and Iacopo Masi and Premkumar Natarajan},
url = {https://openaccess.thecvf.com/content_CVPR_2019/html/Jaiswal_AIRD_Adversarial_Learning_Framework_for_Image_Repurposing_Detection_CVPR_2019_paper.html
/jaiswal-2019-aird},
year = {2019},
date = {2019-01-02},
urldate = {2019-01-02},
journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

J., Cheng; Wu, Y.; Abd-Almageed, W.; Natarajan, P.
QATM: Quality-Aware Template Matching for Deep Learning Conference
2019.
@conference{cheng2019qatm,
title = {{QATM}: Quality-Aware Template Matching for Deep Learning},
author = {Cheng, J. and Wu, Y. and Abd-Almageed, W. and Natarajan, P.},
url = {https://openaccess.thecvf.com/content_CVPR_2019/papers/Cheng_QATM_Quality-Aware_Template_Matching_for_Deep_Learning_CVPR_2019_paper.pdf
/cheng-2019-qatm},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019},
pages = {11553--11562},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}

Sabir, Ekraam; Cheng, Jiaxin; Jaiswal, Ayush; AbdAlmageed, Wael; Masi, Iacopo; Natarajan, Prem
Recurrent convolutional strategies for face manipulation detection in videos Journal Article
In: Interfaces (GUI), vol. 3, 2019.
@article{sabir2019recurrent,
title = {Recurrent convolutional strategies for face manipulation detection in videos},
author = {Ekraam Sabir and Jiaxin Cheng and Ayush Jaiswal and Wael AbdAlmageed and Iacopo Masi and Prem Natarajan},
url = {https://openaccess.thecvf.com/content_CVPRW_2019/papers/Media%20Forensics/Sabir_Recurrent_Convolutional_Strategies_for_Face_Manipulation_Detection_in_Videos_CVPRW_2019_paper.pdf},
year = {2019},
date = {2019-01-01},
urldate = {2019-01-01},
journal = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2019},
volume = {3},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2018

Li, Zekun; Wu, Yue; Abd-Almageed, Wael; Natarajan, Prem
Weighted Feature Pooling Network in Template-Based Recognition Journal Article
In: Asian Conference on Computer Vision, 2018.
@article{li2018weighted,
title = {Weighted Feature Pooling Network in Template-Based Recognition},
author = {Zekun Li and Yue Wu and Wael Abd-Almageed and Prem Natarajan},
url = {https://link.springer.com/chapter/10.1007/978-3-030-20873-8_28},
year = {2018},
date = {2018-12-02},
urldate = {2018-12-02},
journal = {Asian Conference on Computer Vision},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

A., Jaiswal; Wu, Y.; Natarajan, P.
Unsupervised Adversarial Invariance Journal Article
In: Proceedings of Advances in Neural Information Processing Systems (NIPS). ACM, 2018, pp. 5097-5107, 2018.
@article{jaiswal2018unsupervised,
title = {Unsupervised Adversarial Invariance},
author = {Jaiswal, A. and Wu, Y. and Natarajan, P.},
url = {https://arxiv.org/pdf/1809.10083.pdf
/mathai-2018-unsupervised},
year = {2018},
date = {2018-09-26},
urldate = {2018-09-26},
journal = {Proceedings of Advances in Neural Information Processing Systems (NIPS). ACM, 2018},
pages = {5097--5107},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

J., Cheng; Wu, Y.; Abd-Almageed, W.; Natarajan, P.
Image-to-GPS Verification Through a Bottom-Up Pattern Matching Network Journal Article
In: Asian Conference on Computer Vision, pp. 546-561. Springer, Cham, 2018, 2018.
@article{cheng2018image,
title = {Image-to-{GPS} Verification Through a Bottom-Up Pattern Matching Network},
author = {Cheng, J. and Wu, Y. and Abd-Almageed, W. and Natarajan, P.},
year = {2018},
date = {2018-07-11},
urldate = {2018-07-11},
journal = {Asian Conference on Computer Vision, Springer, Cham, 2018},
pages = {546--561},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

E., Sabir; Wu, Y.; Abd-Almageed, W.; Natarajan, P.
Deep Multimodal Image-Repurposing Detection Journal Article
In: Proceedings of 26th ACM Multimedia Conference (ACMMM). ACM, 2018, pp. 1337-1345, 2018.
@article{sabir2018deep,
title = {Deep Multimodal Image-Repurposing Detection},
author = {Sabir, E. and Wu, Y. and Abd-Almageed, W. and Natarajan, P.},
year = {2018},
date = {2018-06-13},
urldate = {2018-06-13},
journal = {Proceedings of 26th ACM Multimedia Conference (ACMMM). ACM, 2018},
pages = {1337--1345},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Rawls, S.; Cao, H.; Mathai, J.; Natarajan,
How To Efficiently Increase Resolution in Neural OCR Models. Journal Article
In: 2018 2nd International Workshop on Arabic Script Analysis and Recognition (ASAR). 2018, 2018.
@article{rawls2018how,
title = {How To Efficiently Increase Resolution in Neural {OCR} Models},
author = {Rawls, S. and Cao, H. and Mathai, J. and Natarajan, P.},
year = {2018},
date = {2018-04-11},
urldate = {2018-04-11},
journal = {2018 2nd International Workshop on Arabic Script Analysis and Recognition (ASAR). 2018},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Jaiswal, A.; Abd-Almageed, W.; Wu, Y.; Natarajan, P
Bidirectional Conditional Generative Adversarial Networks Journal Article
In: Proceedings of Asian Conference on Computer Vision (ACCV), Springer, 2018, pp. 216-232, 2018.
@article{jaiswal2018bidirectional,
title = {Bidirectional Conditional Generative Adversarial Networks},
author = {Jaiswal, A. and Abd-Almageed, W. and Wu, Y. and Natarajan, P.},
year = {2018},
date = {2018-02-08},
urldate = {2018-02-08},
journal = {Proceedings of Asian Conference on Computer Vision (ACCV), Springer, 2018},
pages = {216--232},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Jaiswal, Ayush; AbdAlmageed, Wael; Wu, Yue; Natarajan, Premkumar
Capsulegan: Generative adversarial capsule network Journal Article
In: Proceedings of the European conference on computer vision (ECCV) workshops, 2018.
@inproceedings{jaiswal2018capsulegan,
title = {{CapsuleGAN}: Generative Adversarial Capsule Network},
author = {Jaiswal, Ayush and AbdAlmageed, Wael and Wu, Yue and Natarajan, Premkumar},
url = {https://openaccess.thecvf.com/content_eccv_2018_workshops/w17/html/Jaiswal_CapsuleGAN_Generative_Adversarial_Capsule_Network_ECCVW_2018_paper.html},
year = {2018},
date = {2018-02-07},
urldate = {2018-02-07},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV) Workshops},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings},
internal-note = {Appears to duplicate the ECCVW BDCV CapsuleGAN entry elsewhere in this file; consider merging.}
}

Masi, I.; Chang, F.; Choi, J.; Harel, S.; Kim, J.; Kim, K.; Leksut, J.; Rawls, S.; Wu, Y.; Hassner, T.; AbdAlmageed, W.; Medioni, G.; Morency, L.; Natarajan, P.; Nevatia,
Learning Pose-Aware Models for Pose-Invariant Face Recognition in the Wild. Journal Article
In: IEEE Trans. on Pattern Analysis and Machine Intelligence. January 2018, 2018.
@article{masi2018learning,
title = {Learning Pose-Aware Models for Pose-Invariant Face Recognition in the Wild},
author = {Masi, I. and Chang, F. and Choi, J. and Harel, S. and Kim, J. and Kim, K. and Leksut, J. and Rawls, S. and Wu, Y. and Hassner, T. and AbdAlmageed, W. and Medioni, G. and Morency, L. and Natarajan, P. and Nevatia, R.},
url = {https://bibbase.org/network/publication/masi-chang-choi-harel-kim-kim-leksut-rawls-etal-learningposeawaremodelsforposeinvariantfacerecognitioninthewild-2018},
year = {2018},
month = jan,
date = {2018-01-19},
urldate = {2018-01-19},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Y., Wu; Abd-Almageed, W.; Natarajan, P.
Image Copy-Move Forgery Detection via an End-to-End Deep Neural Network Journal Article
In: IEEE Winter Conference on Applications of Computer Vision (WACV). 2018, pp. 1907–1915, 2018.
@inproceedings{wu2018copymove,
title = {Image Copy-Move Forgery Detection via an End-to-End Deep Neural Network},
author = {Wu, Yue and Abd-Almageed, Wael and Natarajan, Premkumar},
year = {2018},
date = {2018-01-18},
urldate = {2018-01-18},
booktitle = {IEEE Winter Conference on Applications of Computer Vision (WACV)},
pages = {1907--1915},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wu, Yue; Abd-Almageed, Wael; Natarajan, Prem
Busternet: Detecting copy-move image forgery with source/target localization Journal Article
In: Proceedings of the European Conference on Computer Vision (ECCV), 2018.
@inproceedings{wu2018busternet,
title = {{BusterNet}: Detecting Copy-Move Image Forgery with Source/Target Localization},
author = {Wu, Yue and Abd-Almageed, Wael and Natarajan, Premkumar},
url = {https://openaccess.thecvf.com/content_ECCV_2018/html/Rex_Yue_Wu_BusterNet_Detecting_Copy-Move_ECCV_2018_paper.html},
year = {2018},
date = {2018-01-18},
urldate = {2018-01-18},
booktitle = {Proceedings of the European Conference on Computer Vision (ECCV)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

A., Jaiswal; Wu, Y.; Abd-Almageed, W.; Natarajan, P.
CapsuleGAN: Generative Adversarial Capsule Network Journal Article
In: Proceedings of European Conference on Computer Vision Workshop (ECCVW) on Brain-Driven Computer Vision (BDCV), Springer, 2018, pp. 526-535, 2018.
@inproceedings{jaiswal2018capsuleganbdcv,
title = {{CapsuleGAN}: Generative Adversarial Capsule Network},
author = {Jaiswal, Ayush and Wu, Yue and Abd-Almageed, Wael and Natarajan, Premkumar},
year = {2018},
date = {2018-01-10},
urldate = {2018-01-10},
booktitle = {Proceedings of the European Conference on Computer Vision Workshop (ECCVW) on Brain-Driven Computer Vision (BDCV)},
publisher = {Springer},
pages = {526--535},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings},
internal-note = {Appears to duplicate the ECCV Workshops CapsuleGAN entry elsewhere in this file; consider merging.}
}
2017

Jaiswal, Ayush; Sabir, Ekraam; AbdAlmageed, Wael; Natarajan, Premkumar
Multimedia semantic integrity assessment using joint embedding of images and text Journal Article
In: Proceedings of the 25th ACM international conference on Multimedia, 2017.
@inproceedings{jaiswal2017multimedia,
title = {Multimedia Semantic Integrity Assessment Using Joint Embedding of Images and Text},
author = {Jaiswal, Ayush and Sabir, Ekraam and AbdAlmageed, Wael and Natarajan, Premkumar},
url = {https://dl.acm.org/doi/abs/10.1145/3123266.3123385},
year = {2017},
date = {2017-10-19},
urldate = {2017-10-19},
booktitle = {Proceedings of the 25th ACM International Conference on Multimedia},
publisher = {ACM},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wu, Yue; Abd-Almageed, Wael; Natarajan, Prem
Deep matching and validation network: An end-to-end solution to constrained image splicing localization and detection Journal Article
In: Proceedings of the 25th ACM international conference on Multimedia, pp. 1480-1502, 2017.
@inproceedings{wu2017dmvn,
title = {Deep Matching and Validation Network: An End-to-End Solution to Constrained Image Splicing Localization and Detection},
author = {Wu, Yue and Abd-Almageed, Wael and Natarajan, Premkumar},
url = {https://dl.acm.org/doi/abs/10.1145/3123266.3123411},
year = {2017},
date = {2017-10-19},
urldate = {2017-10-19},
booktitle = {Proceedings of the 25th ACM International Conference on Multimedia},
publisher = {ACM},
pages = {1480--1502},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings},
internal-note = {Appears to duplicate the @bachelorthesis DMVN entry elsewhere in this file; consider merging.}
}

Rawls, S.; Cao, H.; Sabir, E.; Natarajan, P
Combining deep learning and language modeling for segmentation-free OCR from raw pixels. Journal Article
In: 2017 1st International Workshop on Arabic Script Analysis and Recognition (ASAR), pages 119–123, April 2017, 2017.
@inproceedings{rawls2017combining,
title = {Combining Deep Learning and Language Modeling for Segmentation-Free {OCR} from Raw Pixels},
author = {Rawls, Stephen and Cao, H. and Sabir, Ekraam and Natarajan, Premkumar},
url = {https://bibbase.org/network/publication/rawls-cao-sabir-natarajan-combiningdeeplearningandlanguagemodelingforsegmentationfreeocrfromrawpixels-2017},
year = {2017},
date = {2017-06-14},
urldate = {2017-06-14},
booktitle = {2017 1st International Workshop on Arabic Script Analysis and Recognition (ASAR)},
pages = {119--123},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wu, Yue; AbdAlmageed, Wael; Rawls, Stephen; Natarajan, Premkumar
EPAT: Euclidean Perturbation Analysis and Transform-An Agnostic Data Adaptation Framework for Improving Facial Landmark Detectors Journal Article
In: 2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017), 2017.
@inproceedings{wu2017epat,
title = {{EPAT}: {Euclidean} Perturbation Analysis and Transform -- An Agnostic Data Adaptation Framework for Improving Facial Landmark Detectors},
author = {Wu, Yue and AbdAlmageed, Wael and Rawls, Stephen and Natarajan, Premkumar},
url = {https://ieeexplore.ieee.org/abstract/document/7961745},
year = {2017},
date = {2017-05-30},
urldate = {2017-05-30},
booktitle = {2017 12th IEEE International Conference on Automatic Face \& Gesture Recognition (FG 2017)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wu, Y.; Hassner, T.; Kim, K.; Medioni, G.; Natarajan, P
Facial landmark detection with tweaked convolutional neural networks. Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence. 2017, 2017.
@article{wu2017facial,
title = {Facial Landmark Detection with Tweaked Convolutional Neural Networks},
author = {Wu, Y. and Hassner, T. and Kim, K. and Medioni, G. and Natarajan, P.},
url = {https://arxiv.org/abs/1511.04031},
year = {2017},
date = {2017-05-19},
urldate = {2017-05-19},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {},
pubstate = {published},
tppubtype = {article}
}

Wu, Y.; Natarajan, P
Self-organized Text Detection with Minimal Post-processing via Border Learning. Journal Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5000–5009, 2017, 2017.
@inproceedings{wu2017selforganized,
title = {Self-Organized Text Detection with Minimal Post-Processing via Border Learning},
author = {Wu, Yue and Natarajan, Premkumar},
url = {https://openaccess.thecvf.com/content_ICCV_2017/papers/Wu_Self-Organized_Text_Detection_ICCV_2017_paper.pdf},
year = {2017},
date = {2017-01-27},
urldate = {2017-01-27},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision (ICCV)},
pages = {5000--5009},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings},
internal-note = {Original entry named CVPR, but the URL points to the ICCV 2017 open-access proceedings; venue set to ICCV -- verify.}
}

Rawls, S.; Cao, H.; Kumar, S.; Natarajan, P
Combining Convolutional Neural Networks and LSTMs for Segmentation-Free OCR. Journal Article
In: IAPR International Conference on Document Analysis and Recognition (ICDAR),155–160. 2017, 2017.
@inproceedings{rawls2017cnnlstm,
title = {Combining Convolutional Neural Networks and {LSTMs} for Segmentation-Free {OCR}},
author = {Rawls, Stephen and Cao, H. and Kumar, S. and Natarajan, Premkumar},
url = {https://ieeexplore.ieee.org/document/8269965},
year = {2017},
date = {2017-01-21},
urldate = {2017-01-21},
booktitle = {IAPR International Conference on Document Analysis and Recognition (ICDAR)},
pages = {155--160},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Wu, Y.; AbdAlmageed, W.; Natarajan, P
Deep Matching and Validation Network: An End-to-End Solution to Constrained Image Splicing Localization and Detection. Proceedings Article
2017.
@inproceedings{wu2017dmvnpreprint,
title = {Deep Matching and Validation Network: An End-to-End Solution to Constrained Image Splicing Localization and Detection},
author = {Wu, Yue and AbdAlmageed, Wael and Natarajan, Premkumar},
url = {https://arxiv.org/abs/1705.09765},
year = {2017},
date = {2017-01-12},
urldate = {2017-01-12},
booktitle = {Proceedings of the 2017 ACM on Multimedia Conference},
publisher = {ACM},
pages = {1480--1502},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings},
internal-note = {Was mistyped @bachelorthesis; it is the ACM MM 2017 paper (arXiv link) and appears to duplicate another DMVN entry in this file -- consider merging.}
}
2016

Wu, Yue; Natarajan, Premkumar; Rawls, Stephen; AbdAlmageed, Wael
Learning document image binarization from data Journal Article
In: 2016 IEEE International Conference on Image Processing (ICIP), 2016.
@inproceedings{wu2016binarization,
title = {Learning Document Image Binarization from Data},
author = {Wu, Yue and Natarajan, Premkumar and Rawls, Stephen and AbdAlmageed, Wael},
url = {https://ieeexplore.ieee.org/abstract/document/7533063},
year = {2016},
date = {2016-09-25},
urldate = {2016-09-25},
booktitle = {2016 IEEE International Conference on Image Processing (ICIP)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Abd-Almageed, Wael; Wu, Y.; Rawls, S.; Harel, S.; Hassner, T.; Masi, I.; Choi, J.; Leksut, J.; Kim, J.; Natarajan, P.; Nevatia, R.; Medioni, G. G.
Face recognition using deep multi-pose representations Journal Article
In: Winter Conference on Applications of Computer Vision, 2016.
@inproceedings{abdalmageed2016multipose,
title = {Face Recognition Using Deep Multi-Pose Representations},
author = {Abd-Almageed, Wael and Wu, Y. and Rawls, S. and Harel, S. and Hassner, T. and Masi, I. and Choi, J. and Leksut, J. and Kim, J. and Natarajan, P. and Nevatia, R. and Medioni, G. G.},
url = {https://ieeexplore.ieee.org/abstract/document/7477555},
year = {2016},
date = {2016-01-14},
urldate = {2016-01-14},
booktitle = {Winter Conference on Applications of Computer Vision (WACV)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2015

Nagaraja, Varun K; Abd-Almageed, Wael
Feature selection using partial least squares regression and optimal experiment design Journal Article
In: 2015 International Joint Conference on Neural Networks (IJCNN), 2015.
@inproceedings{nagaraja2015feature,
title = {Feature Selection Using Partial Least Squares Regression and Optimal Experiment Design},
author = {Nagaraja, Varun K. and Abd-Almageed, Wael},
url = {https://ieeexplore.ieee.org/abstract/document/7280341},
year = {2015},
date = {2015-07-12},
urldate = {2015-07-12},
booktitle = {2015 International Joint Conference on Neural Networks (IJCNN)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}

Hefeeda, Mohamed; Abd-Almageed, Wael; Gao, Fei
Method for processing a large-scale data set, and associated apparatus Journal Article
In: 2015.
@misc{hefeeda2015patent,
title = {Method for Processing a Large-Scale Data Set, and Associated Apparatus},
author = {Hefeeda, Mohamed and Abd-Almageed, Wael and Gao, Fei},
howpublished = {US Patent Application US20150039538A1},
url = {https://patents.google.com/patent/US20150039538A1/en},
year = {2015},
date = {2015-02-05},
urldate = {2015-02-05},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}

Du, Xianzhi; Doermann, David; Abd-Almageed, Wael
A graphical model approach for matching partial signatures Journal Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1465-1472, 2015.
@inproceedings{du2015graphical,
title = {A Graphical Model Approach for Matching Partial Signatures},
author = {Du, Xianzhi and Doermann, David and Abd-Almageed, Wael},
url = {https://www.cv-foundation.org/openaccess/content_cvpr_2015/html/Du_A_Graphical_Model_2015_CVPR_paper.html},
year = {2015},
date = {2015-01-08},
urldate = {2015-01-08},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {1465--1472},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}