Maintained by Difan Deng and Marius Lindauer.
The following list considers papers related to neural architecture search. It is by no means complete. If you miss a paper on the list, please let us know.
Please note that although NAS methods steadily improve, the quality of empirical evaluations in this field is still lagging behind that of other areas in machine learning, AI and optimization. We would therefore like to share some best practices for empirical evaluations of NAS methods, which we believe will facilitate sustained and measurable progress in the field. If you are interested in a teaser, please read our blog post or directly jump to our checklist.
Transformers have gained increasing popularity in different domains. For a comprehensive list of papers focusing on Neural Architecture Search for Transformer-Based spaces, the awesome-transformer-search repo is all you need.
5555
Chen, X.; Yang, C.
CIMNet: Joint Search for Neural Network and Computing-in-Memory Architecture Journal Article
In: IEEE Micro, no. 01, pp. 1-12, 2024 (early access), ISSN: 1937-4143.
@article{10551739,
  title     = {CIMNet: Joint Search for Neural Network and Computing-in-Memory Architecture},
  author    = {Chen, X. and Yang, C.},
  url       = {https://www.computer.org/csdl/magazine/mi/5555/01/10551739/1XyKBmSlmPm},
  doi       = {10.1109/MM.2024.3409068},
  issn      = {1937-4143},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-06-01},
  journal   = {IEEE Micro},
  number    = {01},
  pages     = {1--12},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA, USA},
  abstract  = {Computing-in-memory (CIM) architecture has been proven to effectively transcend the memory wall bottleneck, expanding the potential of low-power and high-throughput applications such as machine learning. Neural architecture search (NAS) designs ML models to meet a variety of accuracy, latency, and energy constraints. However, integrating CIM into NAS presents a major challenge due to additional simulation overhead from the non-ideal characteristics of CIM hardware. This work introduces a quantization and device aware accuracy predictor that jointly scores quantization policy, CIM architecture, and neural network architecture, eliminating the need for time-consuming simulations in the search process. We also propose reducing the search space based on architectural observations, resulting in a well-pruned search space customized for CIM. These allow for efficient exploration of superior combinations in mere CPU minutes. Our methodology yields CIMNet, which consistently improves the trade-off between accuracy and hardware efficiency on benchmarks, providing valuable architectural insights.},
  note      = {Early access; year taken from the DOI (10.1109/MM.2024.3409068) in place of the IEEE placeholder 5555},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Yan, J.; Liu, J.; Xu, H.; Wang, Z.; Qiao, C.
Peaches: Personalized Federated Learning with Neural Architecture Search in Edge Computing Journal Article
In: IEEE Transactions on Mobile Computing, no. 01, pp. 1-17, 2024 (early access), ISSN: 1558-0660.
@article{10460163,
  title     = {Peaches: Personalized Federated Learning with Neural Architecture Search in Edge Computing},
  author    = {Yan, J. and Liu, J. and Xu, H. and Wang, Z. and Qiao, C.},
  doi       = {10.1109/TMC.2024.3373506},
  issn      = {1558-0660},
  year      = {2024},
  date      = {2024-03-01},
  urldate   = {2024-03-01},
  journal   = {IEEE Transactions on Mobile Computing},
  number    = {01},
  pages     = {1--17},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA, USA},
  abstract  = {In edge computing (EC), federated learning (FL) enables numerous distributed devices (or workers) to collaboratively train AI models without exposing their local data. Most works of FL adopt a predefined architecture on all participating workers for model training. However, since workers' local data distributions vary heavily in EC, the predefined architecture may not be the optimal choice for every worker. It is also unrealistic to manually design a high-performance architecture for each worker, which requires intense human expertise and effort. In order to tackle this challenge, neural architecture search (NAS) has been applied in FL to automate the architecture design process. Unfortunately, the existing federated NAS frameworks often suffer from the difficulties of system heterogeneity and resource limitation. To remedy this problem, we present a novel framework, termed Peaches, to achieve efficient searching and training in the resource-constrained EC system. Specifically, the local model of each worker is stacked by base cell and personal cell, where the base cell is shared by all workers to capture the common knowledge and the personal cell is customized for each worker to fit the local data. We determine the number of base cells, shared by all workers, according to the bandwidth budget on the parameters server. Besides, to relieve the data and system heterogeneity, we find the optimal number of personal cells for each worker based on its computing capability. In addition, we gradually prune the search space during training to mitigate the resource consumption. We evaluate the performance of Peaches through extensive experiments, and the results show that Peaches can achieve an average accuracy improvement of about 6.29% and up to 3.97× speed up compared with the baselines.},
  note      = {Early access; year taken from the DOI (10.1109/TMC.2024.3373506) in place of the IEEE placeholder 5555},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2025
Öcal, Göktuğ; Özgövde, Atay
Network-aware federated neural architecture search Journal Article
In: Future Generation Computer Systems, vol. 162, pp. 107475, 2025, ISSN: 0167-739X.
@article{OCAL2025107475,
  title     = {Network-aware federated neural architecture search},
  author    = {Göktuğ Öcal and Atay Özgövde},
  url       = {https://www.sciencedirect.com/science/article/pii/S0167739X24004205},
  doi       = {10.1016/j.future.2024.07.053},
  issn      = {0167-739X},
  year      = {2025},
  date      = {2025-01-01},
  urldate   = {2025-01-01},
  journal   = {Future Generation Computer Systems},
  volume    = {162},
  pages     = {107475},
  abstract  = {The cooperation between Deep Learning (DL) and edge devices has further advanced technological developments, allowing smart devices to serve as both data sources and endpoints for DL-powered applications. However, the success of DL relies on optimal Deep Neural Network (DNN) architectures, and manually developing such systems requires extensive expertise and time. Neural Architecture Search (NAS) has emerged to automate the search for the best-performing neural architectures. Meanwhile, Federated Learning (FL) addresses data privacy concerns by enabling collaborative model development without exchanging the private data of clients. In a FL system, network limitations can lead to biased model training, slower convergence, and increased communication overhead. On the other hand, traditional DNN architecture design, emphasizing validation accuracy, often overlooks computational efficiency and size constraints of edge devices. This research aims to develop a comprehensive framework that effectively balances trade-offs between model performance, communication efficiency, and the incorporation of FL into an iterative NAS algorithm. This framework aims to overcome challenges by addressing the specific requirements of FL, optimizing DNNs through NAS, and ensuring computational efficiency while considering the network constraints of edge devices. To address these challenges, we introduce Network-Aware Federated Neural Architecture Search (NAFNAS), an open-source federated neural network pruning framework with network emulation support. Through comprehensive testing, we demonstrate the feasibility of our approach, efficiently reducing DNN size and mitigating communication challenges. Additionally, we propose Network and Distribution Aware Client Grouping (NetDAG), a novel client grouping algorithm tailored for FL with diverse DNN architectures, considerably enhancing efficiency of communication rounds and update balance.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2024
Ouertatani, Houssem; Maxim, Cristian; Niar, Smail; Talbi, El-Ghazali
Accelerated NAS via pretrained ensembles and multi-fidelity Bayesian Optimization Proceedings Article
In: 33rd International Conference on Artificial Neural Networks (ICANN), Lugano, Switzerland, 2024.
@inproceedings{ouertatani:hal-04611343,
  title     = {Accelerated NAS via pretrained ensembles and multi-fidelity Bayesian Optimization},
  author    = {Houssem Ouertatani and Cristian Maxim and Smail Niar and El-Ghazali Talbi},
  url       = {https://hal.science/hal-04611343},
  year      = {2024},
  date      = {2024-09-01},
  urldate   = {2024-09-01},
  booktitle = {33rd International Conference on Artificial Neural Networks (ICANN)},
  address   = {Lugano, Switzerland},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
(Ed.)
Medical Neural Architecture Search: Survey and Taxonomy Collection
2024.
@collection{Benmeziane-ijcai24a,
  title     = {Medical Neural Architecture Search: Survey and Taxonomy},
  author    = {Hadjer Benmeziane and Imane Hamzaoui and Kaoutar El Maghraoui},
  url       = {https://www.ijcai.org/proceedings/2024/0878.pdf},
  year      = {2024},
  date      = {2024-08-03},
  urldate   = {2024-08-03},
  booktitle = {IJCAI 2024},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Jeevidha, S.; Saraswathi, S.
Learning Methods and Parameters used in Neural Architecture Search for Image Classification Journal Article
In: International Journal of Computer Applications, vol. 186, no. 32, pp. 19-24, 2024, ISSN: 0975-8887.
@article{10.5120/ijca2024923857,
  title     = {Learning Methods and Parameters used in Neural Architecture Search for Image Classification},
  author    = {Jeevidha, S. and Saraswathi, S.},
  url       = {https://ijcaonline.org/archives/volume186/number32/learning-methods-and-parameters-used-in-neural-architecture-search-for-image-classification/},
  doi       = {10.5120/ijca2024923857},
  issn      = {0975-8887},
  year      = {2024},
  date      = {2024-08-01},
  urldate   = {2024-08-01},
  journal   = {International Journal of Computer Applications},
  volume    = {186},
  number    = {32},
  pages     = {19--24},
  publisher = {Foundation of Computer Science (FCS), NY, USA},
  address   = {New York, USA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Improved Learning Through Neural Component Search PhD Thesis
2024.
@phdthesis{morgan-phds24a,
  title         = {Improved Learning Through Neural Component Search},
  url           = {https://shareok.org/handle/11244/340526},
  year          = {2024},
  date          = {2024-08-01},
  urldate       = {2024-08-01},
  internal-note = {NOTE(review): the required author and school fields are missing from this record -- TODO: fill them in from the repository landing page},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {phdthesis}
}
(Ed.)
A COMPARATIVE STUDY: DEEPFAKE VIDEO DETECTION WITH NASNETS AND LSTM Collection
2024.
@collection{Farman-istanbul24a,
  title     = {A COMPARATIVE STUDY: DEEPFAKE VIDEO DETECTION WITH NASNETS AND LSTM},
  author    = {Farmanuddin Farman and Yılmaz Atay and Cagri Sahin},
  url       = {https://www.researchgate.net/profile/Farmanuddin-Farman/publication/381902059_A_COMPARATIVE_STUDY_DEEPFAKE_VIDEO_DETECTION_WITH_NASNETS_AND_LSTM/links/668408822aa57f3b82688f25/A-COMPARATIVE-STUDY-DEEPFAKE-VIDEO-DETECTION-WITH-NASNETS-AND-LSTM.pdf},
  year      = {2024},
  date      = {2024-08-01},
  urldate   = {2024-08-01},
  booktitle = {12TH INTERNATIONAL İSTANBUL SCIENTIFIC RESEARCH CONGRESS PROCEEDINGS BOOK},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Ge, Wanying
Spoofing-robust Automatic Speaker Verification: Architecture, Explainability and Joint Optimisation PhD Thesis
2024.
@phdthesis{Ge-phd24a,
  title     = {Spoofing-robust Automatic Speaker Verification: Architecture, Explainability and Joint Optimisation},
  author    = {Wanying Ge},
  url       = {https://theses.hal.science/tel-04633370/document},
  year      = {2024},
  date      = {2024-08-01},
  urldate   = {2024-08-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
Hosseini, Ramtin
Trustworthy Neural Architecture Search PhD Thesis
2024.
@phdthesis{Hosseini-phd24a,
  title     = {Trustworthy Neural Architecture Search},
  author    = {Hosseini, Ramtin},
  url       = {https://escholarship.org/content/qt7tr2f8zx/qt7tr2f8zx.pdf},
  year      = {2024},
  date      = {2024-08-01},
  urldate   = {2024-08-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
Baniata, Hamza
SoK: quantum computing methods for machine learning optimization Journal Article
In: Quantum Machine Intelligence , vol. 6, 2024.
@article{Baniata-quantumMl24a,
  title     = {SoK: quantum computing methods for machine learning optimization},
  author    = {Hamza Baniata},
  url       = {https://link.springer.com/article/10.1007/s42484-024-00180-1},
  year      = {2024},
  date      = {2024-07-24},
  urldate   = {2024-07-24},
  journal   = {Quantum Machine Intelligence},
  volume    = {6},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Wu, Ruoyou; Li, Cheng; Zou, Juan; Liu, Xinfeng; Zheng, Hairong; Wang, Shanshan
Generalizable Reconstruction for Accelerating MR Imaging via Federated Learning with Neural Architecture Search Journal Article
In: IEEE Trans Med Imaging . , 2024.
@article{Wu-TMI24a,
  title     = {Generalizable Reconstruction for Accelerating MR Imaging via Federated Learning with Neural Architecture Search},
  author    = {Ruoyou Wu and Cheng Li and Juan Zou and Xinfeng Liu and Hairong Zheng and Shanshan Wang},
  url       = {https://pubmed.ncbi.nlm.nih.gov/39037877/},
  doi       = {10.1109/TMI.2024.3432388},
  year      = {2024},
  date      = {2024-07-22},
  urldate   = {2024-07-22},
  journal   = {IEEE Transactions on Medical Imaging},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Huang, Chien Yu; Tsai, Cheng-Che; Hwang, Lisa Alice; Kang, Bor-Hwang; Lin, Yaoh-Shiang; Su, Hsing-Hao; Shen, Guan‐Ting; Hsieh, Jun-Wei
SCC-NET: Segmentation of Clinical Cancer image for Head and Neck Squamous Cell Carcinoma Technical Report
2024.
@techreport{yu-24a,
  title     = {SCC-NET: Segmentation of Clinical Cancer image for Head and Neck Squamous Cell Carcinoma},
  author    = {Chien Yu Huang and Cheng-Che Tsai and Lisa Alice Hwang and Bor-Hwang Kang and Yaoh-Shiang Lin and Hsing-Hao Su and Guan‐Ting Shen and Jun-Wei Hsieh},
  url       = {https://www.researchsquare.com/article/rs-4577408/v1},
  year      = {2024},
  date      = {2024-07-16},
  urldate   = {2024-07-16},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Asiimwe, Arnold; Das, William; Benmeziane, Hadjer; Maghraoui, Kaoutar El
EfficientMedSAM: Accelerating Medical Image Segmentation via Neural Architecture Search and Knowledge Distillation Conference
In: EDGE 2024, 2024.
@inproceedings{Asiimwe-edge24a,
  title     = {EfficientMedSAM: Accelerating Medical Image Segmentation via Neural Architecture Search and Knowledge Distillation},
  author    = {Arnold Asiimwe and William Das and Hadjer Benmeziane and Kaoutar El Maghraoui},
  url       = {https://research.ibm.com/publications/efficientmedsam-accelerating-medical-image-segmentation-via-neural-architecture-search-and-knowledge-distillation},
  year      = {2024},
  date      = {2024-07-07},
  urldate   = {2024-07-07},
  booktitle = {EDGE 2024},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
(Ed.)
Are Large Language Models Good Neural Architecture Generators for Edge? Collection
2024.
@collection{Benmeziane-edge24a,
  title     = {Are Large Language Models Good Neural Architecture Generators for Edge?},
  author    = {Hadjer Benmeziane and Kaoutar El Maghraoui},
  url       = {https://research.ibm.com/publications/are-large-language-models-good-neural-architecture-generators-for-edge},
  year      = {2024},
  date      = {2024-07-01},
  urldate   = {2024-07-01},
  booktitle = {EDGE 2024},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Yin, Jia; Wang, Wei; Guo, Zhonghua; Ji, Yangchun
DTS: dynamic training slimming with feature sparsity for efficient convolutional neural network Journal Article
In: Journal of Real-Time Image Processing, 2024.
@article{Yin-jrtip24a,
  title     = {DTS: dynamic training slimming with feature sparsity for efficient convolutional neural network},
  author    = {Jia Yin and Wei Wang and Zhonghua Guo and Yangchun Ji},
  url       = {https://link.springer.com/article/10.1007/s11554-024-01511-y},
  year      = {2024},
  date      = {2024-07-01},
  urldate   = {2024-07-01},
  journal   = {Journal of Real-Time Image Processing},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
(Ed.)
Neural Architecture Search for Adversarial Robustness via Learnable Pruning Collection
2024.
@collection{Li-fhpc24a,
  title     = {Neural Architecture Search for Adversarial Robustness via Learnable Pruning},
  author    = {Yize Li and Pu Zhao and Ruyi Ding and Tong Zhou and Yunsi Fei and Xiaolin Xu and Xue Lin},
  url       = {https://www.frontiersin.org/journals/high-performance-computing/articles/10.3389/fhpcp.2024.1301384/abstract},
  year      = {2024},
  date      = {2024-06-18},
  urldate   = {2024-06-18},
  booktitle = {Frontiers in High Performance Computing},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Yang, Jiechao; Liu, Yong
ETAS: Zero-Shot Transformer Architecture Search via Network Trainability and Expressivity Miscellaneous
2024.
@misc{Yang-24a,
  title     = {ETAS: Zero-Shot Transformer Architecture Search via Network Trainability and Expressivity},
  author    = {Jiechao Yang and Yong Liu},
  url       = {https://gsai.ruc.edu.cn/uploads/20240608/0d316f5c76e76d2d0a9ddc532a75be5d.pdf},
  year      = {2024},
  date      = {2024-06-18},
  urldate   = {2024-06-18},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {misc}
}
(Ed.)
UP-NAS: Unified Proxy for Neural Architecture Search Collection
2024.
@collection{Huang-cvprw24a,
  title     = {UP-NAS: Unified Proxy for Neural Architecture Search},
  author    = {Yi-Cheng Huang and Wei-Hua Li and Chih-Han Tsou and Jun-Cheng Chen and Chu-Song Chen},
  url       = {https://openaccess.thecvf.com/content/CVPR2024W/CVPR-NAS/papers/Huang_UP-NAS_Unified_Proxy_for_Neural_Architecture_Search_CVPRW_2024_paper.pdf},
  year      = {2024},
  date      = {2024-06-13},
  urldate   = {2024-06-13},
  booktitle = {CVPR 2024 Workshop},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Wang, Xin; Chen, Hong; Pan, Zirui; Zhou, Yuwei; Guan, Chaoyu; Sun, Lifeng; Zhu, Wenwu
Automated Disentangled Sequential Recommendation with Large Language Models Journal Article
In: ACM Trans. Inf. Syst., 2024, ISSN: 1046-8188, (Just Accepted).
@article{10.1145/3675164,
  title     = {Automated Disentangled Sequential Recommendation with Large Language Models},
  author    = {Xin Wang and Hong Chen and Zirui Pan and Yuwei Zhou and Chaoyu Guan and Lifeng Sun and Wenwu Zhu},
  doi       = {10.1145/3675164},
  issn      = {1046-8188},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-06-01},
  journal   = {ACM Trans. Inf. Syst.},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Sequential recommendation aims to recommend the next items that a target user may have interest in based on the user’s sequence of past behaviors, which has become a hot research topic in both academia and industry. In the literature, sequential recommendation adopts a Sequence-to-Item or Sequence-to-Sequence training strategy, which supervises a sequential model with a user’s next one or more behaviors as the labels and the sequence of the past behaviors as the input. However, existing powerful sequential recommendation approaches employ more and more complex deep structures such as Transformer in order to accurately capture the sequential patterns, which heavily rely on hand-crafted designs on key attention mechanism to achieve state-of-the-art performance, thus failing to automatically obtain the optimal design of attention representation architectures in various scenarios with different data. Other works on classic automated deep recommender systems only focus on traditional settings, ignoring the problem of sequential scenarios. In this paper, we study the problem of automated sequential recommendation, which faces two main challenges: i) How can we design a proper search space tailored for attention automation in sequential recommendation, and ii) How can we accurately search effective attention representation architectures considering multiple user interests reflected in the sequential behavior. To tackle these challenges, we propose an automated disentangled sequential recommendation (AutoDisenSeq) model. In particular, we employ neural architecture search (NAS) and design a search space tailored for automated attention representation in attentive intention-disentangled sequential recommendation with an expressive and efficient space complexity of {$O(n^2)$} given {$n$} as the number of layers. We further propose a context-aware parameter sharing mechanism taking characteristics of each sub-architecture into account to enable accurate architecture performance estimations and great flexibility for disentanglement of latent intention representation. Moreover, we propose AutoDisenSeq-LLM, which utilizes the textual understanding power of large language model (LLM) as a guidance to refine the candidate list for recommendation from AutoDisenSeq. We conduct extensive experiments to show that our proposed AutoDisenSeq model and AutoDisenSeq-LLM model outperform existing baseline methods on four real-world datasets in both overall recommendation and cold-start recommendation scenarios.},
  note      = {Just Accepted},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
(Ed.)
On Efficient Object-Detection NAS for ADAS on Edge devices Collection
2024.
@collection{Gupta-CAI24a,
  title     = {On Efficient Object-Detection NAS for ADAS on Edge devices},
  author    = {Diksha Gupta and Rhui Dih Lee and Laura Wynter},
  url       = {https://ieeecai.org/2024/wp-content/pdfs/540900b013/540900b013.pdf},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-06-01},
  booktitle = {2024 IEEE Conference on Artificial Intelligence (CAI)},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Micheal, A Ancy; Micheal, A Annie; Gopinathan, Anurekha; Barath, B U Anu
Deep Learning-based Multi-class Object Tracking With Occlusion Handling Mechanism in Uav Videos Technical Report
2024.
@techreport{Micheal-uav24a,
  title     = {Deep Learning-based Multi-class Object Tracking With Occlusion Handling Mechanism in Uav Videos},
  author    = {A Ancy Micheal and A Annie Micheal and Anurekha Gopinathan and B U Anu Barath},
  url       = {https://www.researchsquare.com/article/rs-4488926/v1},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-06-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
(Ed.)
Disentangled Continual Graph Neural Architecture Search with Invariant Modular Supernet Collection
2024.
@collection{zhang-icml24a,
  title     = {Disentangled Continual Graph Neural Architecture Search with Invariant Modular Supernet},
  author    = {Zeyang Zhang and Xin Wang and Yijian Qin and Hong Chen and Ziwei Zhang and Xu Chu and Wenwu Zhu},
  url       = {http://mn.cs.tsinghua.edu.cn/xinwang/PDF/papers/2024_Disentangled%20Continual%20Graph%20Neural%20Architecture%20Search%20with%20Invariant%20Modular%20Supernet.pdf},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-06-01},
  booktitle = {Proceedings of the 41st International Conference on Machine Learning},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Gao, Tianxiao; Guo, Li; Zhao, Shanwei; Xu, Peihan; Yang, Yukun; Liu, Xionghao; Wang, Shihao; Zhu, Shiai; Zhou, Dajiang
QuantNAS: Quantization-aware Neural Architecture Search For Efficient Deployment On Mobile Device Proceedings Article
In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pp. 1704-1713, 2024.
@inproceedings{Gao_2024_CVPR,
  title     = {QuantNAS: Quantization-aware Neural Architecture Search For Efficient Deployment On Mobile Device},
  author    = {Tianxiao Gao and Li Guo and Shanwei Zhao and Peihan Xu and Yukun Yang and Xionghao Liu and Shihao Wang and Shiai Zhu and Dajiang Zhou},
  url       = {https://openaccess.thecvf.com/content/CVPR2024W/CVPR-NAS/html/Gao_QuantNAS_Quantization-aware_Neural_Architecture_Search_For_Efficient_Deployment_On_Mobile_CVPRW_2024_paper.html},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-06-01},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
  pages     = {1704--1713},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
(Ed.)
Agent Based Model for AUTODL Optimisation Collection
2024.
@collection{Hedhili-icaart24a,
  title     = {Agent Based Model for AUTODL Optimisation},
  author    = {Aroua Hedhili and Imen Khelfa},
  url       = {https://www.scitepress.org/Papers/2024/123717/123717.pdf},
  doi       = {10.5220/0012371700003636},
  year      = {2024},
  date      = {2024-06-01},
  urldate   = {2024-06-01},
  booktitle = {Proceedings of the 16th International Conference on Agents and Artificial Intelligence (ICAART 2024)},
  volume    = {3},
  pages     = {568--575},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {collection}
}
Wang, Zixiao; Wang, Jiansu; Li, Shuo; Yang, Jiadi; Xing, Tianzhang
A lightweight and real-time responsive framework for various visual tasks via neural architecture search Journal Article
In: CCF Transactions on Pervasive Computing and Interaction , 2024.
@article{Wang-ccftpci24a,
  title     = {A lightweight and real-time responsive framework for various visual tasks via neural architecture search},
  author    = {Zixiao Wang and Jiansu Wang and Shuo Li and Jiadi Yang and Tianzhang Xing},
  url       = {https://link.springer.com/article/10.1007/s42486-024-00157-w},
  year      = {2024},
  date      = {2024-05-21},
  urldate   = {2024-05-21},
  journal   = {CCF Transactions on Pervasive Computing and Interaction},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Krestinskaya, Olga; Fouda, Mohammed E.; Benmeziane, Hadjer; Maghraoui, Kaoutar El; Sebastian, Abu; Lu, Wei D.; Lanza, Mario; Li, Hai; Kurdahi, Fadi; Fahmy, Suhaib A.; Eltawil, Ahmed; Salama, Khaled N.
Neural architecture search for in-memory computing-based deep learning accelerators Journal Article
In: nature reviews electrical engineering , pp. 374-390, 2024.
@article{Krestinskaya-nree24a,
  title     = {Neural architecture search for in-memory computing-based deep learning accelerators},
  author    = {Olga Krestinskaya and Mohammed E. Fouda and Hadjer Benmeziane and Kaoutar El Maghraoui and Abu Sebastian and Wei D. Lu and Mario Lanza and Hai Li and Fadi Kurdahi and Suhaib A. Fahmy and Ahmed Eltawil and Khaled N. Salama},
  url       = {https://www.nature.com/articles/s44287-024-00052-7},
  year      = {2024},
  date      = {2024-05-20},
  urldate   = {2024-05-20},
  journal   = {Nature Reviews Electrical Engineering},
  pages     = {374--390},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Venske, Sandra Mara Scós; de Almeida, Carolina Paula; Delgado, Myriam Regattieri
Metaheuristics and machine learning: an approach with reinforcement learning assisting neural architecture search Journal Article
In: Journal of Heuristics, 2024.
@article{Venske-jh24a,
  title     = {Metaheuristics and machine learning: an approach with reinforcement learning assisting neural architecture search},
  author    = {Sandra Mara Scós Venske and Carolina Paula de Almeida and Myriam Regattieri Delgado},
  url       = {https://link.springer.com/article/10.1007/s10732-024-09526-1},
  doi       = {10.1007/s10732-024-09526-1},
  year      = {2024},
  date      = {2024-05-16},
  urldate   = {2024-05-16},
  journal   = {Journal of Heuristics},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Cao, Jianfang; Jin, Mengyan; Tian, Yun; Cao, Zhen; Peng, Cunhe
Ancient mural dynasty recognition algorithm based on a neural network architecture search Journal Article
In: Heritage Science , 2024.
@article{cao-hs24a,
  title     = {Ancient mural dynasty recognition algorithm based on a neural network architecture search},
  author    = {Cao, Jianfang and Jin, Mengyan and Tian, Yun and Cao, Zhen and Peng, Cunhe},
  url       = {https://link.springer.com/article/10.1186/s40494-024-01274-6},
  year      = {2024},
  date      = {2024-05-15},
  urldate   = {2024-05-15},
  journal   = {Heritage Science},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ray, Subhosit
TOWARDS SELF-ORGANIZED BRAIN: TOPOLOGICAL REINFORCEMENT LEARNING WITH GRAPH CELLULAR AUTOMATA PhD Thesis
2024.
@phdthesis{ray-phd24a,
  title     = {TOWARDS SELF-ORGANIZED BRAIN: TOPOLOGICAL REINFORCEMENT LEARNING WITH GRAPH CELLULAR AUTOMATA},
  author    = {Subhosit Ray},
  url       = {https://www.proquest.com/docview/3054372231?pq-origsite=gscholar&fromopenview=true&sourcetype=Dissertations%20&%20Theses},
  year      = {2024},
  date      = {2024-05-01},
  urldate   = {2024-05-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {phdthesis}
}
Ramaraj, N.; Murugan, G.; Regunathan, R.
Neural Network-Powered Conductorless Ticketing for Public Transportation Proceedings Article
In: 2024 4th International Conference on Pervasive Computing and Social Networking (ICPCSN), pp. 236-241, IEEE Computer Society, Los Alamitos, CA, USA, 2024.
@inproceedings{10607626,
  title     = {Neural Network-Powered Conductorless Ticketing for Public Transportation},
  author    = {Ramaraj, N. and Murugan, G. and Regunathan, R.},
  url       = {https://doi.ieeecomputersociety.org/10.1109/ICPCSN62568.2024.00047},
  doi       = {10.1109/ICPCSN62568.2024.00047},
  year      = {2024},
  date      = {2024-05-01},
  urldate   = {2024-05-01},
  booktitle = {2024 4th International Conference on Pervasive Computing and Social Networking (ICPCSN)},
  pages     = {236--241},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA, USA},
  abstract  = {The efficient functioning of public transportation systems is pivotal for societal connectivity and economic progress, as they serve as lifelines for commuting and mobility. However, the dependency on manual ticketing processes often leads to bottlenecks and inefficiencies, hindering smooth operations and customer satisfaction. This research work focuses on developing an Automated Ticketing System for public transportation, utilizing Computer Vision and Neural Networks. Through the incorporation of Neural Architecture Search and the integration of Deep Sort, a Deep Learning-based object tracking model, with aim to enhance system efficiency. The study demonstrates promising results, indicating the potential for streamlined ticketing processes in public transportation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Benfenati, Alessandro; Catozzi, Ambra; Franchini, Giorgia; Porta, Federica
Early stopping strategies in Deep Image Prior Technical Report
2024.
@techreport{Benfenati-prep24a,
  title     = {Early stopping strategies in Deep Image Prior},
  author    = {Alessandro Benfenati and Ambra Catozzi and Giorgia Franchini and Federica Porta},
  url       = {https://www.researchsquare.com/article/rs-4396753/v1},
  year      = {2024},
  date      = {2024-05-01},
  urldate   = {2024-05-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Lee, Matthew; Sanchez-Matilla, Ricardo; Stoyanov, Danail; Luengo, Imanol
DIPO: Differentiable Parallel Operation Blocks for Surgical Neural Architecture Search Journal Article
In: IEEE J Biomed Health Inform , 2024.
@article{LeeIEEEJBHI24a,
  title     = {DIPO: Differentiable Parallel Operation Blocks for Surgical Neural Architecture Search},
  author    = {Matthew Lee and Ricardo Sanchez-Matilla and Danail Stoyanov and Imanol Luengo},
  url       = {https://pubmed.ncbi.nlm.nih.gov/38805333/},
  year      = {2024},
  date      = {2024-05-01},
  urldate   = {2024-05-01},
  journal   = {IEEE Journal of Biomedical and Health Informatics},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Guilin; Wang, Qiang; Zheng, Xiawu
Distilling Structural Knowledge for Platform-Aware Semantic Segmentation Journal Article
In: Journal of Physics: Conference Series, vol. 2759, no. 1, pp. 012010, 2024.
@comment{NOTE(review): record is internally consistent (doi, issn-less IOP conference series, volume/number). The pages field holds what looks like an article number (012010) rather than a page range -- presumably intentional for this journal; verify against the publisher page.}
@article{Li_2024,
title = {Distilling Structural Knowledge for Platform-Aware Semantic Segmentation},
author = {Guilin Li and Qiang Wang and Xiawu Zheng},
url = {https://dx.doi.org/10.1088/1742-6596/2759/1/012010},
doi = {10.1088/1742-6596/2759/1/012010},
year = {2024},
date = {2024-05-01},
urldate = {2024-05-01},
journal = {Journal of Physics: Conference Series},
volume = {2759},
number = {1},
pages = {012010},
publisher = {IOP Publishing},
abstract = {Knowledge Distillation (KD) aims to distill the dark knowledge of a high-powered teacher network into a student network, which can improve the capacity of student network and has been successfully applied to semantic segmentation. However, the standard knowledge distillation approaches merely represent the supervisory signal of teacher network as the dark knowledge, while ignoring the impact of network architecture during distillation. In this paper, we found that the student network with a more similar architecture against the teacher network obtains more performance gain from distillation. Therefore, a more generalized paradigm for knowledge distillation is to distill both the soft-label and the structure of the teacher network. We propose a novel Structural Distillation (SD) method which introduces the structural similarity constraints into vanilla knowledge distillation. We leverage Neural Architecture Search technique to search optimal student structure for semantic segmentation from a well-designed search space, which mimics the given teacher both in terms of soft-label and network structure. Experiment results make clear that our proposed method outperforms both the NAS with conventional Knowledge Distillation and human-designed methods, and achieves sota performance on the Cityscapes dataset under various platform-aware latency constraints. Furthermore, the best architecture discovered on Cityscapes also transfers well to the PASCAL VOC2012 dataset.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
(Ed.)
SCAN-EDGE: FINDING MOBILENET-SPEED HYBRID NETWORKS FOR COMMODITY EDGE DEVICES Collection
2024.
@collection{Chiang-iclr24a,
title = {{SCAN-Edge}: Finding {MobileNet}-Speed Hybrid Networks for Commodity Edge Devices},
author = {Hung-Yueh Chiang and Diana Marculescu},
url = {https://openreview.net/pdf?id=Zbim40TgaJ},
year = {2024},
date = {2024-05-01},
urldate = {2024-05-01},
booktitle = {ICLR 2024},
keywords = {},
pubstate = {published},
tppubtype = {collection}
}
Wang, Gang; Wang, Bang-Hai; Fei, Shao-Ming
An RNN–policy gradient approach for quantum architecture search Journal Article
In: Quantum Information Processing, vol. 23, no. 5, 2024, ISSN: 1573-1332.
@article{Wang_2024,
  title     = {An RNN–policy gradient approach for quantum architecture search},
  author    = {Gang Wang and Bang-Hai Wang and Shao-Ming Fei},
  url       = {http://dx.doi.org/10.1007/s11128-024-04393-y},
  doi       = {10.1007/s11128-024-04393-y},
  issn      = {1573-1332},
  year      = {2024},
  date      = {2024-05-01},
  urldate   = {2024-05-01},
  journal   = {Quantum Information Processing},
  volume    = {23},
  number    = {5},
  publisher = {Springer Science and Business Media LLC},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gündüz, Hüseyin Anil; Mreches, René; Moosbauer, Julia; Robertson, Gary; To, Xiao-Yin; Franzosa, Eric A.; Huttenhower, Curtis; Rezaei, Mina; McHardy, Alice C.; Bischl, Bernd; Münch, Philipp C.; Binder, Martin
Optimized model architectures for deep learning on genomic data Journal Article
In: Communications Biology, 2024.
@article{Guenduez-cb24a,
title = {Optimized model architectures for deep learning on genomic data},
author = {Hüseyin Anil Gündüz and René Mreches and Julia Moosbauer and Gary Robertson and Xiao-Yin To and Eric A. Franzosa and Curtis Huttenhower and Mina Rezaei and Alice C. McHardy and Bernd Bischl and Philipp C. Münch and Martin Binder},
url = {https://www.nature.com/articles/s42003-024-06161-1},
year = {2024},
date = {2024-04-30},
urldate = {2024-04-30},
journal = {Communications Biology},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lacharme, Guillaume; Cardot, Hubert; Lente, Christophe; Monmarché, Nicolas
The limitations of differentiable architecture search Journal Article
In: Pattern Analysis and Applications, 2024.
@article{Guillaume-paa24a,
title = {The limitations of differentiable architecture search},
author = {Guillaume Lacharme and Hubert Cardot and Christophe Lente and Nicolas Monmarché},
url = {https://link.springer.com/article/10.1007/s10044-024-01260-5},
year = {2024},
date = {2024-04-12},
urldate = {2024-04-12},
journal = {Pattern Analysis and Applications},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Yili; Chen, Jiamin; Li, Qiutong; He, Changlong; Gao, Jianliang
Graph neural architecture search with heterogeneous message-passing mechanisms Journal Article
In: Knowledge and Information Systems , 2024.
@article{Wang-kis24a,
title = {Graph neural architecture search with heterogeneous message-passing mechanisms},
author = {Yili Wang and Jiamin Chen and Qiutong Li and Changlong He and Jianliang Gao},
url = {https://link.springer.com/article/10.1007/s10115-024-02090-x},
year = {2024},
date = {2024-04-12},
urldate = {2024-04-12},
journal = {Knowledge and Information Systems},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sankar, S. M. Udhaya; Dhinakaran, D.; Selvaraj, R.; Verma, Shrey Kumar; Natarajasivam, R.; Kishore, P. C. Praveen
Optimizing diabetic retinopathy disease prediction using PNAS, ASHA, and transfer learning Book Chapter
In: 2024, ISBN: 9781003430421.
@inbook{Sankar-anic24a,
title = {Optimizing diabetic retinopathy disease prediction using {PNAS}, {ASHA}, and transfer learning},
author = {S. M. Udhaya Sankar and D. Dhinakaran and R. Selvaraj and Shrey Kumar Verma and R. Natarajasivam and P. C. Praveen Kishore},
url = {https://www.taylorfrancis.com/chapters/edit/10.1201/9781003430421-7/optimizing-diabetic-retinopathy-disease-prediction-using-pnas-asha-transfer-learning-udhaya-sankar-dhinakaran-selvaraj-shrey-kumar-verma-natarajasivam-praveen-kishore},
isbn = {9781003430421},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-01},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Ma, Benteng; Zhang, Jing; Xia, Yong; Tao, Dacheng
VNAS: Variational Neural Architecture Search Journal Article
In: International Journal of Computer Vision, 2024.
@article{Ma-ijcv24a,
title = {{VNAS}: Variational Neural Architecture Search},
author = {Benteng Ma and Jing Zhang and Yong Xia and Dacheng Tao},
url = {https://link.springer.com/article/10.1007/s11263-024-02014-w},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-01},
journal = {International Journal of Computer Vision},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Garavagno, Andrea Mattia; Ragusa, Edoardo; Frisoli, Antonio; Gastaldo, Paolo
An Affordable Hardware-Aware Neural Architecture Search for Deploying Convolutional Neural Networks on Ultra-Low-Power Computing Platforms Journal Article
In: IEEE Sensors Letters, 2024.
@article{Garavagno-sensorsletter24a,
  title     = {An Affordable Hardware-Aware Neural Architecture Search for Deploying Convolutional Neural Networks on Ultra-Low-Power Computing Platforms},
  author    = {Andrea Mattia Garavagno and Edoardo Ragusa and Antonio Frisoli and Paolo Gastaldo},
  url       = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=10496186&tag=1},
  year      = {2024},
  date      = {2024-04-01},
  urldate   = {2024-04-01},
  journal   = {IEEE Sensors Letters},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Nasrullah, Nasrullah; Sang, Jun; Alam, Mohammad S.; Mateen, Muhammad; Cai, Bin; Hu, Haibo
Automated Lung Nodule Detection and Classification Using Deep Learning Combined with Multiple Strategies Journal Article
In: Sensors, 2019.
@article{Nasrullah,
title = {Automated Lung Nodule Detection and Classification Using Deep Learning Combined with Multiple Strategies},
author = {Nasrullah Nasrullah and Jun Sang and Mohammad S. Alam and Muhammad Mateen and Bin Cai and Haibo Hu},
url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6749467/},
doi = {10.3390/s19173722},
year = {2019},
date = {2019-08-28},
urldate = {2019-08-28},
journal = {Sensors},
volume = {19},
number = {17},
pages = {3722},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Berezsky, O. M.; Liashchynskyi, P. B.
METHOD OF GENERATIVE-ADVERSARIAL NETWORKS SEARCHING ARCHITECTURES FOR BIOMEDICAL IMAGES SYNTHESIS Journal Article
In: Radio Electronics, Computer Science, Control, no. 1, pp. 104, 2024.
@article{Berezsky_Liashchynskyi_2024,
  title     = {METHOD OF GENERATIVE-ADVERSARIAL NETWORKS SEARCHING ARCHITECTURES FOR BIOMEDICAL IMAGES SYNTHESIS},
  author    = {O. M. Berezsky and P. B. Liashchynskyi},
  url       = {http://ric.zntu.edu.ua/article/view/300976},
  doi       = {10.15588/1607-3274-2024-1-10},
  year      = {2024},
  date      = {2024-04-01},
  urldate   = {2024-04-01},
  journal   = {Radio Electronics, Computer Science, Control},
  number    = {1},
  pages     = {104},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Elghazi, Khalid; Ramchoun, Hassan; Masrour, Tawfik
Enhancing CNN structure and learning through NSGA-II-based multi-objective optimization Journal Article
In: Evolving Systems , 2024.
@article{Elghazi-es24a,
title = {Enhancing CNN structure and learning through NSGA-II-based multi-objective optimization},
author = {Khalid Elghazi and Hassan Ramchoun and Tawfik Masrour},
url = {https://link.springer.com/article/10.1007/s12530-024-09574-9},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-01},
journal = {Evolving Systems},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liang, Jiayu; Cao, Hanqi; Lu, Yaxin; Su, Mingming
Architecture search of accurate and lightweight CNNs using genetic algorithm Journal Article
In: Genetic Programming and Evolvable Machines , 2024.
@article{Linag-gpem24a,
title = {Architecture search of accurate and lightweight CNNs using genetic algorithm},
author = {Jiayu Liang and Hanqi Cao and Yaxin Lu and Mingming Su},
url = {https://link.springer.com/article/10.1007/s10710-024-09484-4},
year = {2024},
date = {2024-04-01},
urldate = {2024-04-01},
journal = {Genetic Programming and Evolvable Machines},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Xianbao; Liu, Pengfei; Xiang, Sheng; Weng, Yangkai; Yao, Minghai
Search on dual-space: discretization accuracy-based architecture search for person re-identification Journal Article
In: The Visual Computer , 2024.
@article{Wang-vc24a,
title = {Search on dual-space: discretization accuracy-based architecture search for person re-identification},
author = {
Xianbao Wang and Pengfei Liu and Sheng Xiang and Yangkai Weng and Minghai Yao
},
url = {https://link.springer.com/article/10.1007/s00371-024-03308-3},
year = {2024},
date = {2024-03-28},
journal = {The Visual Computer },
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kapoor, Rahul; Pillay, Nelishia
A genetic programming approach to the automated design of CNN models for image classification and video shorts creation Journal Article
In: Genetic Programming and Evolvable Machines, 2024.
@article{Kapoor-gpem24a,
title = {A genetic programming approach to the automated design of CNN models for image classification and video shorts creation},
author = {Rahul Kapoor and Nelishia Pillay},
url = {https://link.springer.com/article/10.1007/s10710-024-09483-5},
year = {2024},
date = {2024-03-14},
urldate = {2024-03-14},
journal = {Genetic Programming and Evolvable Machines},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jin, Cong; Huang, Jinjie; Chen, Yuanjian
Neural architecture search via progressive partial connection with attention mechanism Journal Article
In: Scientific Reports, 2024.
@article{jin,
title = {Neural architecture search via progressive partial connection with attention mechanism},
author = {Cong Jin and Jinjie Huang and Yuanjian Chen},
url = {https://www.nature.com/articles/s41598-024-57236-2},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
journal = {Scientific Reports},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Boyu
Neural Architecture Search for Convolutional and Transformer Deep Neural Networks PhD Thesis
2024.
@phdthesis{CHEN-phd24a,
title = {Neural Architecture Search for Convolutional and Transformer Deep Neural Networks},
author = {Boyu Chen},
url = {https://scholar.google.de/scholar_url?url=https://ses.library.usyd.edu.au/bitstream/handle/2123/32334/chen_b_thesis.pdf%3Fsequence%3D1&hl=de&sa=X&d=15530204168501603894&ei=b9PvZbWAGsfTy9YPt6iP6AU&scisig=AFWwaeZNQdCufcbiu0EUFQdEEyKz&oi=scholaralrt&hist=mvciDDAAAAAJ:2945779489622371749:AFWwaeYSwjSBxI9k5p1JRsFqGwve&html=&pos=1&folt=kw},
school = {The University of Sydney},
year = {2024},
date = {2024-03-01},
urldate = {2024-03-01},
keywords = {},
pubstate = {published},
tppubtype = {phdthesis}
}