@comment{Reviewed export: fixed corporate-author bracing, name order, entry types,
and acronym protection. Citation keys are unchanged so existing \cite commands still work.}

@misc{mdhere_algorithms_2023,
  title      = {Algorithms of war: {The} use of artificial intelligence in decision making in armed conflict},
  shorttitle = {Algorithms of war},
  url        = {https://blogs.icrc.org/law-and-policy/2023/10/24/algorithms-of-war-use-of-artificial-intelligence-decision-making-armed-conflict/},
  abstract   = {Authors critically examine some of the touted benefits of artificial intelligence (AI) when used to support decisions by armed actors in war.},
  language   = {en-US},
  urldate    = {2025-04-28},
  journal    = {Humanitarian Law \& Policy Blog},
  author     = {Stewart, Ruben and Hinds, Georgia},
  month      = oct,
  year       = {2023},
}

@misc{noauthor_secretary-generals_nodate,
  title         = {{Secretary-General}'s remarks to the {Security Council} - on artificial intelligence [bilingual, as delivered] {\textbar} {United Nations} {Secretary-General}},
  url           = {https://www.un.org/sg/en/content/sg/statement/2024-12-19/secretary-generals-remarks-the-security-council-artificial-intelligence-bilingual-delivered},
  abstract      = {Mr. President, Excellencies, I thank the United States for convening the meeting on Artificial Intelligence and the Maintenance of International Peace and Security. I briefed this Council about AI in July 2023.  As I said then, those that feel like technology is moving very fast must understand a simple fact: Technology will never move in the future as slowly as today. In the},
  urldate       = {2025-04-28},
  author        = {Guterres, Ant{\'o}nio},
  month         = dec,
  year          = {2024},
  internal-note = {author/date inferred from the statement URL (2024-12-19) - verify},
}

@techreport{paoli_ai_2025,
  title         = {{AI} in the military domain: a briefing note for states},
  shorttitle    = {{AI} in the military domain},
  type          = {Briefing note},
  institution   = {United Nations Institute for Disarmament Research ({UNIDIR})},
  url           = {https://unidir.org/publication/ai-military-domain-briefing-note-states/},
  abstract      = {On 24 December 2024, the United Nations (UN) General Assembly adopted Resolution A/RES/79/239 on Artificial intelligence in the military domain and its implications for international peace and security. The UN Secretary-General recently invited Member States, observer States, international and regional organizations, the International Committee of the Red Cross, civil society, industry and the scientific community},
  language      = {en-GB},
  urldate       = {2025-04-28},
  author        = {Paoli, Giacomo Persi and Afina, Yasmin},
  year          = {2025},
  internal-note = {original export said month = oct, year = 2025, which postdates the access date 2025-04-28; month removed as scrape junk - verify publication date},
}

@misc{noauthor_life-and-death_nodate,
  title   = {‘{Life}-and-death situations must never be left to chance, code, corporate interest’, {Secretary-General} tells conference on artificial intelligence {\textbar} meetings coverage and press releases},
  url     = {https://press.un.org/en/2025/sgsm22605.doc.htm},
  abstract = {Following is UN Secretary-General António Guterres’ message for the Global Conference on Artificial Intelligence (AI), Security and Ethics, in Geneva today:},
  urldate = {2025-04-28},
  year    = {2025},
}

@misc{sess.:_2024-2025_artificial_2024,
  title      = {Artificial intelligence in the military domain and its implications for international peace and security: resolution adopted by the {General} {Assembly}},
  shorttitle = {Artificial intelligence in the military domain and its implications for international peace and security},
  url        = {https://digitallibrary.un.org/record/4071348},
  language   = {en},
  urldate    = {2025-04-28},
  author     = {{UN General Assembly}},
  month      = dec,
  year       = {2024},
}

@incollection{smuha_artificial_2025,
  address   = {Cambridge},
  series    = {Cambridge {Law} {Handbooks}},
  title     = {Artificial intelligence and armed conflicts},
  isbn      = {9781009367783},
  url       = {https://www.cambridge.org/core/books/cambridge-handbook-of-the-law-ethics-and-policy-of-artificial-intelligence/artificial-intelligence-and-armed-conflicts/46B20A780349FA8939E8B886E77C4B93},
  abstract  = {The rules of war, formally known as international humanitarian law, have been developing for centuries, reflecting society’s moral compass, the evolution of its values, and technological progress. While humanitarian law has been successful in prohibiting the use of certain methods and means of warfare, it is nevertheless destined to remain in a constant catch-up cycle with the atrocities of war. Nowadays, the widespread development and adoption of digital technologies in warfare, including AI, are leading to some of the biggest changes in human history. Is international humanitarian law up to the task of addressing the threats those technologies can present in the context of armed conflicts? This chapter provides a basic understanding of the system, principles, and internal logic of this legal domain, which is necessary to evaluate the actual or potential role of AI systems in (non-)international armed conflicts. The chapter aims to contribute to the discussion of the ex-ante regulation of AI systems used for military purposes beyond the scope of lethal autonomous weapons, as well as to recognize the potential that AI carries for improving the applicability of the basic principles of international humanitarian law, if used in an accountable and responsible way.},
  urldate   = {2025-04-28},
  booktitle = {The {Cambridge} {Handbook} of the {Law}, {Ethics} and {Policy} of {Artificial} {Intelligence}},
  publisher = {Cambridge University Press},
  author    = {Yordanova, Katerina},
  editor    = {Smuha, Nathalie A.},
  year      = {2025},
  doi       = {10.1017/9781009367783.024},
  keywords  = {AI, armed conflicts, dual use, cybersecurity, international humanitarian law},
  pages     = {411--428},
}

@article{roumate_artificial_2020,
  title     = {Artificial intelligence, ethics and international human rights law},
  volume    = {29},
  copyright = {Copyright (c) 2021 The International Review of Information Ethics},
  issn      = {2563-5638},
  url       = {https://informationethics.ca/index.php/irie/article/view/422},
  doi       = {10.29173/irie422},
  abstract  = {The ethics of artificial intelligence is the response to a new dilemma that demands international society to provide a legal response to the many ethical challenges artificial intelligence creates. COVID-19 accelerates the use of AI in all countries and all fields. The pandemic is accelerating the transition to a society that is increasingly based on the use of, and reliance on, AI, and this also enhances the threats and creates new risks related to human rights. Artificial Intelligence (AI) influences human rights and international humanitarian law. This paper addresses international mechanisms and ethics as new rules which can ensure the protection of human rights in the age of AI. Two arguments are discussed in this study. Considering the ubiquitous and global reach of AI, the challenges it imposes requires an international legal oversight, a requirement that highlights the importance of ethical frameworks. In conclusion, the paper emphasizes how optimal action is needed to protect human rights in the age of AI. Rethinking international law and human rights and enhancing the ethical frameworks have thus become obligatory rather than a choice.},
  language  = {en},
  urldate   = {2025-04-28},
  journal   = {The International Review of Information Ethics},
  author    = {Roumate, Fatima},
  year      = {2020},
  keywords  = {Artificial Intelligence, Ethics, Human Rights, International Humanitarian Law},
}

@techreport{morgan_military_2020,
  title       = {Military applications of artificial intelligence: ethical concerns in an uncertain world},
  shorttitle  = {Military applications of artificial intelligence},
  url         = {https://www.rand.org/pubs/research_reports/RR3139-1.html},
  abstract    = {The authors examine the ethical considerations, benefits, and risks of military applications of artificial intelligence. Comparing development efforts in the United States, China, and Russia (as well as various positions on proposals to regulate or ban autonomous weapons), the authors point to a need for the United States to continue to pursue advantages in the field and explore confidence-building and risk-reduction measures with other states.},
  language    = {en},
  urldate     = {2025-04-28},
  institution = {RAND Corporation},
  author      = {Morgan, Forrest E. and Boudreaux, Benjamin and Lohn, Andrew J. and Ashby, Mark and Curriden, Christian and Klima, Kelly and Grossman, Derek},
  month       = apr,
  year        = {2020},
  keywords    = {Research, Military Doctrine, Survey Research Methodology, China, Artificial Intelligence, United States, Military Technology, Science, Technology, and Innovation Policy, Russia},
}

@article{oniani_adopting_2023,
  title     = {Adopting and expanding ethical principles for generative artificial intelligence from military to healthcare},
  volume    = {6},
  copyright = {2023 The Author(s)},
  issn      = {2398-6352},
  url       = {https://www.nature.com/articles/s41746-023-00965-x},
  doi       = {10.1038/s41746-023-00965-x},
  abstract  = {In 2020, the U.S. Department of Defense officially disclosed a set of ethical principles to guide the use of Artificial Intelligence (AI) technologies on future battlefields. Despite stark differences, there are core similarities between the military and medical service. Warriors on battlefields often face life-altering circumstances that require quick decision-making. Medical providers experience similar challenges in a rapidly changing healthcare environment, such as in the emergency department or during surgery treating a life-threatening condition. Generative AI, an emerging technology designed to efficiently generate valuable information, holds great promise. As computing power becomes more accessible and the abundance of health data, such as electronic health records, electrocardiograms, and medical images, increases, it is inevitable that healthcare will be revolutionized by this technology. Recently, generative AI has garnered a lot of attention in the medical research community, leading to debates about its application in the healthcare sector, mainly due to concerns about transparency and related issues. Meanwhile, questions around the potential exacerbation of health disparities due to modeling biases have raised notable ethical concerns regarding the use of this technology in healthcare. However, the ethical principles for generative AI in healthcare have been understudied. As a result, there are no clear solutions to address ethical concerns, and decision-makers often neglect to consider the significance of ethical principles before implementing generative AI in clinical practice. In an attempt to address these issues, we explore ethical principles from the military perspective and propose the “GREAT PLEA” ethical principles, namely Governability, Reliability, Equity, Accountability, Traceability, Privacy, Lawfulness, Empathy, and Autonomy for generative AI in healthcare. Furthermore, we introduce a framework for adopting and expanding these ethical principles in a practical way that has been useful in the military and can be applied to healthcare for generative AI, based on contrasting their ethical concerns and risks. Ultimately, we aim to proactively address the ethical dilemmas and challenges posed by the integration of generative AI into healthcare practice.},
  language  = {en},
  number    = {1},
  urldate   = {2025-04-28},
  journal   = {npj Digital Medicine},
  author    = {Oniani, David and Hilsman, Jordan and Peng, Yifan and Poropatich, Ronald K. and Pamplin, Jeremy C. and Legault, Gary L. and Wang, Yanshan},
  month     = dec,
  year      = {2023},
  keywords  = {Health care, Translational research},
  pages     = {1--10},
}

@article{wasilow_artificial_2019,
  title      = {Artificial intelligence, robotics, ethics, and the military: a {Canadian} perspective},
  volume     = {40},
  copyright  = {http://onlinelibrary.wiley.com/termsAndConditions\#vor},
  issn       = {0738-4602, 2371-9621},
  shorttitle = {Artificial intelligence, robotics, ethics, and the military},
  url        = {https://onlinelibrary.wiley.com/doi/10.1609/aimag.v40i1.2848},
  doi        = {10.1609/aimag.v40i1.2848},
  abstract   = {Defense and security organizations depend upon science and technology to meet operational needs, predict and counter threats, and meet increasingly complex demands of modern warfare. Artificial intelligence and robotics could provide solutions to a wide range of military gaps and deficiencies. At the same time, the unique and rapidly evolving nature of AI and robotics challenges existing polices, regulations, and values, and introduces complex ethical issues that might impede their development, evaluation, and use by the Canadian Armed Forces (CAF). Early consideration of potential ethical issues raised by military use of emerging AI and robotics technologies in development is critical to their effective implementation. This article presents an ethics assessment framework for emerging AI and robotics technologies. It is designed to help technology developers, policymakers, decision makers, and other stakeholders identify and broadly consider potential ethical issues that might arise with the military use and integration of emerging AI and robotics technologies of interest. We also provide a contextual environment for our framework, as well as an example of how our framework can be applied to a specific technology. Finally, we briefly identify and address several pervasive issues that arose during our research.},
  language   = {en},
  number     = {1},
  urldate    = {2025-04-28},
  journal    = {AI Magazine},
  author     = {Wasilow, Sherry and Thorpe, Joelle B.},
  month      = mar,
  year       = {2019},
  pages      = {37--48},
}

@misc{crootof_ai_2022,
  address  = {Rochester, NY},
  type     = {{SSRN} {Scholarly} {Paper}},
  title    = {{AI} and the actual {IHL} accountability gap},
  url      = {https://papers.ssrn.com/abstract=4289005},
  abstract = {Article after article bemoans how new military technologies — including landmines, unmanned drones, cyberoperations, autonomous weapon systems and artificial intelligence (AI) — create new “accountability gaps” in armed conflict. Certainly, by introducing geographic, temporal and agency distance between a human’s decision and its effects, these technologies expand familiar sources of error and complicate causal analyses, making it more difficult to hold an individual or state accountable for unlawful harmful acts.But in addition to raising these new accountability issues, novel military technologies are also making more salient the accountability chasm that already exists at the heart of international humanitarian law (IHL): the relative lack of legal accountability for unintended, “awful but lawful” civilian harm.Technological developments often make older, infrequent or underreported problems more stark, pervasive or significant. While many proposals focus on regulating particular weapons technologies to address concerns about increased incidental harms or increased accidents, this is not a case of the law failing to keep up with technological development. Instead, technological developments have drawn attention to the accountability gap built into the structure of IHL. In doing so, AI and other new military technologies have highlighted the need for accountability mechanisms for all civilian harms.},
  language = {en},
  urldate  = {2025-04-28},
  author   = {Crootof, Rebecca},
  month    = nov,
  year     = {2022},
  keywords = {international humanitarian law, law of armed conflict, artificial intelligence, accountability, weapons, accidents},
}

@article{woodcock_human/machine-learning_2024,
  title    = {Human/machine(-learning) interactions, human agency and the international humanitarian law proportionality standard},
  volume   = {38},
  issn     = {1360-0826, 1469-798X},
  url      = {https://www.tandfonline.com/doi/full/10.1080/13600826.2023.2267592},
  doi      = {10.1080/13600826.2023.2267592},
  language = {en},
  number   = {1},
  urldate  = {2025-04-28},
  journal  = {Global Society},
  author   = {Woodcock, Taylor Kate},
  month    = jan,
  year     = {2024},
  pages    = {100--121},
}

@incollection{klonowska_article_2022,
  address    = {The Hague},
  title      = {Article 36: review of {AI} decision-support systems and other emerging technologies of warfare},
  isbn       = {9789462654914},
  shorttitle = {Article 36},
  url        = {https://doi.org/10.1007/978-94-6265-491-4_6},
  abstract   = {Artificial intelligence (AI) decision-support systems significantly impact how States make warfare decisions, conduct hostilities, and whether they comply with international humanitarian law. Decision-support systems, even if they do not autonomously execute targets, can play a critical role in the long chain of human-machine and machine-machine decision-making infrastructure, thus contributing to the co-production of hostilities.},
  language   = {en},
  urldate    = {2025-04-28},
  booktitle  = {Yearbook of {International} {Humanitarian} {Law}, {Volume} 23 (2020)},
  publisher  = {T.M.C. Asser Press},
  author     = {Klonowska, Klaudia},
  editor     = {Gill, Terry D. and Geiß, Robin and Krieger, Heike and Mignot-Mahdavi, Rebecca},
  year       = {2022},
  doi        = {10.1007/978-94-6265-491-4_6},
  pages      = {123--153},
}

@misc{noauthor_chapter_nodate,
  title         = {Chapter 4 - {NSCAI} final report},
  url           = {https://reports.nscai.gov/final-report/chapter-4},
  language      = {en},
  urldate       = {2025-04-28},
  author        = {{National Security Commission on Artificial Intelligence}},
  internal-note = {corporate author inferred from reports.nscai.gov domain - verify},
}

@misc{says_autonomous_2023,
  title    = {Autonomous weapon systems and the changing face of international humanitarian law},
  url      = {https://internationallaw.blog/2023/01/16/autonomous-weapon-systems-and-the-changing-face-of-international-humanitarian-law/},
  abstract = {By Shahriar Yeasin Khan, Barrister-at-Law, The Honourable Society of Lincoln’s Inn Autonomous Weapon Systems based on Artificial Intelligence As humankind continues to make unprecedented stri…},
  language = {en},
  urldate  = {2025-04-28},
  journal  = {International Law Blog},
  author   = {Khan, Shahriar Yeasin},
  month    = jan,
  year     = {2023},
}