# Information related to high-level characteristics of the AI project, including its market status, operator, and type of AI
operator_role:
  provider: # Art. 2(1); definition in Art. 3(3)
    verbose: 'AI project operator is a natural or legal person, public authority, agency or other body that develops an AI system or a general-purpose AI model, or that has an AI system or a general-purpose AI model developed, and places it on the market or puts the AI system into service under its own name or trademark, whether for payment or free of charge'
    value: !!bool false
  deployer: # Art. 2(1); definition in Art. 3(4)
    verbose: 'AI project operator is a natural or legal person, public authority, agency or other body using an AI system under its authority, except where the AI system is used in the course of a personal non-professional activity'
    value: !!bool false
  eu_located: # Art. 2(1)
    verbose: 'AI project operator has its place of establishment or location within the Union'
    value: !!bool true
  output_used: # Art. 2(1)(c)
    verbose: 'The output produced by the AI system is used in the Union'
    value: !!bool false
  importer: # Art. 2(1); definition in Art. 3(6)
    verbose: 'AI project operator is a natural or legal person located or established in the Union that places on the market an AI system that bears the name or trademark of a natural or legal person established in a third country'
    value: !!bool false
  distributor: # Art. 2(1); definition in Art. 3(7)
    verbose: 'AI project operator is a natural or legal person in the supply chain, other than the provider or the importer, that makes an AI system available on the Union market'
    value: !!bool false
  product_manufacturer: # Art. 2(1)(e)
    verbose: 'AI project operator is a product manufacturer'
    value: !!bool false
# ypi: just to note that providers, deployers and product manufacturers that are outside the EU are still in scope if the output of the AI system is used within the EU (see the sketch below).
# Importers and distributors are only in scope if they are located within the EU.
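# A minimal, non-normative sketch of how the scope logic in the note above could
# be evaluated once this file is parsed. The file name 'project_card.yaml' and
# the variable names are illustrative assumptions, not part of this schema.
#
#     import yaml
#
#     with open("project_card.yaml") as f:
#         card = yaml.safe_load(f)
#
#     role = card["operator_role"]
#     val = lambda key: role[key]["value"]
#
#     # Providers, deployers and product manufacturers outside the EU remain in
#     # scope when the system's output is used in the Union (Art. 2(1)(c));
#     # importers and distributors must themselves be in the Union.
#     in_scope = (
#         (val("provider") or val("deployer") or val("product_manufacturer"))
#         and (val("eu_located") or val("output_used"))
#     ) or ((val("importer") or val("distributor")) and val("eu_located"))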
eu_market_status:
  placed_on_market: # Art. 3(9)
    verbose: 'AI project is being made available on the EU market (i.e., supplied for distribution or use in the course of a commercial activity, whether in return for payment or free of charge) for the first time'
    value: !!bool false
  put_into_service: # Art. 3(11)
    verbose: 'AI project is being used for its intended purpose for the first time in the EU, either by the operator or by a deployer to whom it is directly supplied'
    value: !!bool false
ai_system:
  ai_system: # Art. 3(1)
    verbose: 'AI project is a machine-based system that is designed to operate with varying levels of autonomy and that may exhibit adaptiveness after deployment, and that, for explicit or implicit objectives, infers, from the input it receives, how to generate outputs such as predictions, content, recommendations, or decisions that can influence physical or virtual environments'
    value: !!bool false
gpai_model:
  gpai_model: # Art. 3(63)
    verbose: 'AI project is an AI model, including where such an AI model is trained with a large amount of data using self-supervision at scale, that displays significant generality and is capable of competently performing a wide range of distinct tasks regardless of the way the model is placed on the market, and that can be integrated into a variety of downstream systems or applications, except AI models that are used for research, development or prototyping activities before they are placed on the market'
    value: !!bool false
gpai_model_systemic_risk:
  evaluation: # Art. 51(1)(a)
    verbose: 'The AI project has high-impact capabilities based on an evaluation using appropriate technical tools and methodologies, including indicators and benchmarks'
    value: !!bool false
  committee: # Art. 51(1)(b)
    verbose: 'The AI project has capabilities or an impact equivalent to high-impact capabilities based on a decision of the Commission, ex officio or following a qualified alert from the scientific panel'
    value: !!bool false
  flops: # Art. 51(2); see the non-normative estimate after this block
    verbose: 'The cumulative amount of computation used for the training of the AI project, as measured in floating-point operations (FLOPs), is greater than 10^25'
    value: !!bool false
# ypi: I don't think we need to help users figure out if their GPAI model has systemic risk. There are only a few models with that risk (only two at the moment), and they're mostly run by big tech companies, who aren't our target users anyway. Plus, it's the job of the AI Office in the EU Commission to keep that list updated, which means the criteria about FLOPs can also change.
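# A rough, non-normative sanity check for the Art. 51(2) presumption: a common
# heuristic for dense transformer models is training compute ~= 6 * N * D FLOPs
# (N = parameter count, D = training tokens). The figures below are illustrative
# assumptions, not data about any real model.
#
#     N = 70e9            # parameters (illustrative)
#     D = 15e12           # training tokens (illustrative)
#     train_flops = 6 * N * D                        # ~6.3e24 FLOPs
#     presumed_systemic_risk = train_flops > 1e25    # False in this example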
# Information related to the Act's exceptions for scientific research, open-source AI, and more
excepted:
  scientific: # Art. 2(6)
    verbose: 'AI project is or was specifically developed and put into service for the sole purpose of scientific research and development'
    value: !!bool false
  pre_market: # Art. 2(8)
    verbose: 'AI project strictly consists of research, testing or development activity of the sort that takes place prior to an AI system being placed on the market or put into service'
    value: !!bool false
  open_source_ai_system: # Art. 2(12)
    verbose: 'AI project is released under a free and open-source licence, and is not placed on the market or put into service as a high-risk AI system or as an AI system that falls under Article 5 or 50'
    value: !!bool false
  open_source_gpai_model: # Art. 53(2)
    verbose: 'AI project involves AI models that are released under a free and open-source licence that allows for the access, usage, modification, and distribution of the model, and whose parameters, including the weights, the information on the model architecture, and the information on model usage, are made publicly available. This exception shall not apply to general-purpose AI models with systemic risk'
    value: !!bool false
# ypi: add "personal non-professional activity" to the above list? -- Art. 2(10)
# ypi: The exemptions could be moved up to the operator_role section, since they also relate to Article 2.
# Information related to practices prohibited by the Act
prohibited_practice:
  ai_system:
    manipulative: # Art. 5(1)(a)
      verbose: 'The AI project deploys subliminal or purposefully manipulative or deceptive techniques, with the objective or effect of materially distorting the behaviour of people by appreciably impairing their ability to make an informed decision, thereby causing them to take a decision that they would not have otherwise taken, in a manner that causes or is reasonably likely to cause significant harm'
      value: !!bool false
    exploit_vulnerable: # Art. 5(1)(b)
      verbose: 'The AI project exploits the vulnerabilities of natural persons due to their age, disability or a specific social or economic situation, with the objective or effect of materially distorting their behaviour in a manner that causes or is reasonably likely to cause significant harm'
      value: !!bool false
    social_score: # Art. 5(1)(c)
      verbose: 'The AI project is for the evaluation or classification of natural persons over a certain period of time based on their social behaviour or known, inferred or predicted personal or personality characteristics, with the social score leading to at least one of the following: (i) detrimental or unfavourable treatment of certain natural persons in social contexts that are unrelated to the contexts in which the data was originally generated or collected; (ii) detrimental or unfavourable treatment of certain natural persons that is unjustified or disproportionate to their social behaviour or its gravity'
      value: !!bool false
    crime_prediction: # Art. 5(1)(d)
      verbose: 'The AI project makes risk assessments of natural persons in order to assess or predict the risk of them committing a criminal offence, based solely on the profiling of the natural person or on assessing their personality traits and characteristics (and does not support the human assessment of the involvement of a person in a criminal activity, which is already based on objective and verifiable facts directly linked to a criminal activity)'
      value: !!bool false
    untargeted_face: # Art. 5(1)(e)
      verbose: 'The AI project creates or expands facial recognition databases through the untargeted scraping of facial images from the internet or CCTV footage'
      value: !!bool false
    emotion_prediction: # Art. 5(1)(f)
      verbose: 'The AI project infers the emotions of a natural person in the areas of workplace and education institutions, and is not intended to be placed on the market or put into service for medical or safety reasons'
      value: !!bool false
  biometric:
    categorization: # Art. 5(1)(g)
      verbose: 'The AI project involves the use of biometric categorisation systems that categorise individually natural persons based on their biometric data to deduce or infer their race, political opinions, trade union membership, religious or philosophical beliefs, sex life or sexual orientation; this prohibition does not cover any labelling or filtering of lawfully acquired biometric datasets, such as images, based on biometric data, or the categorisation of biometric data in the area of law enforcement'
      value: !!bool false
    real_time: # Art. 5(1)(h)
      verbose: 'The AI project involves the use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement'
      value: !!bool false
    real_time_exception_victim: # Art. 5(1)(h)(i)
      verbose: 'The AI project involves the use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement strictly for the targeted search for specific victims of abduction, trafficking in human beings or sexual exploitation of human beings, or the search for missing persons'
      value: !!bool false
    real_time_exception_threat: # Art. 5(1)(h)(ii)
      verbose: 'The AI project involves the use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement strictly for the prevention of a specific, substantial and imminent threat to the life or physical safety of natural persons or a genuine and present or genuine and foreseeable threat of a terrorist attack'
      value: !!bool false
    real_time_exception_investigation: # Art. 5(1)(h)(iii)
      verbose: 'The AI project involves the use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement strictly for the localisation or identification of a person suspected of having committed a criminal offence, for the purpose of conducting a criminal investigation or prosecution or executing a criminal penalty for offences referred to in Annex II and punishable in the Member State concerned by a custodial sentence or a detention order for a maximum period of at least four years'
      value: !!bool false
# ypi: my interpretation of emotion_prediction is "AI systems that infer emotions of individuals in the areas of workplace and educational institutions, except for AI medical or safety systems."
# Requirements for those projects which involve high-risk AI systems
high_risk_ai_system:
  safety_component: # Art. 6(1)(a)
    verbose: 'AI project is intended to be used as a safety component of a product'
    value: !!bool false
  product_covered_by_machinery_regulation: # Art. 6(1)(a); Annex I
    verbose: 'AI project is itself a product, covered by Directive 2006/42/EC of the European Parliament and of the Council of 17 May 2006 on machinery, and amending Directive 95/16/EC (OJ L 157, 9.6.2006, p. 24) [as repealed by the Machinery Regulation]'
    value: !!bool false
  product_covered_by_toy_safety_regulation: # Art. 6(1)(a); Annex I
    verbose: 'AI project is itself a product, covered by Directive 2009/48/EC of the European Parliament and of the Council of 18 June 2009 on the safety of toys (OJ L 170, 30.6.2009, p. 1)'
    value: !!bool false
  product_covered_by_watercraft_regulation: # Art. 6(1)(a); Annex I
    verbose: 'AI project is itself a product, covered by Directive 2013/53/EU of the European Parliament and of the Council of 20 November 2013 on recreational craft and personal watercraft and repealing Directive 94/25/EC (OJ L 354, 28.12.2013, p. 90)'
    value: !!bool false
  biometric_categorization: # Art. 6(2); Annex III(1)(b)
    verbose: 'AI project is intended to be used for biometric categorisation, according to sensitive or protected attributes or characteristics based on the inference of those attributes or characteristics'
    value: !!bool false
  emotion_recognition: # Art. 6(2); Annex III(1)(c)
    verbose: 'AI project is intended to be used for emotion recognition'
    value: !!bool false
  critical_infrastructure: # Art. 6(2); Annex III(2)
    verbose: 'AI project is intended to be used as a safety component in the management and operation of critical digital infrastructure, road traffic, or in the supply of water, gas, heating or electricity'
    value: !!bool false
  educational: # Art. 6(2); Annex III(3)(a)
    verbose: 'AI project is intended to be used to determine access or admission or to assign natural persons to educational and vocational training institutions at all levels'
    value: !!bool false
  recruitment: # Art. 6(2); Annex III(4)(a)
    verbose: 'AI project is intended to be used for the recruitment or selection of natural persons, in particular to place targeted job advertisements, to analyse and filter job applications, and to evaluate candidates'
    value: !!bool false
  public_assistance: # Art. 6(2); Annex III(5)(a)
    verbose: 'AI project is intended to be used by public authorities or on behalf of public authorities to evaluate the eligibility of natural persons for essential public assistance benefits and services, including healthcare services, as well as to grant, reduce, revoke, or reclaim such benefits and services'
    value: !!bool false
  victim_assessment: # Art. 6(2); Annex III(6)(a)
    verbose: 'AI project is intended to be used by or on behalf of law enforcement authorities, or by Union institutions, bodies, offices or agencies in support of law enforcement authorities or on their behalf, to assess the risk of a natural person becoming the victim of criminal offences'
    value: !!bool false
  polygraph: # Art. 6(2); Annex III(7)(a)
    verbose: 'AI project is intended to be used by or on behalf of competent public authorities or by Union institutions, bodies, offices or agencies as polygraphs or similar tools'
    value: !!bool false
  judicial: # Art. 6(2); Annex III(8)(a)
    verbose: 'AI project is intended to be used by a judicial authority or on their behalf to assist a judicial authority in researching and interpreting facts and the law and in applying the law to a concrete set of facts, or to be used in a similar way in alternative dispute resolution'
    value: !!bool false
  filter_exception_rights: # Art. 6(3); see the non-normative sketch after this block
    verbose: 'The AI project does not pose a significant risk of harm to the health, safety or fundamental rights of natural persons, including by not materially influencing the outcome of decision making'
    value: !!bool false
  filter_exception_narrow: # Art. 6(3)(a)
    verbose: 'The AI project is intended to perform a narrow procedural task'
    value: !!bool false
  filter_exception_human: # Art. 6(3)(b)
    verbose: 'The AI project is intended to improve the result of a previously completed human activity'
    value: !!bool false
  filter_exception_deviation: # Art. 6(3)(c)
    verbose: 'The AI project is intended to detect decision-making patterns or deviations from prior decision-making patterns, and is not meant to replace or influence the previously completed human assessment without proper human review'
    value: !!bool false
  filter_exception_prep: # Art. 6(3)(d)
    verbose: 'The AI project is intended to perform a preparatory task to an assessment relevant for the purposes of the use cases listed in Annex III'
    value: !!bool false
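# A minimal, non-normative sketch of how the Art. 6(2)/6(3) fields above combine
# (reusing the parsed 'card' from the sketch near the top of this file). The
# 'performs_profiling' flag is an illustrative assumption; Art. 6(3) provides
# that a system performing profiling of natural persons is always high-risk.
#
#     hr = card["high_risk_ai_system"]
#     v = lambda key: hr[key]["value"]
#
#     annex_iii_use = any(v(k) for k in (
#         "biometric_categorization", "emotion_recognition",
#         "critical_infrastructure", "educational", "recruitment",
#         "public_assistance", "victim_assessment", "polygraph", "judicial"))
#     filter_conditions = any(v(k) for k in (
#         "filter_exception_narrow", "filter_exception_human",
#         "filter_exception_deviation", "filter_exception_prep"))
#
#     performs_profiling = False  # illustrative assumption
#     high_risk = annex_iii_use and not (
#         v("filter_exception_rights") and filter_conditions
#         and not performs_profiling)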
risk_management_system:
  established: # Art. 9(1)
    verbose: 'Risk management system has been established, implemented, documented and maintained for the AI project'
    value: !!bool false
  lifecycle: # Art. 9(2)
    verbose: 'Risk management system (for the high-risk AI system) has been planned, run, reviewed, and updated throughout the entire lifecycle of the AI system'
    value: !!bool false
  risk_analysis_intended: # Art. 9(2)(a)
    verbose: 'Risk management system for the AI system includes the identification and analysis of any known or reasonably foreseeable risks that the AI system might pose to health, safety or fundamental rights when used in accordance with its intended purpose'
    value: !!bool false
  risk_estimation_foreseeable: # Art. 9(2)(b)
    verbose: 'Risk management system for the AI system includes the estimation and evaluation of the risks that may emerge when the high-risk AI system is used in accordance with its intended purpose, and under conditions of reasonably foreseeable misuse'
    value: !!bool false
  risk_post_market: # Art. 9(2)(c)
    verbose: 'Risk management system for the AI system includes the evaluation of other risks possibly arising, based on the analysis of data gathered from the post-market monitoring system'
    value: !!bool false
  risk_management_measures: # Art. 9(2)(d)
    verbose: 'Where risks that the AI system might pose to health, safety or fundamental rights when used in accordance with its intended purpose have been identified, appropriate and targeted risk management measures designed to address those risks have been adopted'
    value: !!bool false
  documentation: # Art. 9(5)
    verbose: 'Where risks that the AI system might pose to health, safety or fundamental rights when used in accordance with its intended purpose have been identified, these risks have been documented and communicated to deployers and either eliminated, if feasible, or mitigated such that any residual risk is judged to be acceptable'
    value: !!bool false
  tested: # Art. 9(6)
    verbose: 'To determine the right mitigations, and to show that the high-risk AI system performs consistently for its intended purpose and is in compliance with the risk management requirements, the AI system has been tested'
    value: !!bool false
  testing_threshold: # Art. 9(8)
    verbose: 'Testing has been or will be performed before the AI system is placed on the market, and has been or will be carried out against prior defined metrics and probabilistic thresholds that are appropriate to the intended purpose'
    value: !!bool false
# ypi: I will also add:
# vulnerable_groups: # Art. 9(9)
#   verbose: 'Consideration has been given to potential adverse impacts of the high-risk AI system, in view of its intended purpose, on individuals under 18 and other vulnerable groups.'
#   value: !!bool true
# integration_with_other_laws: # Art. 9(10)
#   verbose: 'For providers subject to other Union law requirements, risk management procedures may be integrated or combined with those established under such law.'
#   value: !!bool true
technical_documentation:
  drawn_up: # Art. 11(1)
    verbose: 'Technical documentation for the high-risk AI system has been drawn up before the system has been placed on the market or put into service, and will be kept up to date'
    value: !!bool false
  intended_purpose: # Art. 11(1); Annex IV(1)(a)
    verbose: 'Technical Documentation includes a general description of the AI system that covers its intended purpose, the name of the provider and the version of the system reflecting its relation to previous versions'
    value: !!bool false
  interaction: # Art. 11(1); Annex IV(1)(b)
    verbose: 'Technical Documentation includes a general description of the AI system that covers how the AI system interacts with, or can be used to interact with, hardware or software, including with other AI systems that are not part of the AI system itself, where applicable'
    value: !!bool false
  versions: # Art. 11(1); Annex IV(1)(c)
    verbose: 'Technical Documentation includes a general description of the AI system that covers the versions of relevant software or firmware, and any requirements related to version updates'
    value: !!bool false
  packages: # Art. 11(1); Annex IV(1)(d)
    verbose: 'Technical Documentation includes a general description of the AI system that covers the description of all the forms in which the AI system is placed on the market or put into service, such as software packages embedded into hardware, downloads, or APIs'
    value: !!bool false
  hardware: # Art. 11(1); Annex IV(1)(e)
    verbose: 'Technical Documentation includes a general description of the AI system that covers the description of the hardware on which the AI system is intended to run'
    value: !!bool false
  development_steps: # Art. 11(1); Annex IV(2)(a)
    verbose: 'Technical Documentation includes a detailed description of the elements of the AI system and of the process for its development, covering the methods and steps performed for the development of the AI system, including, where relevant, recourse to pre-trained systems or tools provided by third parties and how those were used, integrated or modified by the provider'
    value: !!bool false
  design_specs: # Art. 11(1); Annex IV(2)(b)
    verbose: 'Technical Documentation includes a detailed description of the elements of the AI system and of the process for its development, covering the design specifications of the system, namely the general logic of the AI system and of the algorithms; the key design choices, including the rationale and assumptions made, including with regard to persons or groups of persons in respect of whom the system is intended to be used; the main classification choices; what the system is designed to optimise for, and the relevance of the different parameters; the description of the expected output and output quality of the system; and the decisions about any possible trade-off made regarding the technical solutions adopted to comply with the requirements set out in Chapter III, Section 2'
    value: !!bool false
  risk_management: # Art. 11(1); Annex IV(5)
    verbose: 'Technical Documentation includes a detailed description of the risk management system in accordance with Article 9'
    value: !!bool false
  changes: # Art. 11(1); Annex IV(6)
    verbose: 'Technical Documentation includes a description of relevant changes made by the provider to the system through its lifecycle'
    value: !!bool false
  declaration_of_conformity: # Art. 11(1); Annex IV(8)
    verbose: 'Technical Documentation includes a copy of the EU declaration of conformity referred to in Article 47'
    value: !!bool false
  post_market: # Art. 11(1); Annex IV(9)
    verbose: 'Technical Documentation includes a detailed description of the system in place to evaluate the AI system performance in the post-market phase in accordance with Article 72, including the post-market monitoring plan referred to in Article 72(3)'
    value: !!bool false
  product: # Art. 11(2)
    verbose: 'High-risk AI system is either not related to a product covered by the Union harmonisation legislation listed in Section A of Annex I and placed on the market or put into service or, if it is, a single set of technical documentation has been drawn up containing all the information set out in paragraph 1, as well as the information required under those legal acts'
    value: !!bool false
record_keeping:
  logging_generally: # Art. 12(1)
    verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system'
    value: !!bool false
  logging_risk: # Art. 12(2)(a)
    verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system, and these logging capabilities enable the recording of events relevant for identifying situations that may result in the high-risk AI system presenting a risk within the meaning of Article 79(1) or in a substantial modification'
    value: !!bool false
  logging_post_market: # Art. 12(2)(b)
    verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system, and these logging capabilities enable the recording of events relevant for facilitating the post-market monitoring referred to in Article 72'
    value: !!bool false
  monitoring: # Art. 12(2)(c)
    verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system, and these logging capabilities enable the recording of events relevant for monitoring the operation of high-risk AI systems referred to in Article 26(5)'
    value: !!bool false
  recording_use: # Art. 12(3)(a); see the non-normative sketch after this block
    verbose: 'For the high-risk remote biometric identification systems referred to in point 1(a) of Annex III, the logging capabilities provide, at a minimum, the recording of the period of each use of the system (start date and time and end date and time of each use)'
    value: !!bool false
  reference_db: # Art. 12(3)(b)
    verbose: 'For the high-risk remote biometric identification systems referred to in point 1(a) of Annex III, the logging capabilities provide, at a minimum, the reference database against which input data has been checked by the system'
    value: !!bool false
  input: # Art. 12(3)(c)
    verbose: 'For the high-risk remote biometric identification systems referred to in point 1(a) of Annex III, the logging capabilities provide, at a minimum, the input data for which the search has led to a match'
    value: !!bool false
  identification: # Art. 12(3)(d)
    verbose: 'For the high-risk remote biometric identification systems referred to in point 1(a) of Annex III, the logging capabilities provide, at a minimum, the identification of the natural persons involved in the verification of the results, as referred to in Article 14(5)'
    value: !!bool false
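# A non-normative sketch of the minimum log record that Art. 12(3) asks for from
# Annex III point 1(a) remote biometric identification systems; the class and
# field names are illustrative assumptions.
#
#     from dataclasses import dataclass
#     from datetime import datetime
#
#     @dataclass
#     class RbiUsageLogEntry:
#         use_start: datetime           # Art. 12(3)(a): start date and time of each use
#         use_end: datetime             # Art. 12(3)(a): end date and time of each use
#         reference_database: str       # Art. 12(3)(b): database checked against
#         matched_input_ref: str        # Art. 12(3)(c): input data that led to a match
#         verifying_persons: list[str]  # Art. 12(3)(d): persons verifying results (Art. 14(5))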
transparency_and_provision_of_information_to_deployers:
  interpretability: # Art. 13(1)
    verbose: 'AI system is designed and developed to ensure its operation is sufficiently transparent for deployers to interpret its output and use it appropriately'
    value: !!bool false
  compliance: # Art. 13(1)
    verbose: 'AI system is designed and developed with transparency to ensure compliance with provider and deployer obligations in Section 3'
    value: !!bool false
  instructions: # Art. 13(2)
    verbose: 'AI system is accompanied by instructions for use in an appropriate digital format or otherwise, with concise, complete, correct, clear, relevant, accessible, and comprehensible information for deployers'
    value: !!bool false
  contact_details: # Art. 13(3)(a)
    verbose: 'Instructions include provider identity and contact details, and, if applicable, authorized representative details'
    value: !!bool false
  characteristics: # Art. 13(3)(b)(i)
    verbose: 'Instructions include AI system characteristics, capabilities, performance limitations, and intended purpose'
    value: !!bool false
  metrics: # Art. 13(3)(b)(ii)
    verbose: 'Instructions include accuracy metrics, robustness, cybersecurity, and potential impacts on these'
    value: !!bool false
  foreseeable: # Art. 13(3)(b)(iii)
    verbose: 'Instructions include foreseeable circumstances that may risk health, safety, or fundamental rights'
    value: !!bool false
  output: # Art. 13(3)(b)(iv)
    verbose: 'Instructions include technical capabilities to provide information relevant to explaining output'
    value: !!bool false
  specific_persons: # Art. 13(3)(b)(v)
    verbose: 'Instructions include performance regarding specific persons or groups, if applicable'
    value: !!bool false
  data: # Art. 13(3)(b)(vi)
    verbose: 'Instructions include input data specifications and relevant training, validation, and testing dataset information'
    value: !!bool false
  deployers: # Art. 13(3)(b)(vii)
    verbose: 'Instructions include information to enable deployers to interpret and appropriately use AI system output'
    value: !!bool false
  changes: # Art. 13(3)(c)
    verbose: 'Instructions include predetermined changes to the AI system and its performance since the initial conformity assessment'
    value: !!bool false
  oversight_measures: # Art. 13(3)(d)
    verbose: 'Instructions include human oversight measures and technical measures for output interpretation'
    value: !!bool false
  hardware: # Art. 13(3)(e)
    verbose: 'Instructions include computational and hardware resource needs, expected lifetime, and maintenance measures'
    value: !!bool false
  logging: # Art. 13(3)(f)
    verbose: 'Instructions include a description of mechanisms for deployers to collect, store, and interpret logs, if applicable'
    value: !!bool false
human_oversight:
  designed: # Art. 14(1)
    verbose: 'AI system is designed and developed to be effectively overseen by natural persons during use, including appropriate human-machine interface tools'
    value: !!bool false
  minimize_risks: # Art. 14(2)
    verbose: 'Human oversight aims to prevent or minimize risks to health, safety, or fundamental rights during intended use or reasonably foreseeable misuse'
    value: !!bool false
  commensurate: # Art. 14(3)
    verbose: 'Oversight measures are commensurate with risks, autonomy level, and use context, ensured through provider-built measures and/or deployer-implemented measures'
    value: !!bool false
  understandable: # Art. 14(4)(a)
    verbose: 'AI system enables assigned persons to understand its capacities and limitations, monitor operation, and detect anomalies'
    value: !!bool false
  automation_bias: # Art. 14(4)(b)
    verbose: 'AI system enables assigned persons to remain aware of potential automation bias'
    value: !!bool false
  interpretability: # Art. 14(4)(c)
    verbose: 'AI system enables assigned persons to correctly interpret its output'
    value: !!bool false
  override: # Art. 14(4)(d)
    verbose: 'AI system enables assigned persons to decide not to use it or to disregard or override its output'
    value: !!bool false
  stop_button: # Art. 14(4)(e)
    verbose: 'AI system enables assigned persons to intervene in its operation or halt the system through a stop button or similar procedure'
    value: !!bool false
  verification: # Art. 14(5)
    verbose: 'For Annex III point 1(a) systems, actions or decisions require verification by at least two competent persons, with exceptions for law enforcement, migration, border control, or asylum where this would be disproportionate'
    value: !!bool false
accuracy_robustness_cybersecurity:
  design: # Art. 15(1)
    verbose: 'AI system is designed and developed to achieve appropriate levels of accuracy, robustness, and cybersecurity, performing consistently in those respects throughout its lifecycle'
    value: !!bool false
  metrics_in_instructions: # Art. 15(3)
    verbose: 'Accuracy levels and relevant accuracy metrics are declared in the accompanying instructions of use'
    value: !!bool false
  error_resilience: # Art. 15(4)
    verbose: 'AI system is resilient against errors, faults, or inconsistencies, with technical and organizational measures implemented'
    value: !!bool false
  bias: # Art. 15(4)
    verbose: 'AI system that continues learning after deployment is designed to eliminate or reduce the risk of biased outputs influencing future operations'
    value: !!bool false
  unauthorized_use: # Art. 15(5)
    verbose: 'AI system is resilient against unauthorized third-party attempts to alter its use, outputs, or performance'
    value: !!bool false
  cybersecurity_solutions: # Art. 15(5)
    verbose: 'Cybersecurity solutions are appropriate to the relevant circumstances and risks'
    value: !!bool false
  ai_vulnerabilities: # Art. 15(5)
    verbose: 'Technical solutions address AI-specific vulnerabilities, including measures against data poisoning, model poisoning, adversarial examples, and confidentiality attacks'
    value: !!bool false
quality_management_system:
  quality_management_system: # Art. 17(1)(a)
    verbose: 'AI project is subject to a quality management system that includes a strategy for regulatory compliance'
    value: !!bool false
  design: # Art. 17(1)(b)
    verbose: 'System includes techniques, procedures, and systematic actions for the design, design control, and design verification of the high-risk AI system'
    value: !!bool false
  quality_control: # Art. 17(1)(c)
    verbose: 'System includes techniques, procedures, and systematic actions for development, quality control, and quality assurance'
    value: !!bool false
  testing: # Art. 17(1)(d)
    verbose: 'System includes examination, test, and validation procedures to be carried out before, during, and after development'
    value: !!bool false
fundamental_rights_assessment:
  process: # Art. 27(1)(a)
    verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the deployer’s processes in which the high-risk AI system will be used in line with its intended purpose'
    value: !!bool false
  time_period: # Art. 27(1)(b)
    verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the period of time within which, and the frequency with which, each high-risk AI system is intended to be used'
    value: !!bool false
  persons_affected: # Art. 27(1)(c)
    verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the categories of natural persons and groups likely to be affected by its use in the specific context'
    value: !!bool false
  likely_harms: # Art. 27(1)(d)
    verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the specific risks of harm likely to have an impact on the categories of natural persons and groups likely to be affected by its use in the specific context'
    value: !!bool false
  human_oversight: # Art. 27(1)(e)
    verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the implementation of human oversight measures, according to the instructions for use'
    value: !!bool false
  risk_mitigation: # Art. 27(1)(f)
    verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the measures to be taken in the case of the materialisation of risks of harm likely to have an impact on the categories of natural persons and groups likely to be affected by its use in the specific context, including the arrangements for internal governance and complaint mechanisms'
    value: !!bool false
# Information related to the Act's requirements for all AI systems
transparency_obligations:
  synthetic_content: # Art. 50(2); see the non-normative sketch after this block
    verbose: 'Providers of AI systems generating synthetic audio, image, video or text content ensure outputs are marked in a machine-readable format and detectable as artificially generated or manipulated'
    value: !!bool false
  marking_solutions: # Art. 50(2)
    verbose: 'Technical solutions for marking are effective, interoperable, robust, and reliable, as far as technically feasible'
    value: !!bool false
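# A non-normative sketch of attaching a machine-readable marker to generated
# content per Art. 50(2). The wrapper below is an illustrative assumption only;
# real deployments would rely on robust, interoperable techniques such as
# watermarking or provenance metadata (e.g. C2PA-style manifests).
#
#     import json
#
#     def mark_as_ai_generated(content: str, model_id: str) -> str:
#         """Wrap generated content with machine-readable provenance metadata."""
#         return json.dumps({
#             "content": content,
#             "provenance": {"ai_generated": True, "model": model_id},
#         })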
# Information related to the Act's requirements for GPAI models
gpai_model_provider_obligations:
  intended_uses: # Art. 53(1)(a); Annex XI(1)(1)(a-c)
    verbose: 'Provide information on intended tasks, integration types, and acceptable use policies'
    value: !!bool false
  model_architecture: # Art. 53(1)(a); Annex XI(1)(1)(d-f)
    verbose: 'Provide details on model architecture, parameters, input/output modalities, and licence'
    value: !!bool false
  training_methodologies: # Art. 53(1)(b); Annex XI(1)(2)(b)
    verbose: 'Describe training methodologies, key design choices, and optimization goals'
    value: !!bool false
  data: # Art. 53(1)(b); Annex XI(1)(2)(c)
    verbose: 'Provide information on training, testing, and validation data'
    value: !!bool false
  computation: # Art. 53(1)(b); Annex XI(1)(2)(d-e)
    verbose: 'Disclose computational resources and energy consumption for training'
    value: !!bool false
  evaluation: # Art. 53(1)(b); Annex XI(2)(1-2)
    verbose: 'Describe evaluation strategies, results, and adversarial testing measures'
    value: !!bool false
  general_description: # Art. 53(1)(b); Annex XII(1)(a-h)
    verbose: 'To downstream providers, provide a general description of the GPAI model, including intended tasks and integration types'
    value: !!bool false
  development_process: # Art. 53(1)(b); Annex XII(2)(a-c)
    verbose: 'To downstream providers, describe model elements, development process, and integration requirements'
    value: !!bool false
# Information related to the Act's requirements for GPAI models with systemic risk
obligations_for_gpai_models_with_systemic_risk:
  evaluation: # Art. 55(1)(a)
    verbose: 'Perform model evaluation using standardized protocols and conduct adversarial testing'
    value: !!bool false
  mitigation: # Art. 55(1)(b)
    verbose: 'Assess and mitigate possible systemic risks at Union level'
    value: !!bool false
  incident_reporting: # Art. 55(1)(c)
    verbose: 'Keep track of, document, and report relevant information about serious incidents and possible corrective measures to the AI Office and, as appropriate, to national competent authorities, without undue delay'
    value: !!bool false
  cybersecurity: # Art. 55(1)(d)
    verbose: 'Ensure adequate cybersecurity protection for the model and infrastructure'
    value: !!bool false