wlmbrown committed on
Commit
83c9c9a
·
1 Parent(s): ec04efb

Changes to Project CC

Files changed (1)
  1. project_cc.yaml +98 -66
project_cc.yaml CHANGED
@@ -1,18 +1,18 @@
1
 
2
- # Information related to high-level characteristics of AI project, including its market status, operator, and type of AI
3
 
4
  operator_role:
5
  provider: # Art. 2
6
- verbose: 'The operator of this AI project is a natural or legal person, public authority, agency or other body that develops an AI system or a general-purpose AI model or that has an AI system or a general-purpose AI model developed and places it on the market or puts the AI system into service under its own name or trademark, whether for payment or free of charge'
7
  value: !!bool false
8
  deployer: # Art. 2
9
- verbose: 'AI project operator is a natural or legal person, public authority, agency or other body using an AI system under its authority except where the AI system is used in the course of a personal non-professional activity'
10
  value: !!bool false
11
  eu_located: # Art. 2
12
  verbose: 'AI project operator has its place of establishment or location within the Union'
13
  value: !!bool True
14
  output_used: # Art. 2
15
- verbose: 'The output produced by the AI system is used in the Union'
16
  value: !!bool false
17
  importer: # Art. 2
18
  verbose: 'AI project operator is a natural or legal person located or established in the Union that places on the market an AI system that bears the name or trademark of a natural or legal person established in a third country'
@@ -53,6 +53,30 @@ gpai_model_systematic_risk:
53
  verbose: 'The cumulative amount of computation used for the training of the AI project, as measured in floating point operations (FLOPs), has been greater than 10^25'
54
  value: !!bool false
55
56
  # Information related to the Act's exceptions for scientific research, open-source AI, and more
57
 
58
  excepted:
@@ -114,13 +138,13 @@ high_risk_ai_system:
114
  verbose: 'AI project is intended to be used as a safety component of a product'
115
  value: !!bool false
116
  product_covered_by_machinery_regulation: # Art. 6(1)(b); Annex I
117
- verbose: 'AI project is itself a product, covered by Directive 2006/42/EC of the European Parliament and of the Council of 17 May 2006 on machinery, and amending Directive 95/16/EC (OJ L 157, 9.6.2006, p. 24) [as repealed by the Machinery Regulation]'
118
  value: !!bool false
119
  product_covered_by_toy_safety_regulation: # Art. 6(1)(b); Annex I
120
- verbose: 'AI project is itself a product, covered by Directive 2009/48/EC of the European Parliament and of the Council of 18 June 2009 on the safety of toys (OJ L 170, 30.6.2009, p. 1)'
121
  value: !!bool false
122
  product_covered_by_watercraft_regulation: # Art. 6(1)(b); Annex I
123
- verbose: 'AI project is itself a product, covered by Directive 2013/53/EU of the European Parliament and of the Council of 20 November 2013 on recreational craft and personal watercraft and repealing Directive 94/25/EC (OJ L 354, 28.12.2013, p. 90)'
124
  value: !!bool false
125
  biometric_categorization: # Art. 6(2); Annex III(1)(b)
126
  verbose: 'AI project is intended to be used for biometric categorisation, according to sensitive or protected attributes or characteristics based on the inference of those attributes or characteristics'
@@ -129,7 +153,7 @@ high_risk_ai_system:
129
  verbose: 'AI project is intended to be used for emotion recognition'
130
  value: !!bool false
131
  critical_infrastructure: # Art. 6(2); Annex III(2)
132
- verbose: 'AI project is intended to be used as safety components in the management and operation of critical digital infrastructure, road traffic, or in the supply of water, gas, heating or electricity'
133
  value: !!bool false
134
  educational: # Art. 6(2); Annex III(3)(a)
135
  verbose: 'AI project is intended to be used to determine access or admission or to assign natural persons to educational and vocational training institutions at all levels'
@@ -159,65 +183,65 @@ high_risk_ai_system:
159
  verbose: 'the AI project is intended to improve the result of a previously completed human activity'
160
  value: !!bool false
161
  filter_exception_deviation: # Art. 6(3)(c)
162
- verbose: 'the AI system is intended to detect decision-making patterns or deviations from prior decision-making patterns and is not meant to replace or influence the previously completed human assessment, without proper human review'
163
  value: !!bool false
164
  filter_exception_prep: # Art. 6(3)(d)
165
- verbose: 'the AI system is intended to perform a preparatory task to an assessment relevant for the purposes of the use cases listed in Annex III.'
166
  value: !!bool false
167
 
168
  risk_management_system:
169
  established: # Article 9
170
- verbose: 'Risk management system has been established, implemented, documented and maintained for AI project'
171
  value: !!bool false
172
  lifecycle: # Art. 9(2)
173
- verbose: 'Risk management system (high-risk AI system) has been planned, run, reviewed, and updated, throughout the entire lifecycle of AI system'
174
  value: !!bool false
175
  risk_analysis_intended: # Art. 9(2)(a)
176
- verbose: 'Risk management system for AI system includes the identification and analysis of any known or reasonably foreseeable risks that the AI system might pose to health, safety or fundamental rights when used in accordance with its intended purpose'
177
  value: !!bool false
178
  risk_estimation_foreseeable: # Art. 9(2)(b)
179
- verbose: 'Risk management system for AI system includes the estimation and evaluation of the risks that may emerge when the high-risk AI system is used in accordance with its intended purpose, and under conditions of reasonably foreseeable misuse'
180
  value: !!bool false
181
  risk_post_market: # Art. 9(2)(c)
182
- verbose: 'Risk management system for AI system includes the evaluation of other risks possibly arising, based on the analysis of data gathered from the post-market monitoring system'
183
  value: !!bool false
184
  risk_management_measures: # Art. 9(2)(d)
185
- verbose: 'Where risk that the AI system might pose to health, safety or fundamental rights when used in accordance with its intended purpose have been identified, appropriate and targeted risk management measures designed to address the risks have been adopted'
186
  value: !!bool false
187
  documentation: # Art. 9(5)
188
- verbose: 'Where risk that the AI system might pose to health, safety or fundamental rights when used in accordance with its intended purpose have been identified, these risks have been documented and communicated to deployers and either eliminated, if feasible, or mitigated such that any residual risk is judged to be acceptable'
189
  value: !!bool false
190
  tested: # Art. 9(6)
191
- verbose: 'To determine the right mitigations, and to show the high-risk AI system performs consistently its intended purpose and is in compliance with the risk management requirements, the AI system has been tested'
192
  value: !!bool false
193
  testing_threshold: # Art. 9(8)
194
- verbose: 'Testing has or will be performed before the AI system is placed on the market and has or will be carried out against prior defined metrics and probabilistic thresholds that are appropriate to the intended purpose'
195
  value: !!bool false
196
 
197
  technical_documentation:
198
  drawn_up: # Art. 11(1)
199
- verbose: 'Technical documentation for the high-risk AI system has been drawn up before the system has been placed on the market or put into service and will be kept up-to date'
200
  value: !!bool false
201
  intended_purpose: # Art. 11(1); Annex IV(1)(a)
202
- verbose: 'The Technical Documentation includes a general description of the AI system that covers its intended purpose, the name of the provider and the version of the system reflecting its relation to previous versions'
203
  value: !!bool false
204
  interaction: # Art. 11(1); Annex IV(1)(b)
205
- verbose: 'The Technical Documentation includes a general description of the AI system that covers how the AI system interacts with, or can be used to interact with, hardware or software, including with other AI systems, that are not part of the AI system itself, where applicable'
206
  value: !!bool false
207
  versions: # Art. 11(1); Annex IV(1)(c)
208
- verbose: 'Technical Documentation includes a general description of the AI system that covers the versions of relevant software or firmware, and any requirements related to version updates'
209
  value: !!bool false
210
  packages: # Art. 11(1); Annex IV(1)(d)
211
- verbose: 'Technical Documentation includes a general description of the AI system that covers the description of all the forms in which the AI system is placed on the market or put into service, such as software packages embedded into hardware, downloads, or APIs'
212
  value: !!bool false
213
  hardware: # Art. 11(1); Annex IV(1)(e)
214
- verbose: 'Technical Documentation includes a general description of the AI system that covers the description of the hardware on which the AI system is intended to run'
215
  value: !!bool false
216
  development_steps: # Art. 11(1); Annex IV(2)(a)
217
- verbose: 'Technical Documentation includes a detailed description of the elements of the AI system and of the process for its development, covering the methods and steps performed for the development of the AI system, including, where relevant, recourse to pre-trained systems or tools provided by third parties and how those were used, integrated or modified by the provider'
218
  value: !!bool false
219
  design_specs: # Art. 11(1); Annex IV(2)(b)
220
- verbose: 'Technical Documentation includes a detailed description of the elements of the AI system and of the process for its development, covering the design specifications of the system, namely the general logic of the AI system and of the algorithms; the key design choices including the rationale and assumptions made, including with regard to persons or groups of persons in respect of who, the system is intended to be used; the main classification choices; what the system is designed to optimise for, and the relevance of the different parameters; the description of the expected output and output quality of the system; the decisions about any possible trade-off made regarding the technical solutions adopted to comply with the requirements set out in Chapter III, Section 2'
221
  value: !!bool false
222
  risk_management: # Art. 11(1); Annex IV(5)
223
  verbose: 'Technical Documentation includes a detailed description of the risk management system in accordance with Article 9'
@@ -229,27 +253,27 @@ technical_documentation:
229
  verbose: 'Technical Documentation includes a copy of the EU declaration of conformity referred to in Article 47'
230
  value: !!bool false
231
  post_market: # Art. 11(1); Annex IV(9)
232
- verbose: 'Technical Documentation includes a detailed description of the system in place to evaluate the AI system performance in the post-market phase in accordance with Article 72, including the post-market monitoring plan referred to in Article 72(3)'
233
  value: !!bool false
234
  product: # Art. 11(2)
235
- verbose: 'High-risk AI system is either not related to a product covered by the Union harmonisation legislation listed in Section A of Annex I and placed on the market or put into service or, if it is, a single set of technical documentation has been drawn up containing all the information set out in paragraph 1, as well as the information required under those legal acts'
236
  value: !!bool false
237
 
238
  record_keeping:
239
  logging_generally: # Article 12(1)
240
- verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system'
241
  value: !!bool false
242
  logging_risk: # Art. 12(1)(a)
243
- verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system and these logging capabilities enable the recording of events relevant for identifying situations that may result in the high-risk AI system presenting a risk within the meaning of Article 79(1) or in a substantial modification'
244
  value: !!bool false
245
  logging_post_market: # Art. 12(1)(b)
246
- verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system and these logging capabilities enable the recording of events relevant for facilitating the post-market monitoring referred to in Article 72'
247
  value: !!bool false
248
  monitoring: # Art. 12(1)(c)
249
- verbose: 'The AI system technically allows for the automatic recording of events (logs) over the lifetime of the system and these logging capabilities enable the recording of events relevant for monitoring the operation of high-risk AI systems referred to in Article 26(5)'
250
  value: !!bool false
251
  recording_use: # Art. 12(2)(a)
252
- verbose: 'For the remote biometric identification systems high-risk AI systems referred to in point 1 (a), of Annex III, the logging capabilities shall provide, at a minimum, the recording of the period of each use of the system (start date and time and end date and time of each use)'
253
  value: !!bool false
254
  reference_db: # Art. 12(2)(b)
255
  verbose: 'For the remote biometric identification systems high-risk AI systems referred to in point 1 (a), of Annex III, the logging capabilities shall provide, at a minimum, the reference database against which input data has been checked by the system'
@@ -263,19 +287,19 @@ record_keeping:
263
 
264
  transparency_and_provision_of_information_to_deployers:
265
  interpretability: # Art. 13(1)
266
- verbose: 'AI system is designed and developed to ensure operation is sufficiently transparent for deployers to interpret output and use appropriately'
267
  value: !!bool false
268
  compliance: # Art. 13(1)
269
- verbose: 'AI system is designed and developed with transparency to ensure compliance with provider and deployer obligations in Section 3'
270
  value: !!bool false
271
  instructions: # Art. 13(2)
272
- verbose: 'AI system is accompanied by instructions for use in appropriate digital format or otherwise, with concise, complete, correct, clear, relevant, accessible, and comprehensible information for deployers'
273
  value: !!bool false
274
  contact_details: # Art. 13(3)(a)
275
  verbose: 'Instructions include provider identity and contact details, and if applicable, authorized representative details'
276
  value: !!bool false
277
  characteristics: # Art. 13(3)(b)(i)
278
- verbose: 'Instructions include AI system characteristics, capabilities, performance limitations, and intended purpose'
279
  value: !!bool false
280
  metrics: # Art. 13(3)(b)(ii)
281
  verbose: 'Instructions include accuracy metrics, robustness, cybersecurity, and potential impacts on these'
@@ -293,10 +317,10 @@ transparency_and_provision_of_information_to_deployers:
293
  verbose: 'Instructions include input data specifications and relevant training, validation, and testing dataset information'
294
  value: !!bool false
295
  deployers: # Art. 13(3)(b)(vii)
296
- verbose: 'Instructions include information to enable deployers to interpret and appropriately use AI system output'
297
  value: !!bool false
298
  changes: # Art. 13(3)(c)
299
- verbose: 'Instructions include predetermined changes to AI system and its performance since initial conformity assessment'
300
  value: !!bool false
301
  oversight_measures: # Art. 13(3)(d)
302
  verbose: 'Instructions include human oversight measures and technical measures for output interpretation'
@@ -310,28 +334,28 @@ transparency_and_provision_of_information_to_deployers:
310
 
311
  human_oversight:
312
  designed: # Art. 14(1)
313
- verbose: 'AI system is designed and developed to be effectively overseen by natural persons during use, including appropriate human-machine interface tools'
314
  value: !!bool false
315
  minimize_risks: # Art. 14(2)
316
- verbose: 'Human oversight aims to prevent or minimize risks to health, safety, or fundamental rights during intended use or foreseeable misuse'
317
  value: !!bool false
318
  commensurate: # Art. 14(3)
319
- verbose: 'Oversight measures are commensurate with risks, autonomy level, and use context, ensured through provider-built measures and/or deployer-implemented measures'
320
  value: !!bool false
321
  understandable: # Art. 14(4)
322
- verbose: 'AI system enables assigned persons to understand its capacities and limitations, monitor operation, and detect anomalies'
323
  value: !!bool false
324
  automation_bias: # Art. 14(4)(a)
325
- verbose: 'AI system enables assigned persons to be aware of potential automation bias'
326
  value: !!bool false
327
  interpretability: # Art. 14(4)(c)
328
- verbose: 'AI system enables assigned persons to correctly interpret its output'
329
  value: !!bool false
330
  override: # Art. 14(4)(d)
331
- verbose: 'AI system enables assigned persons to decide not to use it or override its output'
332
  value: !!bool false
333
  stop_button: # Art. 14(4)(e)
334
- verbose: 'AI system enables assigned persons to intervene or halt the system through a stop button or similar procedure'
335
  value: !!bool false
336
  verification: # Art. 14(5)
337
  verbose: 'For Annex III point 1(a) systems, actions or decisions require verification by at least two competent persons, with exceptions for law enforcement, migration, border control, or asylum'
@@ -339,47 +363,47 @@ human_oversight:
339
 
340
  accuracy_robustness_cybersecurity:
341
  design: # Art. 15(1)
342
- verbose: 'AI system is designed and developed to achieve appropriate levels of accuracy, robustness, and cybersecurity, performing consistently throughout its lifecycle'
343
  value: !!bool false
344
  metrics_in_instructions: # Art. 15(3)
345
- verbose: 'Accuracy levels and relevant metrics are declared in accompanying instructions of use'
346
  value: !!bool false
347
  error_resilience: # Art. 15(4)
348
- verbose: 'AI system is resilient against errors, faults, or inconsistencies, with technical and organizational measures implemented'
349
  value: !!bool false
350
  bias: # Art. 15(4)
351
- verbose: 'AI system that continues learning after deployment is designed to eliminate or reduce risk of biased outputs influencing future operations'
352
  value: !!bool false
353
  unauthorized_use: # Art. 15(5)
354
- verbose: 'AI system is resilient against unauthorized third-party attempts to alter use, outputs, or performance'
355
  value: !!bool false
356
  cybersecurity_solutions: # Art. 15(5)
357
- verbose: 'Cybersecurity solutions are appropriate to relevant circumstances and risks'
358
  value: !!bool false
359
  ai_vulnerabilities: # Art. 15(5)
360
- verbose: 'Technical solutions address AI-specific vulnerabilities, including measures against data poisoning, model poisoning, adversarial examples, and confidentiality attacks'
361
  value: !!bool false
362
 
363
  quality_management_system:
364
  quality_management_system: # Art. 17(1)(a)
365
- verbose: 'Initiative is subject to a quality management system with strategy for regulatory compliance'
366
  value: !!bool false
367
  design: # Art. 17(1)(b)
368
- verbose: 'System includes techniques, procedures, and actions for design, control, and verification of high-risk AI system'
369
  value: !!bool false
370
  quality_control: # Art. 17(1)(c)
371
- verbose: 'System includes techniques, procedures, and actions for development, quality control, and quality assurance'
372
  value: !!bool false
373
  testing: # Art. 17(1)(d)
374
- verbose: 'System includes examination, test, and validation procedures before, during, and after development'
375
  value: !!bool false
376
 
377
  fundamental_rights_assessment:
378
  process: # Art. 27(1)(a)
379
- verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the deployer’s processes in which the high-risk AI system will be used in line with its intended purpose'
380
  value: !!bool false
381
  time_period: # Art. 27(1)(b)
382
- value: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the period of time within which, and the frequency with which, each high-risk AI system is intended to be used'
383
  value: !!bool false
384
  persons_affected: # Art. 27(1)(c)
385
  verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the categories of natural persons and groups likely to be affected by its use in the specific context'
@@ -397,11 +421,13 @@ fundamental_rights_assessment:
397
  # Information related to the Act's requirements for all AI systems
398
 
399
  transparency_obligations:
400
  synthetic_content: # Art. 50(2)
401
- verbose: 'Providers of AI systems generating synthetic content ensure outputs are marked and detectable as artificially generated'
402
  value: !!bool false
403
  marking_solutions: # Art. 50(2)
404
- verbose: 'Technical solutions for marking are effective, interoperable, robust, and reliable'
405
  value: !!bool false
406
 
407
  # Information related to the Act's requirements for GPAI models
@@ -436,13 +462,19 @@ gpai_model_provider_obligations:
436
 
437
  obligations_for_gpai_models_with_systemic_risk:
438
  evaluation: # Art. 55(1)(a)
439
- verbose: 'Perform model evaluation using standardized protocols and conduct adversarial testing'
440
  value: !!bool false
441
  mitigation: # Art. 55(1)(b)
442
- verbose: 'Assess and mitigate possible systemic risks at Union level'
443
  value: !!bool false
444
  cybersecurity: # Art. 55(1)(d)
445
- verbose: 'Ensure adequate cybersecurity protection for the model and infrastructure'
446
  value: !!bool false
447
 
448
 
 
1
 
2
+ # Information related to high-level characteristics of AI project, including the role of the operator, market status, and type of AI
3
 
4
  operator_role:
5
  provider: # Art. 2
6
+ verbose: 'The operator of this AI project is a natural or legal person, public authority, agency or other body that develops an AI project or a general-purpose AI model or that has an AI system or a general-purpose AI model developed and places it on the market or puts the AI system into service under its own name or trademark, whether for payment or free of charge'
7
  value: !!bool false
8
  deployer: # Art. 2
9
+ verbose: 'AI project operator is a natural or legal person, public authority, agency or other body using an AI project under its authority except where the AI system is used in the course of a personal non-professional activity'
10
  value: !!bool false
11
  eu_located: # Art. 2
12
  verbose: 'AI project operator has its place of establishment or location within the Union'
13
  value: !!bool True
14
  output_used: # Art. 2
15
+ verbose: 'The output produced by the AI project is used in the Union'
16
  value: !!bool false
17
  importer: # Art. 2
18
  verbose: 'AI project operator is a natural or legal person located or established in the Union that places on the market an AI system that bears the name or trademark of a natural or legal person established in a third country'
 
53
  verbose: 'The cumulative amount of computation used for the training of the AI project, as measured in floating point operations (FLOPs), has been greater than 10^25'
54
  value: !!bool false
55
 
56
+ transparency_related:
57
+ direct_user_interaction:
58
+ verbose: 'The AI project is intended to interact directly with natural persons'
59
+ value: !!bool false
60
+ exception_obvious:
61
+ verbose: 'When interacting with the AI project, it would be obvious from the point of view of a natural person who is reasonably well-informed, observant and circumspect, taking into account the circumstances and the context of use, that they are interacting with AI'
+ value: !!bool false
62
+ exception_law:
63
+ verbose: 'The AI project is authorised by law to detect, prevent, investigate or prosecute criminal offences, subject to appropriate safeguards for the rights and freedoms of third parties, and is not available for the public to report a criminal offence'
+ value: !!bool false
64
+ synthetic_content: # Art. 50(2)
65
+ verbose: 'The AI project generates synthetic audio, image, video or text content'
66
+ value: !!bool false
67
+ exception_assistive:
68
+ verbose: 'The AI project performs an assistive function for standard editing'
69
+ value: !!bool false
70
+ exception_insubstantial:
71
+ verbose: 'The AI project does not substantially alter the input data provided by the deployer or the semantics thereof, or is authorised by law to detect, prevent, investigate or prosecute criminal offences'
72
+ value: !!bool false
73
+ emotion_recognition:
74
+ verbose: 'The AI project is an emotion recognition system'
75
+ value: !!bool false
76
+ biometric_categorization:
77
+ verbose: 'The AI project is a biometric categorisation system'
78
+ value: !!bool false
79
+
80
  # Information related to the Act's exceptions for scientific research, open-source AI, and more
81
 
82
  excepted:
 
138
  verbose: 'AI project is intended to be used as a safety component of a product'
139
  value: !!bool false
140
  product_covered_by_machinery_regulation: # Art. 6(1)(b); Annex I
141
+ verbose: 'AI project is itself a product that is covered by Directive 2006/42/EC of the European Parliament and of the Council of 17 May 2006 on machinery, and amending Directive 95/16/EC (OJ L 157, 9.6.2006, p. 24) [as repealed by the Machinery Regulation]'
142
  value: !!bool false
143
  product_covered_by_toy_safety_regulation: # Art. 6(1)(b); Annex I
144
+ verbose: 'AI project is itself a product that is covered by Directive 2009/48/EC of the European Parliament and of the Council of 18 June 2009 on the safety of toys (OJ L 170, 30.6.2009, p. 1)'
145
  value: !!bool false
146
  product_covered_by_watercraft_regulation: # Art. 6(1)(b); Annex I
147
+ verbose: 'AI project is itself a product that is covered by Directive 2013/53/EU of the European Parliament and of the Council of 20 November 2013 on recreational craft and personal watercraft and repealing Directive 94/25/EC (OJ L 354, 28.12.2013, p. 90)'
148
  value: !!bool false
149
  biometric_categorization: # Art. 6(2); Annex III(1)(b)
150
  verbose: 'AI project is intended to be used for biometric categorisation, according to sensitive or protected attributes or characteristics based on the inference of those attributes or characteristics'
 
153
  verbose: 'AI project is intended to be used for emotion recognition'
154
  value: !!bool false
155
  critical_infrastructure: # Art. 6(2); Annex III(2)
156
+ verbose: 'AI project is intended to be used as a safety component in the management and operation of critical digital infrastructure, road traffic, or in the supply of water, gas, heating or electricity'
157
  value: !!bool false
158
  educational: # Art. 6(2); Annex III(3)(a)
159
  verbose: 'AI project is intended to be used to determine access or admission or to assign natural persons to educational and vocational training institutions at all levels'
 
183
  verbose: 'the AI project is intended to improve the result of a previously completed human activity'
184
  value: !!bool false
185
  filter_exception_deviation: # Art. 6(3)(c)
186
+ verbose: 'the AI project is intended to detect decision-making patterns or deviations from prior decision-making patterns and is not meant to replace or influence the previously completed human assessment, without proper human review'
187
  value: !!bool false
188
  filter_exception_prep: # Art. 6(3)(d)
189
+ verbose: 'the AI project is intended to perform a preparatory task to an assessment relevant for the purposes of the use cases listed in Annex III.'
190
  value: !!bool false
191
 
192
  risk_management_system:
193
  established: # Article 9
194
+ verbose: 'A risk management system has been established, implemented, documented and maintained for the AI project'
195
  value: !!bool false
196
  lifecycle: # Art. 9(2)
197
+ verbose: 'A risk management system has been planned, run, reviewed, and updated throughout the entire lifecycle of the AI project'
198
  value: !!bool false
199
  risk_analysis_intended: # Art. 9(2)(a)
200
+ verbose: 'The risk management system for the AI project includes the identification and analysis of any known or reasonably foreseeable risks that the AI project might pose to health, safety or fundamental rights when used in accordance with its intended purpose'
201
  value: !!bool false
202
  risk_estimation_foreseeable: # Art. 9(2)(b)
203
+ verbose: 'The risk management system for the AI project includes the estimation and evaluation of the risks that may emerge when the AI project is used in accordance with its intended purpose, and under conditions of reasonably foreseeable misuse'
204
  value: !!bool false
205
  risk_post_market: # Art. 9(2)(c)
206
+ verbose: 'The risk management system for the AI project includes the evaluation of other risks possibly arising, based on the analysis of data gathered from the post-market monitoring system'
207
  value: !!bool false
208
  risk_management_measures: # Art. 9(2)(d)
209
+ verbose: 'Where any risks that the AI project might pose to health, safety or fundamental rights when used in accordance with its intended purpose have been identified, appropriate and targeted risk management measures designed to address those risks have been adopted'
210
  value: !!bool false
211
  documentation: # Art. 9(5)
212
+ verbose: 'Where any risks that the AI project might pose to health, safety or fundamental rights when used in accordance with its intended purpose have been identified, these risks have been documented and communicated to deployers and either eliminated, if feasible, or mitigated such that any residual risk is judged to be acceptable'
213
  value: !!bool false
214
  tested: # Art. 9(6)
215
+ verbose: 'To determine the right mitigations, and to show that the AI project performs consistently for its intended purpose and is in compliance with the risk management requirements, the AI project has been tested'
216
  value: !!bool false
217
  testing_threshold: # Art. 9(8)
218
+ verbose: 'Testing has or will be performed before the AI project is placed on the market and has or will be carried out against prior defined metrics and probabilistic thresholds that are appropriate to the intended purpose'
219
  value: !!bool false
220
 
221
  technical_documentation:
222
  drawn_up: # Art. 11(1)
223
+ verbose: 'Technical documentation for the high-risk AI project has been drawn up before the system has been placed on the market or put into service and will be kept up to date'
224
  value: !!bool false
225
  intended_purpose: # Art. 11(1); Annex IV(1)(a)
226
+ verbose: 'The Technical Documentation includes a general description of the AI project that covers its intended purpose, the name of the provider and the version of the system reflecting its relation to previous versions'
227
  value: !!bool false
228
  interaction: # Art. 11(1); Annex IV(1)(b)
229
+ verbose: 'The Technical Documentation includes a general description of the AI project that covers how the AI project interacts with, or can be used to interact with, hardware or software, including with separate AI systems that are not part of the AI project itself, where applicable'
230
  value: !!bool false
231
  versions: # Art. 11(1); Annex IV(1)(c)
232
+ verbose: 'Technical Documentation includes a general description of the AI project that covers the versions of relevant software or firmware, and any requirements related to version updates'
233
  value: !!bool false
234
  packages: # Art. 11(1); Annex IV(1)(d)
235
+ verbose: 'Technical Documentation includes a general description of the AI project that covers the description of all the forms in which the AI project is placed on the market or put into service, such as software packages embedded into hardware, downloads, or APIs'
236
  value: !!bool false
237
  hardware: # Art. 11(1); Annex IV(1)(e)
238
+ verbose: 'Technical Documentation includes a general description of the AI project that covers the description of the hardware on which the AI project is intended to run'
239
  value: !!bool false
240
  development_steps: # Art. 11(1); Annex IV(2)(a)
241
+ verbose: 'Technical Documentation includes a detailed description of the elements of the AI project and of the process for its development, covering the methods and steps performed for the development of the AI project, including, where relevant, recourse to pre-trained systems or tools provided by third parties and how those were used, integrated or modified by the provider'
242
  value: !!bool false
243
  design_specs: # Art. 11(1); Annex IV(2)(b)
244
+ verbose: 'Technical Documentation includes a detailed description of the elements of the AI project and of the process for its development, covering the design specifications of the system, namely the general logic of the AI project and of the algorithms; the key design choices including the rationale and assumptions made, including with regard to persons or groups of persons in respect of whom the system is intended to be used; the main classification choices; what the system is designed to optimise for, and the relevance of the different parameters; the description of the expected output and output quality of the system; the decisions about any possible trade-off made regarding the technical solutions adopted to comply with the requirements set out in Chapter III, Section 2'
245
  value: !!bool false
246
  risk_management: # Art. 11(1); Annex IV(5)
247
  verbose: 'Technical Documentation includes a detailed description of the risk management system in accordance with Article 9'
 
253
  verbose: 'Technical Documentation includes a copy of the EU declaration of conformity referred to in Article 47'
254
  value: !!bool false
255
  post_market: # Art. 11(1); Annex IV(9)
256
+ verbose: 'Technical Documentation includes a detailed description of the system in place to evaluate the AI project performance in the post-market phase in accordance with Article 72, including the post-market monitoring plan referred to in Article 72(3)'
257
  value: !!bool false
258
  product: # Art. 11(2)
259
+ verbose: 'The AI project is either not related to a product covered by the Union harmonisation legislation listed in Section A of Annex I and placed on the market or put into service or, if it is, a single set of technical documentation has been drawn up containing all the information set out in paragraph 1, as well as the information required under those legal acts'
260
  value: !!bool false
261
 
262
  record_keeping:
263
  logging_generally: # Article 12(1)
264
+ verbose: 'The AI project technically allows for the automatic recording of events (logs) over the lifetime of the system'
265
  value: !!bool false
266
  logging_risk: # Art. 12(1)(a)
267
+ verbose: 'The AI project technically allows for the automatic recording of events (logs) over the lifetime of the system and these logging capabilities enable the recording of events relevant for identifying situations that may result in the AI project presenting a risk within the meaning of Article 79(1) or in a substantial modification'
268
  value: !!bool false
269
  logging_post_market: # Art. 12(1)(b)
270
+ verbose: 'The AI project technically allows for the automatic recording of events (logs) over the lifetime of the system and these logging capabilities enable the recording of events relevant for facilitating the post-market monitoring referred to in Article 72'
271
  value: !!bool false
272
  monitoring: # Art. 12(1)(c)
273
+ verbose: 'The AI project technically allows for the automatic recording of events (logs) over the lifetime of the system and these logging capabilities enable the recording of events relevant for monitoring the operation of AI projects referred to in Article 26(5)'
274
  value: !!bool false
275
  recording_use: # Art. 12(2)(a)
276
+ verbose: 'For remote biometric identification AI projects referred to in point 1(a) of Annex III, the logging capabilities shall provide, at a minimum, the recording of the period of each use of the system (start date and time and end date and time of each use)'
277
  value: !!bool false
278
  reference_db: # Art. 12(2)(b)
279
  verbose: 'For the remote biometric identification systems high-risk AI systems referred to in point 1 (a), of Annex III, the logging capabilities shall provide, at a minimum, the reference database against which input data has been checked by the system'
 
287
 
288
  transparency_and_provision_of_information_to_deployers:
289
  interpretability: # Art. 13(1)
290
+ verbose: 'AI project is designed and developed to ensure operation is sufficiently transparent for deployers to interpret output and use appropriately'
291
  value: !!bool false
292
  compliance: # Art. 13(1)
293
+ verbose: 'AI project is designed and developed with transparency to ensure compliance with provider and deployer obligations in Section 3'
294
  value: !!bool false
295
  instructions: # Art. 13(2)
296
+ verbose: 'AI project is accompanied by instructions for use in appropriate digital format or otherwise, with concise, complete, correct, clear, relevant, accessible, and comprehensible information for deployers'
297
  value: !!bool false
298
  contact_details: # Art. 13(3)(a)
299
  verbose: 'Instructions include provider identity and contact details, and if applicable, authorized representative details'
300
  value: !!bool false
301
  characteristics: # Art. 13(3)(b)(i)
302
+ verbose: 'Instructions include the characteristics, capabilities, performance limitations, and intended purpose of the AI project'
303
  value: !!bool false
304
  metrics: # Art. 13(3)(b)(ii)
305
  verbose: 'Instructions include accuracy metrics, robustness, cybersecurity, and potential impacts on these'
 
317
  verbose: 'Instructions include input data specifications and relevant training, validation, and testing dataset information'
318
  value: !!bool false
319
  deployers: # Art. 13(3)(b)(vii)
320
+ verbose: 'Instructions include information to enable potential deployers to interpret and appropriately use the output of the AI project'
321
  value: !!bool false
322
  changes: # Art. 13(3)(c)
323
+ verbose: 'Instructions include predetermined changes to the AI project and its performance since the initial conformity assessment'
324
  value: !!bool false
325
  oversight_measures: # Art. 13(3)(d)
326
  verbose: 'Instructions include human oversight measures and technical measures for output interpretation'
 
334
 
335
  human_oversight:
336
  designed: # Art. 14(1)
337
+ verbose: 'AI project is designed and developed so as to be effectively overseen by natural persons during use, including through the use of appropriate human-machine interface tools'
338
  value: !!bool false
339
  minimize_risks: # Art. 14(2)
340
+ verbose: 'Human oversight measures of the AI project aim to prevent or minimize risks to health, safety, or fundamental rights during intended use or foreseeable misuse'
341
  value: !!bool false
342
  commensurate: # Art. 14(3)
343
+ verbose: 'The human oversight measures of the AI project are commensurate with risks, autonomy level, and use context, ensured through provider-built measures and/or deployer-implemented measures'
344
  value: !!bool false
345
  understandable: # Art. 14(4)
346
+ verbose: 'AI project enables assigned persons to understand its capacities and limitations, monitor operation, and detect anomalies'
347
  value: !!bool false
348
  automation_bias: # Art. 14(4)(a)
349
+ verbose: 'AI project enables assigned persons to be aware of potential automation bias'
350
  value: !!bool false
351
  interpretability: # Art. 14(4)(c)
352
+ verbose: 'AI project enables assigned persons to correctly interpret its output'
353
  value: !!bool false
354
  override: # Art. 14(4)(d)
355
+ verbose: 'AI project enables assigned persons to decide not to use it or override its output'
356
  value: !!bool false
357
  stop_button: # Art. 14(4)(e)
358
+ verbose: 'AI project enables assigned persons to intervene or halt the system through a stop button or similar procedure'
359
  value: !!bool false
360
  verification: # Art. 14(5)
361
  verbose: 'For Annex III point 1(a) systems, actions or decisions require verification by at least two competent persons, with exceptions for law enforcement, migration, border control, or asylum'
 
363
 
364
  accuracy_robustness_cybersecurity:
365
  design: # Art. 15(1)
366
+ verbose: 'The AI project is designed and developed to achieve appropriate levels of accuracy, robustness, and cybersecurity, performing consistently throughout its lifecycle'
367
  value: !!bool false
368
  metrics_in_instructions: # Art. 15(3)
369
+ verbose: 'Accuracy levels and relevant metrics are declared in instructions of use that accompany the AI project'
370
  value: !!bool false
371
  error_resilience: # Art. 15(4)
372
+ verbose: 'The AI project is resilient against errors, faults, or inconsistencies, with technical and organizational measures implemented'
373
  value: !!bool false
374
  bias: # Art. 15(4)
375
+ verbose: 'The AI project, if it continues learning after deployment, is designed to eliminate or reduce risk of biased outputs influencing future operations'
376
  value: !!bool false
377
  unauthorized_use: # Art. 15(5)
378
+ verbose: 'The AI project is resilient against unauthorized third-party attempts to alter use, outputs, or performance'
379
  value: !!bool false
380
  cybersecurity_solutions: # Art. 15(5)
381
+ verbose: 'The AI project includes cybersecurity solutions that are appropriate to the relevant circumstances and risks'
382
  value: !!bool false
383
  ai_vulnerabilities: # Art. 15(5)
384
+ verbose: 'The AI project includes technical solutions that address AI-specific vulnerabilities, including measures against data poisoning, model poisoning, adversarial examples, and confidentiality attacks'
385
  value: !!bool false
386
 
387
  quality_management_system:
388
  quality_management_system: # Art. 17(1)(a)
389
+ verbose: 'The AI project is subject to a quality management system with a strategy for regulatory compliance'
390
  value: !!bool false
391
  design: # Art. 17(1)(b)
392
+ verbose: 'The quality management system that the AI project was subject to includes techniques, procedures, and actions for design, control, and verification of the AI project'
393
  value: !!bool false
394
  quality_control: # Art. 17(1)(c)
395
+ verbose: 'The quality management system that the AI project was subject to includes techniques, procedures, and actions for development, quality control, and quality assurance'
396
  value: !!bool false
397
  testing: # Art. 17(1)(d)
398
+ verbose: 'The quality management system that the AI project was subject to includes examination, test, and validation procedures before, during, and after development'
399
  value: !!bool false
400
 
401
  fundamental_rights_assessment:
402
  process: # Art. 27(1)(a)
403
+ verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the deployer’s processes in which the AI project will be used in line with its intended purpose'
404
  value: !!bool false
405
  time_period: # Art. 27(1)(b)
406
+ verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the period of time within which, and the frequency with which, each high-risk AI project is intended to be used'
407
  value: !!bool false
408
  persons_affected: # Art. 27(1)(c)
409
  verbose: 'AI project has been subject to a fundamental rights impact assessment that describes the categories of natural persons and groups likely to be affected by its use in the specific context'
 
421
  # Information related to the Act's requirements for all AI systems
422
 
423
  transparency_obligations:
424
+ interaction_disclosure: # Art. 50(1)
425
+ verbose: 'The AI project is designed and developed in such a way that the natural persons concerned are informed that they are interacting with AI'
+ value: !!bool false
426
  synthetic_content: # Art. 50(2)
427
+ verbose: 'The outputs of the AI project are marked in a machine-readable format and detectable as artificially generated or manipulated'
428
  value: !!bool false
429
  marking_solutions: # Art. 50(2)
430
+ verbose: 'The outputs of the AI project are marked using technical solutions that are effective, interoperable, robust and reliable, as far as this is technically feasible, taking into account the specificities and limitations of various types of content, the costs of implementation and the generally acknowledged state of the art, as may be reflected in relevant technical standards'
431
  value: !!bool false
432
 
433
  # Information related to the Act's requirements for GPAI models
 
462
 
463
  obligations_for_gpai_models_with_systemic_risk:
464
  evaluation: # Art. 55(1)(a)
465
+ verbose: 'The AI project was subject to a model evaluation using standardized protocols'
466
+ value: !!bool false
467
+ adversarial: # Art. 55(1)(a)
468
+ verbose: 'The AI project was subject to adversarial testing'
469
+ value: !!bool false
470
+ assessment: # Art. 55(1)(b)
471
+ verbose: 'Any possible systemic risks at Union level that the AI project poses were assessed'
472
  value: !!bool false
473
  mitigation: # Art. 55(1)(b)
474
+ verbose: 'Any possible systemic risks at Union level that the AI project poses were mitigated'
475
  value: !!bool false
476
  cybersecurity: # Art. 55(1)(d)
477
+ verbose: 'Adequate cybersecurity protection for any models and infrastructures in the AI project was ensured'
478
  value: !!bool false
479
 
480
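
A minimal sketch, not part of the commit, of how the flags above might be loaded and sanity-checked downstream. It assumes PyYAML is available and that project_cc.yaml follows the layout visible in the diff: nested mappings whose leaf entries each carry a 'verbose' string and a boolean 'value'; the helper name check_entries is illustrative only.

import yaml  # PyYAML, assumed to be available

def check_entries(node, path=""):
    """Recursively yield (path, problem) pairs for entries that do not match
    the verbose/value layout used throughout project_cc.yaml."""
    if not isinstance(node, dict):
        return
    if "verbose" in node or "value" in node:
        # Treat this mapping as a leaf entry of the compliance card.
        if not isinstance(node.get("verbose"), str):
            yield path, "missing or non-string 'verbose'"
        if not isinstance(node.get("value"), bool):
            yield path, "missing or non-boolean 'value'"
        return
    for key, child in node.items():
        yield from check_entries(child, f"{path}.{key}" if path else key)

with open("project_cc.yaml") as f:
    card = yaml.safe_load(f)  # '!!bool false' parses to Python False

for path, problem in check_entries(card):
    print(f"{path}: {problem}")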