wlmbrown committed on
Commit
976503f
·
1 Parent(s): b79b88a

Remove outdated comments

Browse files
Files changed (3) hide show
  1. data_cc.yaml +5 -6
  2. model_cc.yaml +6 -16
  3. project_cc.yaml +13 -24
data_cc.yaml CHANGED
@@ -2,7 +2,7 @@ card_details:
2
  card_type: "data" # "project", "data" or "model"
3
  card_label: "data_01"
4
 
5
- # Metadata related to intended purpose(s) of data
6
 
7
  intended_purpose:
8
  safety_component:
@@ -58,7 +58,7 @@ intended_purpose:
58
  verbose: 'This dataset is appropriate to use for AI projects intended to be used by a judicial authority or on their behalf to assist a judicial authority in researching and interpreting facts and the law and in applying the law to a concrete set of facts, or to be used in a similar way in alternative dispute resolution'
59
  value: !!bool false
60
 
61
- # Metadata related to data-related requirements for high-risk AI systems
62
 
63
  high_risk_ai_system_requirements:
64
  # data governance
@@ -188,10 +188,9 @@ high_risk_ai_system_requirements:
188
  technical_documentation_cybersecurity:
189
  article: 'Art. 11; Annex IV(2)(h)'
190
  verbose: 'Cybersecurity measures were put in place as regards the data (e.g., scanning for data poisoning)'
191
- value: !!bool false
192
-
193
  transparency_and_provision_of_information_to_deployers:
194
- article: '# Art. 13(3)(b)(vi)'
195
  verbose: 'Dataset is accompanied by instructions for use that convey relevant information about it, taking into account its intended purpose'
196
  value: !!bool false
197
  quality_management_system:
@@ -199,7 +198,7 @@ high_risk_ai_system_requirements:
199
  verbose: 'Dataset was subject to a quality management system that is documented in a systematic and orderly manner in the form of written policies, procedures and instructions, and includes a description of the systems and procedures for data management, including data acquisition, data collection, data analysis, data labelling, data storage, data filtration, data mining, data aggregation, data retention and any other operation regarding the data'
200
  value: !!bool false
201
 
202
- # Metadata related to data-related requirements for GPAI models
203
 
204
  gpai_model_requirements:
205
  data_type:
 
2
  card_type: "data" # "project", "data" or "model"
3
  card_label: "data_01"
4
 
5
+ # Metadata related to intended purpose(s) of data (which must align with those of overall AI project, if overall AI project is a high-risk AI system)
6
 
7
  intended_purpose:
8
  safety_component:
 
58
  verbose: 'This dataset is appropriate to use for AI projects intended to be used by a judicial authority or on their behalf to assist a judicial authority in researching and interpreting facts and the law and in applying the law to a concrete set of facts, or to be used in a similar way in alternative dispute resolution'
59
  value: !!bool false
60
 
61
+ # Metadata related to data-related requirements when AI project is a high-risk AI system
62
 
63
  high_risk_ai_system_requirements:
64
  # data governance
 
188
  technical_documentation_cybersecurity:
189
  article: 'Art. 11; Annex IV(2)(h)'
190
  verbose: 'Cybersecurity measures were put in place as regards the data (e.g., scanning for data poisoning)'
191
+ value: !!bool false
 
192
  transparency_and_provision_of_information_to_deployers:
193
+ article: 'Art. 13(3)(b)(vi)'
194
  verbose: 'Dataset is accompanied by instructions for use that convey relevant information about it, taking into account its intended purpose'
195
  value: !!bool false
196
  quality_management_system:
 
198
  verbose: 'Dataset was subject to a quality management system that is documented in a systematic and orderly manner in the form of written policies, procedures and instructions, and includes a description of the systems and procedures for data management, including data acquisition, data collection, data analysis, data labelling, data storage, data filtration, data mining, data aggregation, data retention and any other operation regarding the data'
199
  value: !!bool false
200
 
201
+ # Metadata related to data-related requirements when AI project is a GPAI model
202
 
203
  gpai_model_requirements:
204
  data_type:
model_cc.yaml CHANGED
@@ -2,7 +2,7 @@ card_details:
2
  card_type: "model" # "project", "data" or "model"
3
  card_label: "model_01"
4
 
5
- # Metadata related to intended purpose(s) of model
6
 
7
  intended_purpose:
8
  safety_component:
@@ -58,19 +58,7 @@ intended_purpose:
58
  verbose: 'This model is appropriate to use for AI projects intended to be used by a judicial authority or on their behalf to assist a judicial authority in researching and interpreting facts and the law and in applying the law to a concrete set of facts, or to be used in a similar way in alternative dispute resolution'
59
  value: !!bool false
60
 
61
- # Metadata that will help us determine if the model itself is a GPAI and, therefore, must satisfy the requirements of GPAI models
62
-
63
- classification_of_gpai_models:
64
- high_impact_capabilities:
65
- article: 'Art. 51(1)(a)'
66
- verbose: 'The model has high impact capabilities evaluated on the basis of appropriate technical tools and methodologies, including indicators and benchmarks'
67
- value: !!bool false
68
- flops:
69
- article: 'Art. 51(2)'
70
- verbose: 'The cumulative compute used for training the model, as measured in floating point operations (FLOPs), was greater than 10^25.'
71
- value: !!bool false
72
-
73
- # Metadata related to model-related requirements for high-risk AI systems
74
 
75
  high_risk_ai_system_requirements:
76
  risk_management_system_general:
@@ -181,7 +169,7 @@ high_risk_ai_system_requirements:
181
  article: 'Art. 13(3)(e)'
182
  verbose: 'Model is accompanied by instructions for use that include computational and hardware resources needed, the expected lifetime of the model and any necessary maintenance and care measures, including their frequency, to ensure the proper functioning of that model, including as regards software updates'
183
  value: !!bool false
184
- accuracy_robustness_cybersecurity_accuracy: # These need to be cleaned up and to match/compliment project cc
185
  article: 'Art. 15(1)'
186
  verbose: 'Model is designed and developed to achieve appropriate level of accuracy'
187
  value: !!bool false
@@ -210,7 +198,7 @@ high_risk_ai_system_requirements:
210
  verbose: 'Examination, test and validation procedures to be carried out before, during and after the development of the high-risk AI system, and the frequency with which they have to be carried out'
211
  value: !!bool false
212
 
213
- # Metadata related to model-related requirements for GPAI models
214
 
215
  gpai_model_requirements:
216
  task:
@@ -298,6 +286,8 @@ gpai_model_requirements:
298
  verbose: 'Where applicable, detailed description of the measures put in place for the purpose of conducting internal and/or external adversarial testing (e.g. red teaming), model adaptations, including alignment and fine-tuning'
299
  value: !!bool false
300
 
 
 
301
  gpai_model_with_systemic_risk_requirements:
302
  evaluation:
303
  article: 'Art. 55(1)(a)'
 
2
  card_type: "model" # "project", "data" or "model"
3
  card_label: "model_01"
4
 
5
+ # Metadata related to intended purpose(s) of model (which must align with those of overall AI project, if overall AI project is a high-risk AI system)
6
 
7
  intended_purpose:
8
  safety_component:
 
58
  verbose: 'This model is appropriate to use for AI projects intended to be used by a judicial authority or on their behalf to assist a judicial authority in researching and interpreting facts and the law and in applying the law to a concrete set of facts, or to be used in a similar way in alternative dispute resolution'
59
  value: !!bool false
60
 
61
+ # Metadata related to model-related requirements when AI project is a high-risk AI system
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  high_risk_ai_system_requirements:
64
  risk_management_system_general:
 
169
  article: 'Art. 13(3)(e)'
170
  verbose: 'Model is accompanied by instructions for use that include computational and hardware resources needed, the expected lifetime of the model and any necessary maintenance and care measures, including their frequency, to ensure the proper functioning of that model, including as regards software updates'
171
  value: !!bool false
172
+ accuracy_robustness_cybersecurity_accuracy: # These must match/complement project cc
173
  article: 'Art. 15(1)'
174
  verbose: 'Model is designed and developed to achieve appropriate level of accuracy'
175
  value: !!bool false
 
198
  verbose: 'Examination, test and validation procedures to be carried out before, during and after the development of the high-risk AI system, and the frequency with which they have to be carried out'
199
  value: !!bool false
200
 
201
+ # Metadata related to model-related requirements when AI project is a GPAI model
202
 
203
  gpai_model_requirements:
204
  task:
 
286
  verbose: 'Where applicable, detailed description of the measures put in place for the purpose of conducting internal and/or external adversarial testing (e.g. red teaming), model adaptations, including alignment and fine-tuning'
287
  value: !!bool false
288
 
289
+ # Metadata related to model-related requirements when AI project is a GPAI model with systemic risk
290
+
291
  gpai_model_with_systemic_risk_requirements:
292
  evaluation:
293
  article: 'Art. 55(1)(a)'
project_cc.yaml CHANGED
@@ -1,10 +1,10 @@
1
-
2
- # Information related to high-level characteristics of AI project, including the role of the operator, their location, and where the output is used
3
  card_details:
4
  card_type: "project" # "project", "data" or "model"
5
  card_label: "project"
6
-
7
- # TODO potentially add scenarios that get the provider off the hook per Article 25
 
 
8
 
9
  operator_details:
10
  provider:
@@ -44,7 +44,7 @@ gpai_model:
44
 
45
  # Information related to whether or not the project, if an AI system, is a high-risk AI system
46
 
47
- # TODO Potentially add all of the use cases mentioned in Annex I below (those covered by existing regulations) rather than bundle them up as they are now
48
 
49
  high_risk_ai_system:
50
  safety_component:
@@ -100,8 +100,6 @@ high_risk_ai_system:
100
  verbose: 'AI project is intended to be used by a judicial authority or on their behalf to assist a judicial authority in researching and interpreting facts and the law and in applying the law to a concrete set of facts, or to be used in a similar way in alternative dispute resolution'
101
  value: !!bool false
102
 
103
- # TODO potentially add military exception and/or law enforcement exception to the below
104
-
105
  high_risk_ai_system_exceptions:
106
  filter_exception_rights:
107
  article: 'Art. 6(3)'
@@ -141,10 +139,14 @@ gpai_model_systemic_risk:
141
  # Information related to the Act's exceptions for scientific research, open-source AI, and more
142
 
143
  excepted:
144
- military: # only applies to AI systems, must implement that in the logic
145
  article: 'Art. 2(3)'
146
  verbose: 'AI project is placed on the market, put into service, or used with or without modification exclusively for military, defence or national security purposes'
147
  value: !!bool false
 
 
 
 
148
  scientific:
149
  article: 'Art. 2(6)'
150
  verbose: 'AI project is or was specifically developed and put into service for the sole purpose of scientific research and development'
@@ -162,11 +164,6 @@ excepted:
162
  verbose: 'AI project involves AI models that are released under a free and open-source licence that allows for the access, usage, modification, and distribution of the model, and whose parameters, including the weights, the information on the model architecture, and the information on model usage, are made publicly available. This exception shall not apply to general purpose AI models with systemic risks'
163
  value: !!bool false
164
 
165
- # We have to remember that even when open_source_gpai_model == true, the exception does not exist if gpai_model_with_systemic_risk == true
166
- # Also, even when open_source_gpai_model == true, the project must still satisfy [Article 53(1)(c), 53(1)(d)]:
167
- # In other words, project_cc_yaml['gpai_model_obligations']['other']['policy'] and project_cc_yaml['gpai_model_obligations']['other']['content'] must be true
168
- # This logic has to be added to compliance_analysis.py
169
-
170
  # Information related to practices prohibited by the Act
171
 
172
  prohibited_practice:
@@ -333,12 +330,6 @@ technical_documentation:
333
  verbose: 'The AI project is either not related to a product covered by the Union harmonisation legislation listed in Section A of Annex I and placed on the market or put into service or, if it is, a single set of technical documentation has been drawn up containing all the information set out in paragraph 1, as well as the information required under those legal acts'
334
  value: !!bool false
335
 
336
-
337
- ### ypi: add Annex IV(1)(h)
338
- ### instructions_for_use:
339
- ### verbose: 'The Technical Documentation includes a general description of the AI project that covers instructions for use for the deployer and a basic description of the user-interface provided, where applicable.'
340
- ### value: !!bool false
341
-
342
  # Information related to the record keeping requirements for high-risk AI systems (Article 12)
343
 
344
  record_keeping:
@@ -388,7 +379,7 @@ transparency_and_provision_of_information_to_deployers:
388
  value: !!bool false
389
  compliance:
390
  article: 'Art. 13(1)'
391
- verbose: 'AI project is designed and developed with transparency to ensure compliance with provider and deployer obligations in Section 3'
392
  value: !!bool false
393
  instructions:
394
  article: 'Art. 13(2)'
@@ -540,7 +531,7 @@ quality_management_system:
540
  fundamental_rights_assessment:
541
  process:
542
  article: 'Art. 27(1)(a)'
543
- verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the deployer’s processes in which the AI project will be used in line with its intended purpose'
544
  value: !!bool false
545
  time_period:
546
  article: 'Art. 27(1)(b)'
@@ -682,8 +673,6 @@ gpai_model_obligations:
682
  verbose: 'Prior to placing the AI project on the Union market, the provider, if established in a third country, has, by written mandate, appointed an authorised representative which is established in the Union.'
683
  value: !!bool false
684
 
685
- # Add other obligations of domestic represenatitives
686
-
687
  # Information related to the Act's requirements for GPAI models with systemic risk
688
 
689
  gpai_models_with_systemic_risk_obligations:
@@ -756,7 +745,7 @@ additional_provider_obligations:
756
 
757
  # Information related to the additional transparency requirements in Article 50, if applicable
758
 
759
- # TODO the requirements seem to be missing from the below. We also need to work on the logic, which is complex.
760
 
761
  transparency_related:
762
  direct_user_interaction:
 
 
 
1
  card_details:
2
  card_type: "project" # "project", "data" or "model"
3
  card_label: "project"
4
+
5
+ # Information related to high-level characteristics of AI project, including the role of the operator, their location, and where the output is used
6
+
7
+ # TODO add some attributes related to Article 25, which captures scenarios where the provider is no longer responsible for some requirements
8
 
9
  operator_details:
10
  provider:
 
44
 
45
  # Information related to whether or not the project, if an AI system, is a high-risk AI system
46
 
47
+ # TODO Unbundle the use cases mentioned in Annex I
48
 
49
  high_risk_ai_system:
50
  safety_component:
 
100
  verbose: 'AI project is intended to be used by a judicial authority or on their behalf to assist a judicial authority in researching and interpreting facts and the law and in applying the law to a concrete set of facts, or to be used in a similar way in alternative dispute resolution'
101
  value: !!bool false
102
 
 
 
103
  high_risk_ai_system_exceptions:
104
  filter_exception_rights:
105
  article: 'Art. 6(3)'
 
139
  # Information related to the Act's exceptions for scientific research, open-source AI, and more
140
 
141
  excepted:
142
+ military: # only applies to AI systems, must implement that in logic
143
  article: 'Art. 2(3)'
144
  verbose: 'AI project is placed on the market, put into service, or used with or without modification exclusively for military, defence or national security purposes'
145
  value: !!bool false
146
+ military_use: # only applies to AI systems, must implement that in logic
147
+ article: 'Art. 2(3)'
148
+ verbose: 'AI project is not placed on the market or put into service in the Union, but the output is used in the Union exclusively for military, defence or national security purposes, regardless of the type of entity carrying out those activities.'
149
+ value: !!bool false
150
  scientific:
151
  article: 'Art. 2(6)'
152
  verbose: 'AI project is or was specifically developed and put into service for the sole purpose of scientific research and development'
 
164
  verbose: 'AI project involves AI models that are released under a free and open-source licence that allows for the access, usage, modification, and distribution of the model, and whose parameters, including the weights, the information on the model architecture, and the information on model usage, are made publicly available. This exception shall not apply to general purpose AI models with systemic risks'
165
  value: !!bool false
166
 
 
 
 
 
 
167
  # Information related to practices prohibited by the Act
168
 
169
  prohibited_practice:
 
330
  verbose: 'The AI project is either not related to a product covered by the Union harmonisation legislation listed in Section A of Annex I and placed on the market or put into service or, if it is, a single set of technical documentation has been drawn up containing all the information set out in paragraph 1, as well as the information required under those legal acts'
331
  value: !!bool false
332
 
 
 
 
 
 
 
333
  # Information related to the record keeping requirements for high-risk AI systems (Article 12)
334
 
335
  record_keeping:
 
379
  value: !!bool false
380
  compliance:
381
  article: 'Art. 13(1)'
382
+ verbose: 'AI project is designed and developed with transparency to ensure compliance with provider and deployer obligations in Section 3' # replace reference to section with text
383
  value: !!bool false
384
  instructions:
385
  article: 'Art. 13(2)'
 
531
  fundamental_rights_assessment:
532
  process:
533
  article: 'Art. 27(1)(a)'
534
+ verbose: 'AI project has been subject to a fundamental rights impact assessment that includes a description of the deployer processes in which the AI project will be used in line with its intended purpose'
535
  value: !!bool false
536
  time_period:
537
  article: 'Art. 27(1)(b)'
 
673
  verbose: 'Prior to placing the AI project on the Union market, the provider, if established in a third country, has, by written mandate, appointed an authorised representative which is established in the Union.'
674
  value: !!bool false
675
 
 
 
676
  # Information related to the Act's requirements for GPAI models with systemic risk
677
 
678
  gpai_models_with_systemic_risk_obligations:
 
745
 
746
  # Information related to the additional transparency requirements in Article 50, if applicable
747
 
748
+ # TODO add requirements below and then implement logic elsewhere
749
 
750
  transparency_related:
751
  direct_user_interaction: