wlmbrown committed · Commit 7269140
Parent(s): 6571e75
Untested code covering some % of Act requirements
Files changed:
- compliance_analysis.py  +259 −68
- data_cc.md  +0 −36
- data_cc.yaml  +127 −0
- model_cc.md  +0 −78
- model_cc.yaml  +203 −0
- project_cc.md → project_cc.yaml  +236 −117
compliance_analysis.py
CHANGED
@@ -1,100 +1,306 @@
import os
import yaml

-#
def create_list_of_files(folder_path):
    for root, dirs, files in os.walk(folder_path):
        for filename in files:
            found_files.append(os.path.join(root, filename))

-#Define a function that checks for a Project CC. Without this, there cannot be an analysis.
def check_for_project_cc(folder_path):
    found_files = []

    # Walk through the directory
    for root, dirs, files in os.walk(folder_path):
        for filename in files:
-           if filename.lower() == 'project_cc.
                found_files.append(os.path.join(root, filename))

    # Check the results
    if len(found_files) == 0:
        print(f"We did not find a Project CC in your folder. We cannot run a compliance analysis without a Project CC.")
    elif len(found_files) == 1:
        print(f"We found exactly one Project CC in your folder. Great job!:")
        print(f" - {found_files[0]}")
-       run_compliance_analysis(folder_path
    else:
        print(f"Multiple Project CCs found:")
        for file_path in found_files:
            print(f" - {file_path}")
        print("We found multiple Project CCs in your folder. There should only be one Project CC per project.")

-def run_compliance_analysis(

-   # Load the Project CC
-   with open(project_cc, 'r') as file:
        project_cc_yaml = yaml.safe_load(file)

-   #

-
-
-   for key, value in project_cc_yaml['prohibited_ai_practice_status']:
-       if value: # This condition will be met whereever a prohibited practice exists
-           print(f"You have a prohibited practice and are non-compliant with the Act")
-           break
-   else:
-       print("No prohibited practices found. That's good...")

-
-
-   print("The project is an AI system.")

-

-
-   for key in secondary_keys:
-       if key in secondary_data and secondary_data[key] == True:
-           print(f"The key '{key}' is True in the secondary file.")
-       else:
-           print(f"The key '{key}' is not True in the secondary file.")
-           all_true = False
-
-   if all_true:
-       print("All specified keys in the secondary file are True.")
-   else:
-       print("Not all specified keys in the secondary file are True.")
-   else:
-       print(f"The key '{main_key}' is not True in the main file.")


-
-
-
-
-
-
-
    return True
-
    return True
-   if
    return True
-   else
    return False

-def check_data_ccs(folder_path):

-
-   # Check if the search word is in the filename
-   if "model_cc.md" in filename.lower():
-       # Construct the full file path
-       file_path = os.path.join(folder_path, filename)
-
-       # Process the file
-       process_file(file_path)


def check_all_true(file_path):
@@ -113,21 +319,6 @@ def check_all_true(file_path):
    else:
        print("No problems here")

-
-
-# Example usage
-main_file = 'main.yaml'
-secondary_file = 'secondary.yaml'
-main_key = 'data_and_data_governance'
-secondary_keys = [
-    'Training data is relevant',
-    'Training data is sufficiently representative',
-    'Training data is, to the best extent possible, free of errors'
-]
-
-check_yaml_values(main_file, secondary_file, main_key, secondary_keys)
-
-
def main():
    # Prompt the user to enter a filename
    file_path = input("Please enter a file path to the folder containing all your AI project's Compliance Cards: ")
import os
import yaml
+import sys
+from enum import Enum

+# Create some variables we will use throughout our analysis
+
+# Type of AI project (AI system vs GPAI model)
+ai_system = False
+gpai_model = False
+high_risk_ai_system = False
+gpai_model_systematic_risk = False
+
+# Role and location of AI project operator
+provider = False
+deployer = False
+importer = False
+distributor = False
+product_manufacturer = False
+eu_located = False
+
+# EU market status
+placed_on_market = False
+put_into_service = False
+output_used = False
+
+#Define a function that creates a list of all the files in a provided folder. We will use this list for different things.
def create_list_of_files(folder_path):
    for root, dirs, files in os.walk(folder_path):
        for filename in files:
            found_files.append(os.path.join(root, filename))

+#Define a function that checks for a Project CC. Without this, there simply cannot be an analysis.
def check_for_project_cc(folder_path):
    found_files = []

    # Walk through the directory
    for root, dirs, files in os.walk(folder_path):
        for filename in files:
+           if filename.lower() == 'project_cc.yaml':
                found_files.append(os.path.join(root, filename))

    # Check the results
    if len(found_files) == 0:
        print(f"We did not find a Project CC in your folder. We cannot run a compliance analysis without a Project CC.")
+       sys.exit()
    elif len(found_files) == 1:
        print(f"We found exactly one Project CC in your folder. Great job!:")
        print(f" - {found_files[0]}")
+       run_compliance_analysis(folder_path)
    else:
        print(f"Multiple Project CCs found:")
        for file_path in found_files:
            print(f" - {file_path}")
        print("We found multiple Project CCs in your folder. There should only be one Project CC per project.")
+def run_compliance_analysis(folder_path):

+    # Load the Project CC YAML file from the supplied folder. This will be our starting point.
+    with open(folder_path + 'project_cc.yaml', 'r') as file:
        project_cc_yaml = yaml.safe_load(file)

+    # Determine project type (AI system vs. GPAI model) as well as operator type. We will use these for different things.
+    set_type(project_cc_yaml)
+    set_operator_role_and_location(project_cc_yaml)
+    set_eu_market_status(project_cc_yaml)
+
+    # Check if the project is within scope of the Act. If it's not, the analysis is over.
+    if check_within_scope(project_cc_yaml):
+        print("Project is within the scope of the Act. Let's continue...")
+    else:
+        sys.exit("Project is not within the scope of what is regulated by the Act.")
+
+    # Check for prohibited practices. If any exist, the analysis is over.
+    if check_prohibited(project_cc_yaml) == True:
+        print("Project contains prohibited practices and is therefore non-compliant.")
+        sys.exit("Project is non-compliant due to a prohibited practice.")
+    else:
+        print("Project does not contain prohibited practices. Let's continue...")
+
+    # If the project is a high-risk AI system, check that it has met all the requirements for such systems:
+
+    if high_risk_ai_system:
+
+        # Do this by examining the Project CC
+
+        for key, value in project_cc_yaml['risk_management_system'].items():
+            if not value:
+                sys.exit("Because of project-level characteristics, this high-risk AI system fails the risk management requirements under Article 9.")
+        for key, value in project_cc_yaml['technical_documentation'].items():
+            if not value:
+                sys.exit("Because of project-level characteristics, this high-risk AI system fails the technical documentation requirements under Article 11.")
+        for key, value in project_cc_yaml['record_keeping'].items():
+            if not value:
+                sys.exit("Because of project-level characteristics, this high-risk AI system fails the record keeping requirements under Article 12.")
+        for key, value in project_cc_yaml['transparency_and_provision_of_information_to_deployers'].items():
+            if not value:
+                sys.exit("Because of project-level characteristics, this high-risk AI system fails the transparency requirements under Article 13.")
+        for key, value in project_cc_yaml['human_oversight'].items():
+            if not value:
+                sys.exit("Because of project-level characteristics, this high-risk AI system fails the human oversight requirements under Article 14.")
+        for key, value in project_cc_yaml['accuracy_robustness_cybersecurity'].items():
+            if not value:
+                sys.exit("Because of project-level characteristics, this high-risk AI system fails the accuracy, robustness, and cybersecurity requirements under Article 15.")
+        for key, value in project_cc_yaml['quality_management_system'].items():
+            if not value:
+                sys.exit("Because of project-level characteristics, this high-risk AI system fails the quality management requirements under Article 17.")
+
+        # Do this by examining any and all Data CCs too
+
+        for filename in os.listdir(folder_path):
+            # Check if the search word is in the filename
+            if "data_cc.yaml" in filename.lower():
+
+                # If it is, load the yaml
+
+                with open(folder_path + filename, 'r') as file:
+                    data_cc_yaml = yaml.safe_load(file)
+
+                for key, value in data_cc_yaml['data_and_data_governance'].items():
+                    if not value:
+                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the data and data governance requirements under Article 10.")
+                for key, value in data_cc_yaml['technical_documentation'].items():
+                    if not value:
+                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the technical documentation requirements under Article 11.")
+                for key, value in data_cc_yaml['transparency_and_provision_of_information_to_deployers'].items():
+                    if not value:
+                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the transparency requirements under Article 13.")
+                for key, value in data_cc_yaml['quality_management_system'].items():
+                    if not value:
+                        sys.exit(f"Because of the dataset represented by {filename}, this high-risk AI system fails the quality management requirements under Article 17.")
+
+        # Do this by examining any and all Model CCs too
+
+        for filename in os.listdir(folder_path):
+            # Check if the search word is in the filename
+            if "model_cc.yaml" in filename.lower():
+
+                # If it is, load the yaml
+
+                with open(folder_path + filename, 'r') as file:
+                    model_cc_yaml = yaml.safe_load(file)
+
+                for key, value in model_cc_yaml['risk_management_system'].items():
+                    if not value:
+                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the risk management requirements under Article 9.")
+                for key, value in model_cc_yaml['technical_documentation'].items():
+                    if not value:
+                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the technical documentation requirements under Article 11.")
+                for key, value in model_cc_yaml['transparency_and_provision_of_information_to_deployers'].items():
+                    if not value:
+                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the transparency requirements under Article 13.")
+                for key, value in model_cc_yaml['accuracy_robustness_cybersecurity'].items():
+                    if not value:
+                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the accuracy, robustness, and cybersecurity requirements under Article 15.")
+                for key, value in model_cc_yaml['quality_management_system'].items():
+                    if not value:
+                        sys.exit(f"Because of the model represented by {filename}, this high-risk AI system fails the quality management requirements under Article 17.")
+
+    # If the project is a GPAI model, check that it has met all the requirements for such systems:
+
+    if gpai_model:
+
+        # Do this by examining the Project CC
+
+        for key, value in project_cc_yaml['gpai_model_provider_obligations'].items():
+            if not value:
+                sys.exit("GPAI model fails the transparency requirements under Article 53.")
+
+        # Do this by examining any and all Data CCs too
+
+        for filename in os.listdir(folder_path):
+            # Check if the search word is in the filename
+            if "data_cc.yaml" in filename.lower():
+
+                # If it is, load the yaml
+
+                with open(folder_path + filename, 'r') as file:
+                    data_cc_yaml = yaml.safe_load(file)
+
+                for key, value in data_cc_yaml['gpai_requirements']['gpai_requirements'].items():
+                    if not value:
+                        sys.exit(f"Because of the dataset represented by {filename}, this GPAI fails the transparency requirements under Article 53.")
+
+        # Do this by examining any and all Model CCs too

+        for filename in os.listdir(folder_path):
+            # Check if the search word is in the filename
+            if "model_cc.yaml" in filename.lower():

+                # If it is, load the yaml

+                with open(folder_path + filename, 'r') as file:
+                    model_cc_yaml = yaml.safe_load(file)

+                for key, value in model_cc_yaml['obligations_for_providers_of_gpai_models'].items():
+                    if not value:
+                        sys.exit(f"Because of the model represented by {filename}, this GPAI fails the transparency requirements under Article 53.")

+    # If the project is a GPAI model with systematic risk, check that it has additionally met all the requirements for such systems:

+    if gpai_model_systematic_risk:

+        # Do this by examining the Project CC
+
+        for key, value in project_cc_yaml['gpai_obligations_for_systemic_risk_models'].items():
+            if not value:
+                sys.exit("GPAI model with systematic risk fails the transparency requirements under Article 55.")
+
+        # Do this by examining any and all Model CCs too
+
+        for filename in os.listdir(folder_path):
+            # Check if the search word is in the filename
+            if "model_cc.yaml" in filename.lower():
+
+                # If it is, load the yaml
+
+                with open(folder_path + filename, 'r') as file:
+                    model_cc_yaml = yaml.safe_load(file)
+
+                for key, value in model_cc_yaml['obligations_for_providers_of_gpai_models_with_systemic_risk'].items():
+                    if not value:
+                        sys.exit(f"Because of the model represented by {filename}, this GPAI model with systematic risk fails the transparency requirements under Article 55.")
+
+def set_type(project_cc_yaml):
+    global ai_system, gpai_model, high_risk_ai_system, gpai_model_systematic_risk
+    if project_cc_yaml['ai_system']['ai_system']['value']:
+        ai_system = True
+    if project_cc_yaml['gpai_model']['ai_system']['value']:
+        gpai_model = True
+    if ai_system and gpai_model:
+        sys.exit("Your project cannot be both an AI system and a GPAI model. Please revise your Project CC accordingly.")
+    if ai_system == True:
+        for key, value in project_cc_yaml['high_risk_ai_system'].items():
+            if value and sum(map(bool, [project_cc_yaml['high_risk_ai_system']['filter_exception_rights'], project_cc_yaml['high_risk_ai_system']['filter_exception_narrow'], project_cc_yaml['high_risk_ai_system']['filter_exception_human'], project_cc_yaml['high_risk_ai_system']['filter_exception_deviation'], project_cc_yaml['high_risk_ai_system']['filter_exception_prep']])) < 1:
+                high_risk_ai_system = True
+    if gpai_model == True:
+        if project_cc_yaml['gpai_model_systematic_risk']['evaluation'] or project_cc_yaml['gpai_model_systematic_risk']['flops']:
+            gpai_model_systematic_risk = True
+
+def set_operator_role_and_location(project_cc_yaml):
+    global provider, deployer, importer, distributor, product_manufacturer, eu_located
+    if project_cc_yaml['operator_role']['eu_located']['value']:
+        eu_located = True
+    if project_cc_yaml['operator_role']['provider']['value']:
+        provider = True
+    if project_cc_yaml['operator_role']['deployer']['value']:
+        deployer = True
+    if project_cc_yaml['operator_role']['importer']['value']:
+        importer = True
+    if project_cc_yaml['operator_role']['distributor']['value']:
+        distributor = True
+    if project_cc_yaml['operator_role']['product_manufacturer']['value']:
+        product_manufacturer = True
+    if ai_system and gpai_model:
+        sys.exit("Your project cannot be both an AI system and a GPAI model. Please revise your Project CC accordingly.")
+    if sum(map(bool, [provider, deployer, importer, distributor, product_manufacturer])) != 1:
+        sys.exit("Please specify exactly one operator role.")
+
+def set_eu_market_status(project_cc_yaml):
+    global placed_on_market, put_into_service, output_used
+    if project_cc_yaml['eu_market']['placed_on_market']['value']:
+        placed_on_market = True
+    if project_cc_yaml['eu_market']['put_into_service']['value']:
+        put_into_service = True
+    if project_cc_yaml['operator_role']['output_used']['value']:
+        output_used = True
+
+def check_within_scope(project_cc_yaml):
+    if not check_excepted(project_cc_yaml):
+        if provider and ((ai_system and (placed_on_market or put_into_service)) or (gpai_model and placed_on_market)): # Article 2.1(a)
+            return True
+        if deployer and eu_located: # Article 2.1(b)
+            return True
+        if (provider or deployer) and (ai_system and eu_located and output_used): # Article 2.1(c)
+            return True
+        if (importer or distributor) and ai_system: # Article 2.1(d)
+            return True
+        if product_manufacturer and ai_system and (placed_on_market or put_into_service): # Article 2.1(e)
+            return True
+    else:
+        return False
+
+def check_excepted(project_cc_yaml):
+    if project_cc_yaml['excepted']['scientific'] or project_cc_yaml['excepted']['pre_market'] or (ai_system and project_cc_yaml['excepted']['open_source_ai_system']) or (gpai_model and project_cc_yaml['excepted']['open_source_gpai_system']):
        return True
+    else:
+        return False
+
+def check_prohibited(project_cc_yaml):
+    if ai_system:
+        for key, value in project_cc_yaml['prohibited_practice']['ai_system'].items():
+            if value:
+                print("You are engaged in a prohibited practice and thus the project is non-compliant.")
+                return True
+        if project_cc_yaml['prohibited_practice']['biometric']['categorization']:
+            print("You are engaged in a prohibited practice and thus the project is non-compliant.")
            return True
+        if project_cc_yaml['prohibited_practice']['biometric']['real_time'] and sum(map(bool, [project_cc_yaml['prohibited_practice']['biometric']['real_time_exception_victim'], project_cc_yaml['prohibited_practice']['biometric']['real_time_exception_threat'], project_cc_yaml['prohibited_practice']['biometric']['real_time_exception_investigation']])) == 0:
+            print("You are engaged in a prohibited practice and thus the project is non-compliant.")
            return True
+    else:
+        print("You are not engaged in any prohibited practices.")
        return False

+
def check_all_true(file_path):
    ...

    else:
        print("No problems here")

def main():
    # Prompt the user to enter a filename
    file_path = input("Please enter a file path to the folder containing all your AI project's Compliance Cards: ")
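The run_compliance_analysis flow above repeats one pattern for every Compliance Card: load a YAML file, walk one of its requirement sections, and stop as soon as a requirement is not met. A compact sketch of that pattern, assuming the nested verbose/value layout used by the new data_cc.yaml and model_cc.yaml below; the helper names check_section and check_data_cc are illustrative and not part of the commit:

import os
import sys
import yaml

def check_section(cc_yaml, section, article, label):
    # Each requirement is stored as {'verbose': <description>, 'value': <bool>},
    # so iterate the section with .items() and read the nested 'value' flag.
    for key, entry in cc_yaml.get(section, {}).items():
        if not entry.get('value', False):
            sys.exit(f"{label} fails '{key}' under Article {article}: {entry.get('verbose', '')}")

def check_data_cc(path):
    # Load one Data CC and test the Article 10 data-governance section.
    with open(path, 'r') as f:
        data_cc = yaml.safe_load(f)
    check_section(data_cc, 'data_and_data_governance', 10, f"Dataset {os.path.basename(path)}")

run_compliance_analysis could call such a helper once per file whose name ends in data_cc.yaml or model_cc.yaml instead of repeating the loop for every section.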
data_cc.md
DELETED
@@ -1,36 +0,0 @@
-data_and_data_governance:
-  'Data sets has been subject to data governance and management practices appropriate for the intended purpose of the system': !!bool true # Art. 10(1)-(2)
-  'Data governance and management practices have been applied to the relevant design choices': !!bool true # Art. 10(2)(a)
-  'Data governance and management practices have been applied to data collection processes and the origin of data, and in the case of personal data, the original purpose of the data collection': !!bool true # Art. 10(2)(b)
-  'Data governance and management practices have been applied to relevant data-preparation processing operations, such as annotation, labelling, cleaning, updating, enrichment and aggregation': !!bool true # Art. 10(2)(c)
-  'Data governance and management practices have been applied to the formulation of assumptions, in particular with respect to the information that the data are supposed to measure and represent': !!bool true # Art. 10(2)(d)
-  'Data governance and management practices included an assessment of the availability, quantity and suitability of the data sets that are needed': !!bool true # Art. 10(2)(e)
-  'Data governance and management practices have included an examination of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations': !!bool true # Art. 10(2)(f)
-  'Data governance and management practices included appropriate measures to detect, prevent and mitigate possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations': !!bool true # Art. 10(2)(g)
-  'Data governance and management practices have included the identification of relevant data gaps or shortcomings that prevent compliance with this Regulation, and how those gaps and shortcomings can be addressed': !!bool true # Art. 10(2)(h)
-  'Training data is relevant': !!bool true # Art. 10(3); Rec. 67
-  'Training data is sufficiently representative': !!bool true # Art. 10(3); Rec. 67
-  'Training data is, to the best extent possible, free of errors': !!bool true # Art. 10(3); Rec. 67
-  'Training data is complete in view of the intended purpose of system': !!bool true # Art. 10(3); Rec. 67
-  'Training data possesses the appropriate statistical properties, including, where applicable, as regards the people in relation to whom the system is intended to be used': !!bool true # Art. 10(3)
-  'Training data takes into account, to the extent required by the intended purpose, the characteristics or elements that are particular to the specific geographical, contextual, behavioural or functional setting within which the system is intended to be used': !!bool true # Art. 10(4)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the use of this data was strictly necessary': !!bool true # Art. 10(5)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the use complied with appropriate safeguards for the fundamental rights and freedoms of natural persons': !!bool true # Art. 10(5)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the use of this data satisfied the provisions set out in Regulations (EU) 2016/679 and (EU) 2018/1725 and Directive (EU) 2016/680': !!bool true # Art. 10(5)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the bias detection and correction was not effectively fulfilled by processing other data, including synthetic or anonymised data': !!bool true # Art. 10(5)(a)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were not subject to technical limitations on the re-use of the personal data, and state-of-the-art security and privacy-preserving measures, including pseudonymisation': !!bool true # Art. 10(5)(b)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were subject to measures to ensure that the personal data processed are secured, protected, subject to suitable safeguards, including strict controls and documentation of the access, to avoid misuse and ensure that only authorised persons have access to those personal data with appropriate confidentiality obligations': !!bool true # Art. 10(5)(c)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were not to be transmitted, transferred or otherwise accessed by other parties': !!bool true # Art. 10(5)(d)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were deleted once the bias was corrected or the personal data reached the end of its retention period (whichever came first)': !!bool true # Art. 10(5)(e)
-  'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the records of processing activities pursuant to Regulations (EU) 2016/679 and (EU) 2018/1725 and Directive (EU) 2016/680 include the reasons why the processing of special categories of personal data was strictly necessary to detect and correct biases, and why that objective could not be achieved by processing other data': !!bool true # Art. 10(5)(f)
-
-technical_documentation:
-  'Where relevant, the data requirements in terms of datasheets describing the training methodologies and techniques and the training data sets used, including a general description of these data sets, information about their provenance, scope and main characteristics; how the data was obtained and selected; labelling procedures (e.g. for supervised learning), data cleaning methodologies (e.g. outliers detection)': !!bool true # Art. 11; Annex IV(2)(d)
-  'Validation and testing procedures used, including information about the validation and testing data used and their main characteristics; metrics used to measure accuracy, robustness and compliance with other relevant requirements set out in Title III, Chapter 2 as well as potentially discriminatory impacts; test logs and all test reports dated and signed by the responsible persons, including with regard to predetermined changes as referred to under point (f)': !!bool true # Art. 11; Annex IV(2)(g)
-  'Cybersecurity measures put in place as regards the data (e.g., scanning for data poisoning)': !!bool true # Art. 11; Annex IV(2)(h)
-
-transparency_and_provision_of_information_to_deployers:
-  'Specifications for the input data, or any other relevant information in terms of the training, validation and testing data sets used, taking into account the intended purpose of the AI system': !!bool true # Art. 13(3)(b)(vi)
-
-quality_management_system:
-  'Systems and procedures for data management, including data acquisition, data collection, data analysis, data labelling, data storage, data filtration, data mining, data aggregation, data retention and any other operation regarding the data that is performed before and for the purposes of the placing on the market or putting into service of high-risk AI systems': !!bool true # Art. 17(1)(f)
data_cc.yaml
ADDED
@@ -0,0 +1,127 @@
+data_and_data_governance:
+  data_governance: # Art. 10(1)-(2)
+    verbose: 'Data sets has been subject to data governance and management practices appropriate for the intended purpose of the system'
+    value: !!bool false
+  design_choices: # Art. 10(2)(a)
+    verbose: 'Data governance and management practices have been applied to the relevant design choices'
+    value: !!bool false
+  data_origin: # Art. 10(2)(b)
+    verbose: 'Data governance and management practices have been applied to data collection processes and the origin of data, and in the case of personal data, the original purpose of the data collection'
+    value: !!bool false
+  data_preparation: # Art. 10(2)(c)
+    verbose: 'Data governance and management practices have been applied to relevant data-preparation processing operations, such as annotation, labelling, cleaning, updating, enrichment and aggregation'
+    value: !!bool false
+  data_assumptions: # Art. 10(2)(d)
+    verbose: 'Data governance and management practices have been applied to the formulation of assumptions, in particular with respect to the information that the data are supposed to measure and represent'
+    value: !!bool false
+  data_quantity: # Art. 10(2)(e)
+    verbose: 'Data governance and management practices included an assessment of the availability, quantity and suitability of the data sets that are needed'
+    value: !!bool false
+  data_bias_examination: # Art. 10(2)(f)
+    verbose: 'Data governance and management practices have included an examination of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations'
+    value: !!bool false
+  data_bias_mitigation: # Art. 10(2)(g)
+    verbose: 'Data governance and management practices included appropriate measures to detect, prevent and mitigate possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations'
+    value: !!bool false
+  data_compliance: # Art. 10(2)(h)
+    verbose: 'Data governance and management practices have included the identification of relevant data gaps or shortcomings that prevent compliance with this Regulation, and how those gaps and shortcomings can be addressed'
+    value: !!bool false
+  data_relevance: # Art. 10(3); Rec. 67
+    verbose: 'Training data is relevant'
+    value: !!bool false
+  data_representativity: # Art. 10(3); Rec. 67
+    verbose: 'Training data is sufficiently representative'
+    value: !!bool false
+  data_errors: # Art. 10(3); Rec. 67
+    verbose: 'Training data is, to the best extent possible, free of errors'
+    value: !!bool false
+  data_completeness: # Art. 10(3); Rec. 67
+    verbose: 'Training data is complete in view of the intended purpose of system'
+    value: !!bool false
+  statistical_properties: # Art. 10(3)
+    verbose: 'Training data possesses the appropriate statistical properties, including, where applicable, as regards the people in relation to whom the system is intended to be used'
+    value: !!bool false
+  contextual: # Art. 10(4)
+    verbose: 'Training data takes into account, to the extent required by the intended purpose, the characteristics or elements that are particular to the specific geographical, contextual, behavioural or functional setting within which the system is intended to be used'
+    value: !!bool false
+  personal_data_necessary: # Art. 10(5)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the use of this data was strictly necessary'
+    value: !!bool false
+  personal_data_safeguards: # Art. 10(5)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the use complied with appropriate safeguards for the fundamental rights and freedoms of natural persons'
+    value: !!bool false
+  personal_data_gdpr: # Art. 10(5)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the use of this data satisfied the provisions set out in Regulations (EU) 2016/679 and (EU) 2018/1725 and Directive (EU) 2016/680'
+    value: !!bool false
+  personal_data_other_options: # Art. 10(5)(a)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the bias detection and correction was not effectively fulfilled by processing other data, including synthetic or anonymised data'
+    value: !!bool false
+  personal_data_limitations: # Art. 10(5)(b)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were not subject to technical limitations on the re-use of the personal data, and state-of-the-art security and privacy-preserving measures, including pseudonymisation'
+    value: !!bool false
+  personal_data_controls: # Art. 10(5)(c)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were subject to measures to ensure that the personal data processed are secured, protected, subject to suitable safeguards, including strict controls and documentation of the access, to avoid misuse and ensure that only authorised persons have access to those personal data with appropriate confidentiality obligations'
+    value: !!bool false
+  personal_data_access: # Art. 10(5)(d)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were not to be transmitted, transferred or otherwise accessed by other parties'
+    value: !!bool false
+  personal_data_deletion: # Art. 10(5)(e)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the special categories of personal data were deleted once the bias was corrected or the personal data reached the end of its retention period (whichever came first)'
+    value: !!bool false
+  personal_data_necessary: # Art. 10(5)(f)
+    verbose: 'Where special categories of personal data have been used to ensure the detection and correction of possible biases that are likely to affect the health and safety of persons, have a negative impact on fundamental rights or lead to discrimination prohibited under Union law, especially where data outputs influence inputs for future operations, the records of processing activities pursuant to Regulations (EU) 2016/679 and (EU) 2018/1725 and Directive (EU) 2016/680 include the reasons why the processing of special categories of personal data was strictly necessary to detect and correct biases, and why that objective could not be achieved by processing other data'
+    value: !!bool false
+
+technical_documentation:
+  general_description: # Art. 11; Annex IV(2)(d)
+    verbose: 'Dataset carries technical documentation, such as a datasheet, including a general description of the dataset'
+    value: !!bool false
+  provenance: # Art. 11; Annex IV(2)(d)
+    verbose: 'Dataset carries technical documentation, such as a datasheet, including information about its provenance'
+    value: !!bool false
+  scope: # Art. 11; Annex IV(2)(d)
+    verbose: 'Dataset carries technical documentation, such as a datasheet, including information about scope and main characteristics'
+    value: !!bool false
+  origins: # Art. 11; Annex IV(2)(d)
+    verbose: 'Dataset carries technical documentation, such as a datasheet, including information about how the data was obtained and selected'
+    value: !!bool false
+  labelling: # Art. 11; Annex IV(2)(d)
+    verbose: 'Dataset carries technical documentation, such as a datasheet, including information about labelling procedures (e.g. for supervised learning)'
+    value: !!bool false
+  cleaning: # Art. 11; Annex IV(2)(d)
+    verbose: 'Dataset carries technical documentation, such as a datasheet, including information about data cleaning methodologies (e.g. outliers detection)'
+    value: !!bool false
+  cybersecurity: # Art. 11; Annex IV(2)(h)
+    verbose: 'Cybersecurity measures put in place as regards the data (e.g., scanning for data poisoning)'
+    value: !!bool false
+
+transparency_and_provision_of_information_to_deployers: # Art. 13(3)(b)(vi)
+  transparency_and_provision_of_information_to_deployers:
+    verbose: 'Specifications for the input data, or any other relevant information in terms of the training, validation and testing data sets used, taking into account the intended purpose of the AI system'
+    value: !!bool false
+
+quality_management_system: # Art. 17(1)(f)
+  quality_management_system:
+    verbose: 'Systems and procedures for data management, including data acquisition, data collection, data analysis, data labelling, data storage, data filtration, data mining, data aggregation, data retention and any other operation regarding the data that is performed before and for the purposes of the placing on the market or putting into service of high-risk AI systems'
+    value: !!bool false
+
+gpai_requirements: # Art. 53(1); Annex XI(2)(c)
+  gpai_requirements:
+    data_type:
+      verbose: 'Documentation for the dataset is available that contains the type of data'
+      value: !!bool false
+    data_provenance:
+      verbose: 'Documentation for the dataset is available that contains the provenance of data'
+      value: !!bool false
+    data_curation:
+      verbose: 'Documentation for the dataset is available that contains the curation methodologies (e.g. cleaning, filtering, etc.)'
+      value: !!bool false
+    data_number:
+      verbose: 'Documentation for the dataset is available that contains the number of data points'
+      value: !!bool false
+    data_scope:
+      verbose: 'Documentation for the dataset is available that contains the data scope and main characteristics'
+      value: !!bool false
+    data_origin:
+      verbose: 'Documentation for the dataset is available that contains information on how the data was obtained and selected as well as all other measures to detect the unsuitability of data sources and methods to detect identifiable biases'
+      value: !!bool false
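Since every requirement in the card above is a mapping with a verbose description and a boolean value (sometimes nested one level deeper, as under gpai_requirements), a short recursive walk can list which requirements are still unmet. This is an illustrative sketch only; the file name and output format are assumptions, not part of the commit:

import yaml

def unmet_requirements(node, path=""):
    # A requirement leaf is a mapping that carries a boolean 'value';
    # anything else is treated as a nested section and walked recursively.
    if isinstance(node, dict) and isinstance(node.get('value'), bool):
        return [] if node['value'] else [path]
    found = []
    if isinstance(node, dict):
        for key, child in node.items():
            found.extend(unmet_requirements(child, f"{path}.{key}" if path else key))
    return found

with open('data_cc.yaml', 'r') as f:
    card = yaml.safe_load(f)
for item in unmet_requirements(card):
    print(item)  # every requirement whose value is still false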
model_cc.md
DELETED
@@ -1,78 +0,0 @@
-risk_management_system:
-  'Known or reasonably foreseeable risks the model can pose to health or safety when used for intended purpose': !!bool true # Art. 9(2)(a)
-  'Estimation and evaluation of risks when model used for intended purpose': !!bool true # Art. 9(2)(b)
-  'Estimation and evaluation of risks when model used under conditions of reasonably foreseeable misuse': !!bool true # Art. 9(2)(b)
-  'Testing to ensure model performs consistently for intended purpose': !!bool true # Art. 9(6)
-  'Testing to ensure model complies with Act': !!bool true # Art. 9(6)
-  'Testing against prior defined metrics appropriate to intended purpose': !!bool true # Art. 9(8)
-  'Testing against probabilistic thresholds appropriate to intended purpose': !!bool true # Art. 9(8)
-
-technical_documentation:
-  'Pre-trained elements of model provided by third parties and how used, integrated or modified': !!bool true # Art. 11; Annex IV(2)(a)
-  'General logic of model': !!bool true # Art. 11; Annex IV(2)(b)
-  'Key design choices including rationale and assumptions made, including with regard to persons or groups on which model intended to be used': !!bool true # Art. 11; Annex IV(2)(b)
-  'Main classification choices': !!bool true # Art. 11; Annex IV(2)(b)
-  'What model is designed to optimise for and relevance of its different parameters': !!bool true # Art. 11; Annex IV(2)(b)
-  'Description of the expected output and output quality of the system': !!bool true # Art. 11; Annex IV(2)(b)
-  'Decisions about any possible trade-off made regarding the technical solutions adopted to comply with the requirements set out in Title III, Chapter 2': !!bool true # Art. 11; Annex IV(2)(b)
-  'Assessment of the human oversight measures needed in accordance with Article 14, including an assessment of the technical measures needed to facilitate the interpretation of the outputs of AI systems by the deployers, in accordance with Articles 13(3)(d)': !!bool true # Art. 11; Annex IV(2)(e)
-  'Validation and testing procedures used, including information about the validation and testing data used and their main characteristics; metrics used to measure accuracy, robustness and compliance with other relevant requirements set out in Title III, Chapter 2 as well as potentially discriminatory impacts; test logs and all test reports dated and signed by the responsible persons, including with regard to predetermined changes as referred to under point (f)': !!bool true # Art. 11; Annex IV(2)(g)
-  'Cybersecurity measures put in place': !!bool true # Art. 11; Annex IV(2)(h)
-
-transparency_and_information_provision:
-  'Intended purpose': !!bool true # Art. 13(3)(b)(i)
-  'Level of accuracy, including its metrics, robustness and cybersecurity referred to in Article 15 against which the high-risk AI system has been tested and validated and which can be expected, and any known and foreseeable circumstances that may have an impact on that expected level of accuracy, robustness and cybersecurity': !!bool true # Art. 13(3)(b)(ii)
-  'Any known or foreseeable circumstance, related to the use of the high-risk AI system in accordance with its intended purpose or under conditions of reasonably foreseeable misuse, which may lead to risks to the health and safety or fundamental rights referred to in Article 9(2)': !!bool true # Art. 13(3)(b)(iii)
-  'Technical capabilities and characteristics of the AI system to provide information that is relevant to explain its output': !!bool true # Art. 13(3)(b)(iv)
-  'Performance regarding specific persons or groups of persons on which the system is intended to be used': !!bool true # Art. 13(3)(b)(v)
-  'Specifications for the input data, or any other relevant information in terms of the training, validation and testing data sets used, taking into account the intended purpose of the AI system': !!bool true # Art. 13(3)(b)(vi)
-  'Information to enable deployers to interpret the output of the high-risk AI system and use it appropriately': !!bool true # Art. 13(3)(b)(vii)
-  'Human oversight measures referred to in Article 14, including the technical measures put in place to facilitate the interpretation of the outputs of AI systems by the deployers': !!bool true # Art. 13(3)(d)
-  'Computational and hardware resources needed, the expected lifetime of the high-risk AI system and any necessary maintenance and care measures, including their frequency, to ensure the proper functioning of that AI system, including as regards software updates': !!bool true # Art. 13(3)(e)
-
-accuracy_robustness_cybersecurity:
-  'Appropriate level of accuracy': !!bool true # Art. 15(1)
-  'Appropriate level of robustness': !!bool true # Art. 15(1)
-  'Appropriate level of cybersecurity': !!bool true # Art. 15(1)
-  'Use of relevant accuracy metrics': !!bool true # Art. 15(2)
-  'Maximum possible resilience regarding errors, faults or inconsistencies that may occur within the system or the environment in which the system operates, in particular due to their interaction with natural persons or other systems. Technical and organisational measures shall be taken towards this regard': !!bool true # Art. 15(4)
-  'Measures to prevent, detect, respond to, resolve and control for attacks trying to manipulate the training dataset (data poisoning), or pre-trained components used in training (model poisoning), inputs designed to cause the model to make a mistake (adversarial examples or model evasion), confidentiality attacks or model flaws': !!bool true # Art. 15(5)
-
-quality_management_system:
-  'Examination, test and validation procedures to be carried out before, during and after the development of the high-risk AI system, and the frequency with which they have to be carried out': !!bool true # Art. 17(1)(d)
-
-transparency_obligations:
-  'Providers of AI systems, including GPAI systems, generating synthetic audio, image, video or text content, shall ensure the outputs of the AI system are marked in a machine-readable format and detectable as artificially generated or manipulated': !!bool true # Art. 50(2)
-  'Providers shall ensure their technical solutions are effective, interoperable, robust and reliable as far as this is technically feasible, taking into account specificities and limitations of different types of content, costs of implementation and the generally acknowledged state-of-the-art, as may be reflected in relevant technical standards': !!bool true # Art. 50(2)
-
-classification_of_gpai_models:
-  'Whether model has high impact capabilities evaluated on the basis of appropriate technical tools and methodologies, including indicators and benchmarks': !!bool true # Art. 51(1)(a)
-  'Cumulative compute used for training measured in floating point operations (FLOPs)': !!bool true # Art. 51(2)
-
-obligations_for_providers_of_gpai_models:
-  'The tasks that the model is intended to perform and the type and nature of AI systems in which it can be integrated': !!bool true # Art. 53; Annex XI(1)(1)(a)
-  'Acceptable use policies applicable': !!bool true # Art. 53; Annex XI(1)(1)(b)
-  'The date of release and methods of distribution': !!bool true # Art. 53; Annex XI(1)(1)(c)
-  'The architecture and number of parameters': !!bool true # Art. 53; Annex XI(1)(1)(d)
-  'Modality (e.g. text, image) and format of inputs and outputs': !!bool true # Art. 53; Annex XI(1)(1)(e)
-  'The license': !!bool true # Art. 53; Annex XI(1)(1)(f)
-  'Training methodologies and techniques': !!bool true # Art. 53; Annex XI(1)(2)(b)
-  'Key design choices including the rationale and assumptions made': !!bool true # Art. 53; Annex XI(1)(2)(b)
-  'What the model is designed to optimise for': !!bool true # Art. 53; Annex XI(1)(2)(b)
-  'The relevance of the different parameters, as applicable': !!bool true # Art. 53; Annex XI(1)(2)(b)
-  'Information on the data used for training, testing and validation: type of data': !!bool true # Art. 53; Annex XI(1)(2)(c)
-  'Information on the data used for training, testing and validation: provenance of data': !!bool true # Art. 53; Annex XI(1)(2)(c)
-  'Information on the data used for training: curation methodologies (e.g. cleaning, filtering etc)': !!bool true # Art. 53; Annex XI(1)(2)(c)
-  'Information on the data used for training: the number of data points': !!bool true # Art. 53; Annex XI(1)(2)(c)
-  'Information on the data used for training: data points scope and main characteristics applicable': !!bool true # Art. 53; Annex XI(1)(2)(c)
-  'Information on the data used for training: how the data was obtained and selected': !!bool true # Art. 53; Annex XI(1)(2)(c)
-  'Information on the data used for training: all other measures to detect the unsuitability of data sources and methods to detect identifiable biases, where applicable': !!bool true # Art. 53; Annex XI(1)(2)(c)
-  'The computational resources used to train the model (e.g. number of floating point operations – FLOPs), training time, and other relevant details related to the training': !!bool true # Art. 53; Annex XI(1)(2)(d)
-  'Known or estimated energy consumption of the model; in case not known, this could be based on information about computational resources used': !!bool true # Art. 53; Annex XI(1)(2)(e)
-  'Detailed description of the evaluation strategies, including evaluation results, on the basis of available public evaluation protocols and tools or otherwise of other evaluation methodologies. Evaluation strategies shall include evaluation criteria, metrics and the methodology on the identification of limitations': !!bool true # Art. 53; Annex XI(2)(1)
-  'Where applicable, detailed description of the measures put in place for the purpose of conducting internal and/or external adversarial testing (e.g. red teaming), model adaptations, including alignment and fine-tuning': !!bool true # Art. 53; Annex XI(2)(2)
-
-obligations_for_providers_of_gpai_models_with_systemic_risk:
-  'Perform model evaluation in accordance with standardised protocols and tools reflecting the state of the art, including conducting and documenting adversarial testing of the model with a view to identify and mitigate systemic risk': !!bool true # Art. 55(1)(a)
-  'Assess and mitigate possible systemic risks at Union level, including their sources, that may stem from the development': !!bool true # Art. 55(1)(b)
-  'Ensure an adequate level of cybersecurity protection for the GPAI model with systemic risk and the physical infrastructure of the mode': !!bool true # Art. 55(1)(d)
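The deleted model_cc.md above stored each requirement as a single quoted statement mapped to a boolean, while model_cc.yaml below keys every requirement by a short identifier and moves the statement under verbose. A rough sketch of that conversion; the short key names have to be supplied by hand, so the mapping shown here is an assumption for illustration only:

import yaml

def to_card_entry(statement, met, short_key):
    # Old format: {'<long statement>': bool}; new format keys the statement
    # under a short identifier with the text kept in 'verbose'.
    return {short_key: {'verbose': statement, 'value': bool(met)}}

old_item = {'Known or reasonably foreseeable risks the model can pose to health or safety when used for intended purpose': True}
statement, met = next(iter(old_item.items()))
section = {'risk_management_system': to_card_entry(statement, met, 'foreseeable_risks')}
print(yaml.safe_dump(section, sort_keys=False))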
model_cc.yaml
ADDED
@@ -0,0 +1,203 @@
|
1 |
+
risk_management_system:
|
2 |
+
foreseeable_risks: # Art. 9(2)(a)
|
3 |
+
verbose: 'Known or reasonably foreseeable risks the model can pose to health or safety when used for intended purpose'
|
4 |
+
value: !!bool false
|
5 |
+
evaluation: # Art. 9(2)(b)
|
6 |
+
verbose: 'Estimation and evaluation of risks when model used for intended purpose'
|
7 |
+
value: !!bool false
|
8 |
+
misuse: # Art. 9(2)(b)
|
9 |
+
verbose: 'Estimation and evaluation of risks when model used under conditions of reasonably foreseeable misuse'
|
10 |
+
value: !!bool false
|
11 |
+
testing_performance: # Art. 9(6)
|
12 |
+
verbose: 'Testing to ensure model performs consistently for intended purpose'
|
13 |
+
value: !!bool false
|
14 |
+
testing_compliance: # Art. 9(6)
|
15 |
+
verbose: 'Testing to ensure model complies with Act'
|
16 |
+
value: !!bool false
|
17 |
+
testing_benchmark: # Art. 9(8)
|
18 |
+
verbose: 'Testing against prior defined metrics appropriate to intended purpose'
|
19 |
+
value: !!bool false
|
20 |
+
testing_probabilistic: # Art. 9(8)
|
21 |
+
verbose: 'Testing against probabilistic thresholds appropriate to intended purpose'
|
22 |
+
value: !!bool false
|
23 |
+
|
24 |
+
technical_documentation:
|
25 |
+
pre_trained_elements: # Art. 11; Annex IV(2)(a)
|
26 |
+
verbose: 'Model has technical documentation that describes pre-trained elements of model provided by third parties and how used, integrated or modified'
|
27 |
+
value: !!bool false
|
28 |
+
logic: # Art. 11; Annex IV(2)(b)
|
29 |
+
verbose: 'Model has technical documentation that describes general logic of model'
|
30 |
+
value: !!bool false
|
31 |
+
design_choices: # Art. 11; Annex IV(2)(b)
|
32 |
+
verbose: 'Model has technical documentation that describes key design choices including rationale and assumptions made, including with regard to persons or groups on which model intended to be used'
|
33 |
+
value: !!bool false
|
34 |
+
classification_choices: # Art. 11; Annex IV(2)(b)
|
35 |
+
verbose: 'Model has technical documentation that describes main classification choices'
|
36 |
+
value: !!bool false
|
37 |
+
parameters: # Art. 11; Annex IV(2)(b)
|
38 |
+
verbose: 'What model is designed to optimise for and relevance of its different parameters'
|
39 |
+
value: !!bool false
|
40 |
+
expected_output: # Art. 11; Annex IV(2)(b)
|
41 |
+
verbose: 'Description of the expected output and output quality of the system'
|
42 |
+
value: !!bool false
|
43 |
+
act_compliance: # Art. 11; Annex IV(2)(b)
|
44 |
+
verbose: 'Decisions about any possible trade-off made regarding the technical solutions adopted to comply with the requirements set out in Title III, Chapter 2'
|
45 |
+
value: !!bool false
|
46 |
+
human_oversight: # Art. 11; Annex IV(2)(e)
|
47 |
+
verbose: 'Assessment of the human oversight measures needed in accordance with Article 14, including an assessment of the technical measures needed to facilitate the interpretation of the outputs of AI systems by the deployers, in accordance with Articles 13(3)(d)'
|
48 |
+
value: !!bool false
|
49 |
+
validation: # Art. 11; Annex IV(2)(g)
|
50 |
+
verbose: 'Validation and testing procedures used, including information about the validation and testing data used and their main characteristics; metrics used to measure accuracy, robustness and compliance with other relevant requirements set out in Title III, Chapter 2 as well as potentially discriminatory impacts; test logs and all test reports dated and signed by the responsible persons, including with regard to predetermined changes as referred to under point (f)'
|
51 |
+
value: !!bool false
|
52 |
+
cybersecurity: # Art. 11; Annex IV(2)(h)
|
53 |
+
verbose: 'Cybersecurity measures put in place'
|
54 |
+
value: !!bool false
|
55 |
+
|
56 |
+
transparency_and_provision_of_information_to_deployers:
|
57 |
+
intended_purpose: # Art. 13(3)(b)(i)
|
58 |
+
verbose: 'Intended purpose'
|
59 |
+
value: !!bool false
|
60 |
+
metrics: # Art. 13(3)(b)(ii)
|
61 |
+
verbose: 'Level of accuracy, including its metrics, robustness and cybersecurity referred to in Article 15 against which the high-risk AI system has been tested and validated and which can be expected, and any known and foreseeable circumstances that may have an impact on that expected level of accuracy, robustness and cybersecurity'
|
62 |
+
value: !!bool false
|
63 |
+
foreseeable_misuse: # Art. 13(3)(b)(iii)
|
64 |
+
verbose: 'Any known or foreseeable circumstance, related to the use of the high-risk AI system in accordance with its intended purpose or under conditions of reasonably foreseeable misuse, which may lead to risks to the health and safety or fundamental rights referred to in Article 9(2)'
|
65 |
+
value: !!bool false
|
66 |
+
explainability: # Art. 13(3)(b)(iv)
|
67 |
+
verbose: 'Technical capabilities and characteristics of the AI system to provide information that is relevant to explain its output'
|
68 |
+
value: !!bool false
|
69 |
+
specific_groups: # Art. 13(3)(b)(v)
|
70 |
+
verbose: 'Performance regarding specific persons or groups of persons on which the system is intended to be used'
|
71 |
+
value: !!bool false
|
72 |
+
data: # Art. 13(3)(b)(vi)
|
73 |
+
verbose: 'Specifications for the input data, or any other relevant information in terms of the training, validation and testing data sets used, taking into account the intended purpose of the AI system'
|
74 |
+
value: !!bool false
|
75 |
+
interpretability: # Art. 13(3)(b)(vii)
|
76 |
+
verbose: 'Information to enable deployers to interpret the output of the high-risk AI system and use it appropriately'
|
77 |
+
value: !!bool false
|
78 |
+
human_oversight: # Art. 13(3)(d)
|
79 |
+
verbose: 'Human oversight measures referred to in Article 14, including the technical measures put in place to facilitate the interpretation of the outputs of AI systems by the deployers'
|
80 |
+
value: !!bool false
|
81 |
+
hardware: # Art. 13(3)(e)
|
82 |
+
verbose: 'Computational and hardware resources needed, the expected lifetime of the high-risk AI system and any necessary maintenance and care measures, including their frequency, to ensure the proper functioning of that AI system, including as regards software updates'
|
83 |
+
value: !!bool false
|
84 |
+
|
85 |
+
accuracy_robustness_cybersecurity:
|
86 |
+
accuracy: # Art. 15(1)
|
87 |
+
verbose: 'Model is designed and developed to achieve appropriate level of accuracy'
|
88 |
+
value: !!bool false
|
89 |
+
robustness: # Art. 15(1)
|
90 |
+
verbose: 'Model is designed and developed to achieve appropriate level of robustness'
|
91 |
+
value: !!bool false
|
92 |
+
cybersecurity: # Art. 15(1)
|
93 |
+
verbose: 'Model is designed and developed to achieve appropriate level of cybersecurity'
|
94 |
+
value: !!bool false
|
95 |
+
accuracy_metrics: # Art. 15(2)
|
96 |
+
verbose: 'Use of relevant accuracy metrics'
|
97 |
+
value: !!bool false
|
98 |
+
fault_resilience: # Art. 15(4)
|
99 |
+
verbose: 'Maximum possible resilience regarding errors, faults or inconsistencies that may occur within the system or the environment in which the system operates, in particular due to their interaction with natural persons or other systems. Technical and organisational measures shall be taken towards this regard'
|
100 |
+
value: !!bool false
|
101 |
+
attacks: # Art. 15(5)
|
102 |
+
verbose: 'Measures were taken to prevent, detect, respond to, resolve and control for model poisoning attacks, adversarial examples or model evasion attacks (attacks using inputs designed to cause the model to make a mistake), and confidentiality attacks or model flaws'
|
103 |
+
value: !!bool false
|
104 |
+
|
105 |
+
quality_management_system:
|
106 |
+
quality_management_system: # Art. 17(1)(d)
|
107 |
+
verbose: 'Examination, test and validation procedures to be carried out before, during and after the development of the high-risk AI system, and the frequency with which they have to be carried out'
|
108 |
+
value: !!bool false
|
109 |
+
|
110 |
+
transparency_obligations:
|
111 |
+
generates_media: # Art. 50(2)
|
112 |
+
verbose: 'AI project generates synthetic audio, image, video or text content'
|
113 |
+
value: !!bool false
|
114 |
+
marked_as_generated: # Art. 50(2)
|
115 |
+
verbose: 'outputs are marked in a machine-readable format and detectable as artificially generated or manipulated'
|
116 |
+
value: !!bool false
|
117 |
+
interoperability: # Art. 50(2)
|
118 |
+
verbose: 'Providers shall ensure their technical solutions are effective, interoperable, robust and reliable as far as this is technically feasible, taking into account specificities and limitations of different types of content, costs of implementation and the generally acknowledged state-of-the-art, as may be reflected in relevant technical standards'
|
119 |
+
value: !!bool false
|
120 |
+
|
121 |
+
classification_of_gpai_models:
|
122 |
+
high_impact_capabilities: # Art. 51(1)(a)
|
123 |
+
verbose: 'Whether model has high impact capabilities evaluated on the basis of appropriate technical tools and methodologies, including indicators and benchmarks'
|
124 |
+
value: !!bool false
|
125 |
+
flops: # Art. 51(2)
|
126 |
+
verbose: 'Cumulative compute used for training measured in floating point operations (FLOPs)'
|
127 |
+
value: !!bool false
|
128 |
+
|
129 |
+
obligations_for_providers_of_gpai_models:
|
130 |
+
task: # Art. 53; Annex XI(1)(1)(a)
|
131 |
+
verbose: 'The tasks that the model is intended to perform and the type and nature of AI systems in which it can be integrated'
|
132 |
+
value: !!bool false
|
133 |
+
acceptable_use: # Art. 53; Annex XI(1)(1)(b)
|
134 |
+
verbose: 'Acceptable use policies applicable'
|
135 |
+
value: !!bool false
|
136 |
+
release_date: # Art. 53; Annex XI(1)(1)(c)
|
137 |
+
verbose: 'The date of release and methods of distribution'
|
138 |
+
value: !!bool false
|
139 |
+
architecture: # Art. 53; Annex XI(1)(1)(d)
|
140 |
+
verbose: 'The architecture and number of parameters'
|
141 |
+
value: !!bool false
|
142 |
+
input_output_modality: # Art. 53; Annex XI(1)(1)(e)
|
143 |
+
verbose: 'Modality (e.g. text, image) and format of inputs and outputs'
|
144 |
+
value: !!bool false
|
145 |
+
license: # Art. 53; Annex XI(1)(1)(f)
|
146 |
+
verbose: 'The license'
|
147 |
+
value: !!bool false
|
148 |
+
training: # Art. 53; Annex XI(1)(2)(b)
|
149 |
+
verbose: 'Training methodologies and techniques'
|
150 |
+
value: !!bool false
|
151 |
+
design_choices: # Art. 53; Annex XI(1)(2)(b)
|
152 |
+
verbose: 'Key design choices including the rationale and assumptions made'
|
153 |
+
value: !!bool false
|
154 |
+
optimized_for: # Art. 53; Annex XI(1)(2)(b)
|
155 |
+
verbose: 'What the model is designed to optimise for'
|
156 |
+
value: !!bool false
|
157 |
+
parameters: # Art. 53; Annex XI(1)(2)(b)
|
158 |
+
verbose: 'The relevance of the different parameters, as applicable'
|
159 |
+
value: !!bool false
|
160 |
+
data_type: # Art. 53; Annex XI(1)(2)(c)
|
161 |
+
verbose: 'Information on the data used for training, testing and validation: type of data'
|
162 |
+
value: !!bool false
|
163 |
+
data_provenance: # Art. 53; Annex XI(1)(2)(c)
|
164 |
+
verbose: 'Information on the data used for training, testing and validation: provenance of data'
|
165 |
+
value: !!bool false
|
166 |
+
data_curation: # Art. 53; Annex XI(1)(2)(c)
|
167 |
+
verbose: 'Information on the data used for training: curation methodologies (e.g. cleaning, filtering etc)'
|
168 |
+
value: !!bool false
|
169 |
+
data_number: # Art. 53; Annex XI(1)(2)(c)
|
170 |
+
verbose: 'Information on the data used for training: the number of data points'
|
171 |
+
value: !!bool false
|
172 |
+
data_characteristics: # Art. 53; Annex XI(1)(2)(c)
|
173 |
+
verbose: 'Information on the data used for training: data points scope and main characteristics applicable'
|
174 |
+
value: !!bool false
|
175 |
+
data_origin: # Art. 53; Annex XI(1)(2)(c)
|
176 |
+
verbose: 'Information on the data used for training: how the data was obtained and selected'
|
177 |
+
value: !!bool false
|
178 |
+
data_bias: # Art. 53; Annex XI(1)(2)(c)
|
179 |
+
verbose: 'Information on the data used for training: all other measures to detect the unsuitability of data sources and methods to detect identifiable biases, where applicable'
|
180 |
+
value: !!bool false
|
181 |
+
computation: # Art. 53; Annex XI(1)(2)(d)
|
182 |
+
verbose: 'The computational resources used to train the model (e.g. number of floating point operations – FLOPs), training time, and other relevant details related to the training'
|
183 |
+
value: !!bool false
|
184 |
+
energy_consumption: # Art. 53; Annex XI(1)(2)(e)
|
185 |
+
verbose: 'Known or estimated energy consumption of the model; in case not known, this could be based on information about computational resources used'
|
186 |
+
value: !!bool false
|
187 |
+
evaluation: # Art. 53; Annex XI(2)(1)
|
188 |
+
verbose: 'Detailed description of the evaluation strategies, including evaluation results, on the basis of available public evaluation protocols and tools or otherwise of other evaluation methodologies. Evaluation strategies shall include evaluation criteria, metrics and the methodology on the identification of limitations'
|
189 |
+
value: !!bool false
|
190 |
+
adversarial_testing: # Art. 53; Annex XI(2)(2)
|
191 |
+
verbose: 'Where applicable, detailed description of the measures put in place for the purpose of conducting internal and/or external adversarial testing (e.g. red teaming), model adaptations, including alignment and fine-tuning'
|
192 |
+
value: !!bool false
|
193 |
+
|
194 |
+
obligations_for_providers_of_gpai_models_with_systemic_risk:
|
195 |
+
evaluation: # Art. 55(1)(a)
|
196 |
+
verbose: 'Perform model evaluation in accordance with standardised protocols and tools reflecting the state of the art, including conducting and documenting adversarial testing of the model with a view to identify and mitigate systemic risk'
|
197 |
+
value: !!bool false
|
198 |
+
systematic_risk: # Art. 55(1)(b)
|
199 |
+
verbose: 'Assess and mitigate possible systemic risks at Union level, including their sources, that may stem from the development'
|
200 |
+
value: !!bool false
|
201 |
+
cybersecurity: # Art. 55(1)(d)
|
202 |
+
verbose: 'Ensure an adequate level of cybersecurity protection for the GPAI model with systemic risk and the physical infrastructure of the model'
|
203 |
+
value: !!bool false
|
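
For reference, every entry in model_cc.yaml follows the same nested layout: a section, a requirement key, and a 'verbose' description paired with a boolean 'value'. Below is a minimal sketch of how such a card could be walked to list unmet requirements; the helper name list_unmet_requirements is an assumption for illustration and is not the function used in compliance_analysis.py.

import yaml

def list_unmet_requirements(cc_path):
    # Load a compliance card and collect every requirement whose value is False.
    with open(cc_path, 'r') as f:
        cc = yaml.safe_load(f)

    unmet = []
    for section, entries in cc.items():
        if not isinstance(entries, dict):
            continue
        for key, entry in entries.items():
            if isinstance(entry, dict) and 'value' in entry:
                # Leaf entry of the form {'verbose': ..., 'value': ...}
                if entry['value'] is False:
                    unmet.append((section, key, entry.get('verbose', '')))
            elif isinstance(entry, dict):
                # One extra level of nesting (e.g. prohibited_practice -> ai_system -> ...)
                for subkey, subentry in entry.items():
                    if isinstance(subentry, dict) and subentry.get('value') is False:
                        unmet.append((f"{section}.{key}", subkey, subentry.get('verbose', '')))
    return unmet

if __name__ == '__main__':
    for section, key, verbose in list_unmet_requirements('model_cc.yaml'):
        print(f"[{section}] {key}: {verbose}")
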
project_cc.md → project_cc.yaml
RENAMED
@@ -1,82 +1,102 @@
|
|
1 |
-
|
2 |
-
|
3 |
verbose: 'AI project is operated by a small or medium-sized enterprise'
|
4 |
value: !!bool false
|
5 |
|
6 |
-
|
7 |
-
|
8 |
verbose: 'AI project is being made available on the Union market for the first time'
|
9 |
value: !!bool false
|
10 |
-
|
11 |
verbose: 'AI project is supplied for first use directly to the deployer or for own use in the Union for its intended purpose;'
|
12 |
|
13 |
-
|
14 |
-
|
15 |
verbose: 'The owner of this AI project is a natural or legal person, public authority, agency or other body that develops an AI system or a general-purpose AI model or that has an AI system or a general-purpose AI model developed and places it on the market or puts the AI system into service under its own name or trademark, whether for payment or free of charge'
|
16 |
value: !!bool false
|
17 |
-
|
18 |
verbose: "AI project is placed on the market or put into service in the Union"
|
19 |
value: !!bool false
|
20 |
-
|
21 |
-
verbose: '
|
22 |
-
value: !!bool false
|
23 |
-
|
24 |
-
verbose: '
|
25 |
-
value: !!bool
|
26 |
-
|
27 |
-
verbose: '
|
28 |
-
value: !!bool
|
29 |
-
|
30 |
-
verbose: 'AI project
|
31 |
-
value: !!bool
|
32 |
-
|
33 |
-
verbose: 'a natural or legal person in the supply chain, other than
|
34 |
-
value: !!bool
|
35 |
-
|
36 |
-
|
|
|
37 |
|
38 |
-
|
39 |
-
|
40 |
verbose: 'AI project is a machine-based system that is designed to operate with varying levels of autonomy and that may exhibit adaptiveness after deployment, and that, for explicit or implicit objectives, infers, from the input it receives, how to generate outputs such as predictions, content, recommendations, or decisions that can influence physical or virtual environments'
|
41 |
-
value: !!bool
|
42 |
|
43 |
-
|
44 |
-
|
45 |
verbose: 'AI project is an AI model, including where such an AI model is trained with a large amount of data using self-supervision at scale, that displays significant generality and is capable of competently performing a wide range of distinct tasks regardless of the way the model is placed on the market and that can be integrated into a variety of downstream systems or applications, except AI models that are used for research, development or prototyping activities before they are placed on the market'
|
46 |
-
value: !!bool
|
47 |
|
48 |
-
|
49 |
-
|
50 |
verbose: 'AI project is or was specifically developed and put into service for the sole purpose of scientific research and development'
|
51 |
-
value: !!bool
|
52 |
pre_market: # Art. 2(8)
|
53 |
verbose: 'AI project strictly consists of research, testing or development activity of the sort that takes place prior to their being placed on the market or put into service'
|
54 |
-
value: !!bool
|
55 |
-
|
56 |
verbose: 'AI project involves AI models that are released under a free and open-source licence that allows for the access, usage, modification, and distribution of the model, and whose parameters, including the weights, the information on the model architecture, and the information on model usage, are made publicly available. This exception shall not apply to general purpose AI models with systemic risks'
|
57 |
-
value: !!bool
|
58 |
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
|
79 |
-
|
80 |
safety_component: # Art. 6(1)(a)
|
81 |
verbose: 'AI project is intended to be used as a safety component of a product'
|
82 |
value: !!bool false
|
@@ -118,23 +138,23 @@ high_risk_ai_system_status:
|
|
118 |
value: !!bool false
|
119 |
filter_exception_rights: # Art. 6(3)
|
120 |
verbose: 'The AI project does not pose a significant risk of harm to the health, safety or fundamental rights of natural persons, including by not materially influencing the outcome of decision making'
|
121 |
-
value: !!bool
|
122 |
filter_exception_narrow: # Art. 6(3)(a)
|
123 |
verbose: 'The AI project is intended to perform a narrow procedural task'
|
124 |
value: !!bool false
|
125 |
-
|
126 |
verbose: 'the AI project is intended to improve the result of a previously completed human activity'
|
127 |
value: !!bool false
|
128 |
filter_exception_deviation: # Art. 6(3)(c)
|
129 |
verbose: 'the AI system is intended to detect decision-making patterns or deviations from prior decision-making patterns and is not meant to replace or influence the previously completed human assessment, without proper human review'
|
130 |
value: !!bool false
|
131 |
-
|
132 |
verbose: 'the AI system is intended to perform a preparatory task to an assessment relevant for the purposes of the use cases listed in Annex III.'
|
133 |
value: !!bool false
|
134 |
|
135 |
risk_management_system:
|
136 |
established: # Article 9
|
137 |
-
verbose: 'Risk management system has been established, implemented, documented and maintained for AI
|
138 |
value: !!bool false
|
139 |
lifecycle: # Art. 9(2)
|
140 |
verbose: 'Risk management system (high-risk AI system) has been planned, run, reviewed, and updated, throughout the entire lifecycle of AI system'
|
@@ -224,74 +244,173 @@ record_keeping:
|
|
224 |
input: # Art. 12(2)(c)
|
225 |
verbose: 'For the remote biometric identification systems high-risk AI systems referred to in point 1 (a), of Annex III, the logging capabilities shall provide, at a minimum, the input data for which the search has led to a match'
|
226 |
value: !!bool false
|
227 |
-
|
228 |
|
229 |
transparency_and_provision_of_information_to_deployers:
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
|
246 |
human_oversight:
|
247 |
-
|
248 |
-
|
249 |
-
|
250 |
-
|
251 |
-
|
252 |
-
|
253 |
-
|
254 |
-
|
255 |
-
|
256 |
|
257 |
accuracy_robustness_cybersecurity:
|
258 |
-
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
-
|
264 |
-
|
265 |
-
|
266 |
|
267 |
quality_management_system:
|
268 |
-
|
269 |
-
|
270 |
-
|
271 |
-
|
272 |
|
273 |
transparency_obligations:
|
274 |
-
|
275 |
-
|
276 |
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
|
281 |
gpai_model_provider_obligations:
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
|
289 |
-
|
290 |
-
|
291 |
-
|
292 |
|
293 |
-
|
294 |
-
|
295 |
-
|
296 |
-
|
297 |
|
|
|
1 |
+
smb:
|
2 |
+
smb: # Art. 11(1)
|
3 |
verbose: 'AI project is operated by a small or medium-sized enterprise'
|
4 |
value: !!bool false
|
5 |
|
6 |
+
eu_market:
|
7 |
+
placed_on_market: # Art. 3(9)
|
8 |
verbose: 'AI project is being made available on the Union market for the first time'
|
9 |
value: !!bool false
|
10 |
+
put_into_service: #Art. 3(11)
|
11 |
verbose: 'AI project is supplied for first use directly to the deployer or for own use in the Union for its intended purpose;'
|
12 |
|
13 |
+
operator_role:
|
14 |
+
provider: # Art. 2
|
15 |
verbose: 'The owner of this AI project is a natural or legal person, public authority, agency or other body that develops an AI system or a general-purpose AI model or that has an AI system or a general-purpose AI model developed and places it on the market or puts the AI system into service under its own name or trademark, whether for payment or free of charge'
|
16 |
value: !!bool false
|
17 |
+
on_market: # Art 2
|
18 |
verbose: "AI project is placed on the market or put into service in the Union"
|
19 |
value: !!bool false
|
20 |
+
deployer: # Art. 2
|
21 |
+
verbose: 'AI project operator is a natural or legal person, public authority, agency or other body using an AI system under its authority except where the AI system is used in the course of a personal non-professional activity'
|
22 |
+
value: !!bool false
|
23 |
+
eu_located: # Art. 2
|
24 |
+
verbose: 'AI project operator has its place of establishment or location within the Union'
|
25 |
+
value: !!bool false
|
26 |
+
output_used: # Art. 2
|
27 |
+
verbose: 'The output produced by the AI system is used in the Union'
|
28 |
+
value: !!bool false
|
29 |
+
importer: # Art. 2
|
30 |
+
verbose: 'AI project operator is a natural or legal person located or established in the Union that places on the market an AI system that bears the name or trademark of a natural or legal person established in a third country'
|
31 |
+
value: !!bool false
|
32 |
+
distributor:
|
33 |
+
verbose: 'AI project operator is a natural or legal person in the supply chain, other than a provider or the importer, that makes an AI system available on the Union market'
|
34 |
+
value: !!bool false # Art. 2
|
35 |
+
product_manufacturer:
|
36 |
+
verbose: 'AI project operator is a product manufacturer'
|
37 |
+
value: !!bool false # Art. 2
|
38 |
|
39 |
+
ai_system:
|
40 |
+
ai_system: # Art. 3(1)
|
41 |
verbose: 'AI project is a machine-based system that is designed to operate with varying levels of autonomy and that may exhibit adaptiveness after deployment, and that, for explicit or implicit objectives, infers, from the input it receives, how to generate outputs such as predictions, content, recommendations, or decisions that can influence physical or virtual environments'
|
42 |
+
value: !!bool false
|
43 |
|
44 |
+
gpai_model:
|
45 |
+
gpai_model: # Art. 3(63)
|
46 |
verbose: 'AI project is an AI model, including where such an AI model is trained with a large amount of data using self-supervision at scale, that displays significant generality and is capable of competently performing a wide range of distinct tasks regardless of the way the model is placed on the market and that can be integrated into a variety of downstream systems or applications, except AI models that are used for research, development or prototyping activities before they are placed on the market'
|
47 |
+
value: !!bool false
|
48 |
|
49 |
+
excepted:
|
50 |
+
scientific: # Art. 2(6)
|
51 |
verbose: 'AI project is or was specifically developed and put into service for the sole purpose of scientific research and development'
|
52 |
+
value: !!bool false
|
53 |
pre_market: # Art. 2(8)
|
54 |
verbose: 'AI project strictly consists of research, testing or development activity of the sort that takes place prior to their being placed on the market or put into service'
|
55 |
+
value: !!bool false
|
56 |
+
open_source_ai_system: # Art. 2(11)
|
57 |
+
verbose: 'AI project is released under free and open-source licences'
|
58 |
+
value: !!bool false
|
59 |
+
open_source_gpai_model: # Art. 53(2)
|
60 |
verbose: 'AI project involves AI models that are released under a free and open-source licence that allows for the access, usage, modification, and distribution of the model, and whose parameters, including the weights, the information on the model architecture, and the information on model usage, are made publicly available. This exception shall not apply to general purpose AI models with systemic risks'
|
61 |
+
value: !!bool false
|
62 |
|
63 |
+
prohibited_practice:
|
64 |
+
ai_system:
|
65 |
+
manipulative: # Art. 5(1)(a)
|
66 |
+
verbose: 'The AI project deploys subliminal or purposefully manipulative or deceptive techniques, with the objective or effect of materially distorting the behavior of people by appreciably impairing their ability to make an informed decision, thereby causing them to take a decision that they would not have otherwise taken in a manner that causes or is reasonably likely to cause significant harm'
|
67 |
+
value: !!bool false
|
68 |
+
exploit_vulnerable: # Art. 5(1)(b)
|
69 |
+
verbose: 'The AI project exploits the vulnerabilities of natural people due to their age, disability or a specific social or economic situation, with the objective or effect of materially distorting their behaviour in a manner that causes or is reasonably likely to cause significant harm'
|
70 |
+
value: !!bool false
|
71 |
+
social_score: # Art. 5(1)(c)
|
72 |
+
verbose: 'The AI project is for the evaluation or classification of natural people over a certain period of time based on their social behaviour or known, inferred or predicted personal or personality characteristics, with the social score leading to at least one of the following: (i) detrimental or unfavourable treatment of certain natural people in social contexts that are unrelated to the contexts in which the data was originally generated or collected; (ii) detrimental or unfavourable treatment of certain natural people that is unjustified or disproportionate to their social behaviour or its gravity'
|
73 |
+
value: !!bool false
|
74 |
+
crime_prediction: # Art. 5(1)(d)
|
75 |
+
verbose: 'This AI project makes risk assessments of natural persons in order to assess or predict the risk of them committing a criminal offence, based solely on the profiling of the natural person or on assessing their personality traits and characteristics (and does not support the human assessment of the involvement of a person in a criminal activity, which is already based on objective and verifiable facts directly linked to a criminal activity)'
|
76 |
+
value: !!bool false
|
77 |
+
untargeted_face: # Art. 5(1)(e)
|
78 |
+
verbose: 'This AI project creates or expands facial recognition databases through the untargeted scraping of facial images from the internet or CCTV footage'
|
79 |
+
value: !!bool false
|
80 |
+
emotion_prediction: # Art. 5(1)(f)
|
81 |
+
verbose: 'The AI project infers emotions of a natural person in the areas of workplace and education institutions and is not intended to be put in place or into the market for medical or safety reasons'
|
82 |
+
value: !!bool false
|
83 |
+
biometric:
|
84 |
+
categorization: # Art. 5(1)(g)
|
85 |
+
verbose: 'The AI project involves the use of biometric categorisation systems that categorise individually natural persons based on their biometric data to deduce or infer their race, political opinions, trade union membership, religious or philosophical beliefs, sex life or sexual orientation; this prohibition does not cover any labelling or filtering of lawfully acquired biometric datasets, such as images, based on biometric data or categorizing of biometric data in the area of law enforcement'
|
86 |
+
value: !!bool false
|
87 |
+
real_time: # Art. 5(1)(h)
|
88 |
+
verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement'
|
89 |
+
value: !!bool false
|
90 |
+
real_time_exception_victim: # Art. 5(1)(h)
|
91 |
+
verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement strictly for the targeted search for specific victims of abduction, trafficking in human beings or sexual exploitation of human beings, or the search for missing persons'
|
92 |
+
value: !!bool false
|
93 |
+
real_time_exception_threat:
|
94 |
+
verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement strictly for the prevention of a specific, substantial and imminent threat to the life or physical safety of natural persons or a genuine and present or genuine and foreseeable threat of a terrorist attack'
|
+
value: !!bool false
|
95 |
+
real_time_exception_investigation:
|
96 |
+
verbose: 'The AI project involves use of ‘real-time’ remote biometric identification systems in publicly accessible spaces for the purposes of law enforcement strictly for the localisation or identification of a person suspected of having committed a criminal offence, for the purpose of conducting a criminal investigation or prosecution or executing a criminal penalty for offences referred to in Annex II and punishable in the Member State concerned by a custodial sentence or a detention order for a maximum period of at least four years.'
|
97 |
+
value: !!bool false
|
98 |
|
99 |
+
high_risk_ai_system:
|
100 |
safety_component: # Art. 6(1)(a)
|
101 |
verbose: 'AI project is intended to be used as a safety component of a product'
|
102 |
value: !!bool false
|
|
|
138 |
value: !!bool false
|
139 |
filter_exception_rights: # Art. 6(3)
|
140 |
verbose: 'The AI project does not pose a significant risk of harm to the health, safety or fundamental rights of natural persons, including by not materially influencing the outcome of decision making'
|
141 |
+
value: !!bool false
|
142 |
filter_exception_narrow: # Art. 6(3)(a)
|
143 |
verbose: 'The AI project is intended to perform a narrow procedural task'
|
144 |
value: !!bool false
|
145 |
+
filter_exception_human: # Art. 6(3)(b)
|
146 |
verbose: 'the AI project is intended to improve the result of a previously completed human activity'
|
147 |
value: !!bool false
|
148 |
filter_exception_deviation: # Art. 6(3)(c)
|
149 |
verbose: 'the AI system is intended to detect decision-making patterns or deviations from prior decision-making patterns and is not meant to replace or influence the previously completed human assessment, without proper human review'
|
150 |
value: !!bool false
|
151 |
+
filter_exception_prep: # Art. 6(3)(d)
|
152 |
verbose: 'the AI system is intended to perform a preparatory task to an assessment relevant for the purposes of the use cases listed in Annex III.'
|
153 |
value: !!bool false
|
154 |
|
155 |
risk_management_system:
|
156 |
established: # Article 9
|
157 |
+
verbose: 'Risk management system has been established, implemented, documented and maintained for AI project'
|
158 |
value: !!bool false
|
159 |
lifecycle: # Art. 9(2)
|
160 |
verbose: 'Risk management system (high-risk AI system) has been planned, run, reviewed, and updated, throughout the entire lifecycle of AI system'
|
|
|
244 |
input: # Art. 12(2)(c)
|
245 |
verbose: 'For the remote biometric identification systems high-risk AI systems referred to in point 1 (a), of Annex III, the logging capabilities shall provide, at a minimum, the input data for which the search has led to a match'
|
246 |
value: !!bool false
|
247 |
+
identification: # Art. 12(2)(d)
|
248 |
+
verbose: 'For the remote biometric identification systems high-risk AI systems referred to in point 1 (a), of Annex III, the logging capabilities shall provide, at a minimum, the identification of the natural persons involved in the verification of the results, as referred to in Article 14(5)'
|
249 |
+
value: !!bool false
|
250 |
|
251 |
transparency_and_provision_of_information_to_deployers:
|
252 |
+
interpretability: # Art. 13(1)
|
253 |
+
verbose: 'AI system is designed and developed to ensure operation is sufficiently transparent for deployers to interpret output and use appropriately'
|
254 |
+
value: !!bool false
|
255 |
+
compliance: # Art. 13(1)
|
256 |
+
verbose: 'AI system is designed and developed with transparency to ensure compliance with provider and deployer obligations in Section 3'
|
257 |
+
value: !!bool false
|
258 |
+
instructions: # Art. 13(2)
|
259 |
+
verbose: 'AI system is accompanied by instructions for use in appropriate digital format or otherwise, with concise, complete, correct, clear, relevant, accessible, and comprehensible information for deployers'
|
260 |
+
value: !!bool false
|
261 |
+
contact_details: # Art. 13(3)(a)
|
262 |
+
verbose: 'Instructions include provider identity and contact details, and if applicable, authorized representative details'
|
263 |
+
value: !!bool false
|
264 |
+
characteristics: # Art. 13(3)(b)(i)
|
265 |
+
verbose: 'Instructions include AI system characteristics, capabilities, performance limitations, and intended purpose'
|
266 |
+
value: !!bool false
|
267 |
+
metrics: # Art. 13(3)(b)(ii)
|
268 |
+
verbose: 'Instructions include accuracy metrics, robustness, cybersecurity, and potential impacts on these'
|
269 |
+
value: !!bool false
|
270 |
+
foreseeable: # Art. 13(3)(b)(iii)
|
271 |
+
verbose: 'Instructions include foreseeable circumstances that may risk health, safety, or fundamental rights'
|
272 |
+
value: !!bool false
|
273 |
+
output: # Art. 13(3)(b)(iv)
|
274 |
+
verbose: 'Instructions include technical capabilities to provide information relevant to explaining output'
|
275 |
+
value: !!bool false
|
276 |
+
specific_persons: # Art. 13(3)(b)(v)
|
277 |
+
verbose: 'Instructions include performance regarding specific persons or groups, if applicable'
|
278 |
+
value: !!bool false
|
279 |
+
data: # Art. 13(3)(b)(vi)
|
280 |
+
verbose: 'Instructions include input data specifications and relevant training, validation, and testing dataset information'
|
281 |
+
value: !!bool false
|
282 |
+
deployers: # Art. 13(3)(b)(vii)
|
283 |
+
verbose: 'Instructions include information to enable deployers to interpret and appropriately use AI system output'
|
284 |
+
value: !!bool false
|
285 |
+
changes: # Art. 13(3)(c)
|
286 |
+
verbose: 'Instructions include predetermined changes to AI system and its performance since initial conformity assessment'
|
287 |
+
value: !!bool false
|
288 |
+
oversight_measures: # Art. 13(3)(d)
|
289 |
+
verbose: 'Instructions include human oversight measures and technical measures for output interpretation'
|
290 |
+
value: !!bool false
|
291 |
+
hardware: # Art. 13(3)(e)
|
292 |
+
verbose: 'Instructions include computational and hardware resource needs, expected lifetime, and maintenance measures'
|
293 |
+
value: !!bool false
|
294 |
+
logging: # Art. 13(3)(f)
|
295 |
+
verbose: 'Instructions include description of mechanisms for deployers to collect, store, and interpret logs, if applicable'
|
296 |
+
value: !!bool false
|
297 |
|
298 |
human_oversight:
|
299 |
+
designed: # Art. 14(1)
|
300 |
+
verbose: 'AI system is designed and developed to be effectively overseen by natural persons during use, including appropriate human-machine interface tools'
|
301 |
+
value: !!bool false
|
302 |
+
minimize_risks: # Art. 14(2)
|
303 |
+
verbose: 'Human oversight aims to prevent or minimize risks to health, safety, or fundamental rights during intended use or foreseeable misuse'
|
304 |
+
value: !!bool false
|
305 |
+
commensurate: # Art. 14(3)
|
306 |
+
verbose: 'Oversight measures are commensurate with risks, autonomy level, and use context, ensured through provider-built measures and/or deployer-implemented measures'
|
307 |
+
value: !!bool false
|
308 |
+
understandable: # Art. 14(4)
|
309 |
+
verbose: 'AI system enables assigned persons to understand its capacities and limitations, monitor operation, and detect anomalies'
|
310 |
+
value: !!bool false
|
311 |
+
automation_bias: # Art. 14(4)(a)
|
312 |
+
verbose: 'AI system enables assigned persons to be aware of potential automation bias'
|
313 |
+
value: !!bool false
|
314 |
+
interpretability: # Art. 14(4)(c)
|
315 |
+
verbose: 'AI system enables assigned persons to correctly interpret its output'
|
316 |
+
value: !!bool false
|
317 |
+
override: # Art. 14(4)(d)
|
318 |
+
verbose: 'AI system enables assigned persons to decide not to use it or override its output'
|
319 |
+
value: !!bool false
|
320 |
+
stop_button: # Art. 14(4)(e)
|
321 |
+
verbose: 'AI system enables assigned persons to intervene or halt the system through a stop button or similar procedure'
|
322 |
+
value: !!bool false
|
323 |
+
verification: # Art. 14(5)
|
324 |
+
verbose: 'For Annex III point 1(a) systems, actions or decisions require verification by at least two competent persons, with exceptions for law enforcement, migration, border control, or asylum'
|
325 |
+
value: !!bool false
|
326 |
|
327 |
accuracy_robustness_cybersecurity:
|
328 |
+
design: # Art. 15(1)
|
329 |
+
verbose: 'AI system is designed and developed to achieve appropriate levels of accuracy, robustness, and cybersecurity, performing consistently throughout its lifecycle'
|
330 |
+
value: !!bool false
|
331 |
+
metrics_in_instructions: # Art. 15(3)
|
332 |
+
verbose: 'Accuracy levels and relevant metrics are declared in accompanying instructions of use'
|
333 |
+
value: !!bool false
|
334 |
+
error_resilience: # Art. 15(4)
|
335 |
+
verbose: 'AI system is resilient against errors, faults, or inconsistencies, with technical and organizational measures implemented'
|
336 |
+
value: !!bool false
|
337 |
+
bias: # Art. 15(4)
|
338 |
+
verbose: 'AI system that continues learning after deployment is designed to eliminate or reduce risk of biased outputs influencing future operations'
|
339 |
+
value: !!bool false
|
340 |
+
unauthorized_use: # Art. 15(5)
|
341 |
+
verbose: 'AI system is resilient against unauthorized third-party attempts to alter use, outputs, or performance'
|
342 |
+
value: !!bool false
|
343 |
+
cybersecurity_solutions: # Art. 15(5)
|
344 |
+
verbose: 'Cybersecurity solutions are appropriate to relevant circumstances and risks'
|
345 |
+
value: !!bool false
|
346 |
+
ai_vulnerabilities: # Art. 15(5)
|
347 |
+
verbose: 'Technical solutions address AI-specific vulnerabilities, including measures against data poisoning, model poisoning, adversarial examples, and confidentiality attacks'
|
348 |
+
value: !!bool false
|
349 |
|
350 |
quality_management_system:
|
351 |
+
quality_management_system: # Art. 17(1)(a)
|
352 |
+
verbose: 'Initiative is subject to a quality management system with strategy for regulatory compliance'
|
353 |
+
value: !!bool false
|
354 |
+
design: # Art. 17(1)(b)
|
355 |
+
verbose: 'System includes techniques, procedures, and actions for design, control, and verification of high-risk AI system'
|
356 |
+
value: !!bool false
|
357 |
+
quality_control: # Art. 17(1)(c)
|
358 |
+
verbose: 'System includes techniques, procedures, and actions for development, quality control, and quality assurance'
|
359 |
+
value: !!bool false
|
360 |
+
testing: # Art. 17(1)(d)
|
361 |
+
verbose: 'System includes examination, test, and validation procedures before, during, and after development'
|
362 |
+
value: !!bool false
|
363 |
|
364 |
transparency_obligations:
|
365 |
+
synthetic_content: # Art. 50(2)
|
366 |
+
verbose: 'Providers of AI systems generating synthetic content ensure outputs are marked and detectable as artificially generated'
|
367 |
+
value: !!bool false
|
368 |
+
marking_solutions: # Art. 50(2)
|
369 |
+
verbose: 'Technical solutions for marking are effective, interoperable, robust, and reliable'
|
370 |
+
value: !!bool false
|
371 |
|
372 |
+
gpai_model_systematic_risk:
|
373 |
+
evaluation: # Art. 51
|
374 |
+
verbose: 'Model impact capabilities were evaluated using appropriate technical tools and methodologies'
|
375 |
+
value: !!bool false
|
376 |
+
flops: # Art. 51(2)
|
377 |
+
verbose: 'Cumulative compute for training measured in floating point operations (FLOPs)'
|
378 |
+
value: !!bool false
|
379 |
|
380 |
gpai_model_provider_obligations:
|
381 |
+
intended_uses: # Art. 53(1)(a); Annex XI(1)(1)(a-c)
|
382 |
+
verbose: 'Provide information on intended tasks, integration types, and acceptable use policies'
|
383 |
+
value: !!bool false
|
384 |
+
model_architecture: # Art. 53(1)(a); Annex XI(1)(1)(d-f)
|
385 |
+
verbose: 'Provide details on model architecture, parameters, input/output modalities, and license'
|
386 |
+
value: !!bool false
|
387 |
+
training_methodologies: # Art. 53(1)(b); Annex XI(1)(2)(b)
|
388 |
+
verbose: 'Describe training methodologies, key design choices, and optimization goals'
|
389 |
+
value: !!bool false
|
390 |
+
data: # Art. 53(1)(b); Annex XI(1)(2)(c)
|
391 |
+
verbose: 'Provide information on training, testing, and validation data'
|
392 |
+
value: !!bool false
|
393 |
+
computation: # Art. 53(1)(b); Annex XI(1)(2)(d-e)
|
394 |
+
verbose: 'Disclose computational resources and energy consumption for training'
|
395 |
+
value: !!bool false
|
396 |
+
evaluation: # Art. 53(1)(b); Annex XI(2)(1-2)
|
397 |
+
verbose: 'Describe evaluation strategies, results, and adversarial testing measures'
|
398 |
+
value: !!bool false
|
399 |
+
general_description: # Art. 53(1)(b); Annex XII(1)(a-h)
|
400 |
+
verbose: 'To downstream providers, provide general description of GPAI model, including intended tasks and integration types'
|
401 |
+
value: !!bool false
|
402 |
+
development_process: # Art. 53(1)(b); Annex XII(2)(a-c)
|
403 |
+
verbose: 'To downstream providers, describe model elements, development process, and integration requirements'
|
404 |
+
value: !!bool false
|
405 |
|
406 |
+
gpai_obligations_for_systemic_risk_models:
|
407 |
+
evaluation: # Art. 55(1)(a)
|
408 |
+
verbose: 'Perform model evaluation using standardized protocols and conduct adversarial testing'
|
409 |
+
value: !!bool false
|
410 |
+
mitigation: # Art. 55(1)(b)
|
411 |
+
verbose: 'Assess and mitigate possible systemic risks at Union level'
|
412 |
+
value: !!bool false
|
413 |
+
cybersecurity: # Art. 55(1)(d)
|
414 |
+
verbose: 'Ensure adequate cybersecurity protection for the model and infrastructure'
|
415 |
+
value: !!bool false
|
416 |
|
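
Because these cards are edited by hand, a small structural check can catch entries that are missing a 'verbose' or 'value' field before the compliance analysis runs. The sketch below is illustrative only; the function name validate_card_structure and its reporting format are assumptions, not part of the committed code.

import yaml

REQUIRED_FIELDS = ('verbose', 'value')

def validate_card_structure(cc_path):
    # Return dotted paths of entries that lack a 'verbose' or 'value' field.
    with open(cc_path, 'r') as f:
        cc = yaml.safe_load(f)

    problems = []

    def walk(node, path):
        if not isinstance(node, dict):
            return
        if 'verbose' in node or 'value' in node:
            # Any dict carrying one of the leaf fields is treated as a requirement entry.
            for field in REQUIRED_FIELDS:
                if field not in node:
                    problems.append(f"{path}: missing '{field}'")
            return
        for key, child in node.items():
            walk(child, f"{path}.{key}" if path else key)

    walk(cc, '')
    return problems

if __name__ == '__main__':
    for problem in validate_card_structure('project_cc.yaml'):
        print(problem)
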