Spaces:
No application file
Upload 46 files
- dataset/leetcode_dataset.csv +0 -0
- embeddings_cache.pkl +3 -0
- projects/default.json +164 -0
- projects/default_project.json +13 -0
- projects/po.json +21 -0
- requirements.txt +13 -0
- src/modules/__init__.py +0 -0
- src/modules/__pycache__/__init__.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/.env +2 -0
- src/modules/module1_question_generation/__init__.py +0 -0
- src/modules/module1_question_generation/__pycache__/__init__.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/__pycache__/file_processing.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/__pycache__/groq_client.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/__pycache__/project_controller.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/__pycache__/tool_controller.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/app.py +237 -0
- src/modules/module1_question_generation/embeddings_cache.pkl +3 -0
- src/modules/module1_question_generation/file_processing.py +16 -0
- src/modules/module1_question_generation/groq_client.py +51 -0
- src/modules/module1_question_generation/project_controller.py +38 -0
- src/modules/module1_question_generation/prompts.py +0 -0
- src/modules/module1_question_generation/tool_controller.py +26 -0
- src/modules/module1_question_generation/tools/__init__.py +0 -0
- src/modules/module1_question_generation/tools/__pycache__/__init__.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/tools/__pycache__/tools.cpython-312.pyc +0 -0
- src/modules/module1_question_generation/tools/tools.py +35 -0
- src/modules/module1_question_generation/utils/config.py +0 -0
- src/modules/module1_question_generation/utils/helpers.py +0 -0
- src/modules/module2_relevancy/__init__.py +0 -0
- src/modules/module2_relevancy/__pycache__/__init__.cpython-312.pyc +0 -0
- src/modules/module2_relevancy/__pycache__/relevance_analyzer.cpython-312.pyc +0 -0
- src/modules/module2_relevancy/relevance_analyzer.py +254 -0
- src/modules/module3_compare/__pycache__/model.cpython-312.pyc +0 -0
- src/modules/module3_compare/embeddings_cache.pkl +3 -0
- src/modules/module3_compare/model.py +59 -0
- src/modules/module4_bias/__pycache__/bias.cpython-312.pyc +0 -0
- src/modules/module4_bias/bias.py +91 -0
- src/temp_bias.py +79 -0
- src/tracer/app.py +148 -0
- src/tracer/package/.env +2 -0
- src/tracer/package/__init__.py +0 -0
- src/tracer/package/__pycache__/__init__.cpython-312.pyc +0 -0
- src/tracer/package/__pycache__/validlm.cpython-312.pyc +0 -0
- src/tracer/package/validlm.py +193 -0
- src/tracer/tools/__init__.py +0 -0
- src/tracer/tools/tools.py +35 -0
dataset/leetcode_dataset.csv
ADDED
The diff for this file is too large to render.
embeddings_cache.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a37a15611f94584c735999d88868417ddcd3e6865b1f01413b6f644e1e8b6ca1
size 2803614
projects/default.json
ADDED
@@ -0,0 +1,164 @@
{
    "project_name": "default",
    "assertions": {
        "deterministic": [
            {
                "check_type": "contains",
                "value": "bmc"
            },
            {
                "check_type": "regex",
                "value": ".*"
            }
        ],
        "misc": [],
        "factual": false,
        "sql-only": true,
        "json-only": true
    },
    "log_history": [],
    "accuracy_history": {
        "DSA": [
            ["2025-02-23 23:08:34", 0.5624579697847366],
            ["2025-02-23 23:09:32", 0.5233337700366973],
            ["2025-02-23 23:14:15", 0.603324833241376],
            ["2025-02-23 23:15:34", 0.5898826479911804],
            ["2025-02-23 23:20:53", 0.5855140775442124],
            ["2025-02-23 23:23:21", 0.5792156517505646],
            ["2025-02-23 23:24:05", 0.6172547936439514],
            ["2025-02-23 23:26:20", 0.6511377811431884],
            ["2025-02-24 10:44:12", 0.5467980474233627],
            ["2025-02-24 14:29:43", 0.5033589959144592]
        ],
        "Technical": [
            ["2025-02-23 23:33:09", 50.0],
            ["2025-02-23 23:33:54", 60.0],
            ["2025-02-23 23:34:14", 60.0],
            ["2025-02-23 23:34:37", 45.45454545454545],
            ["2025-02-23 23:36:38", 70.0],
            ["2025-02-23 23:37:39", 54.54545454545454],
            ["2025-02-23 23:38:03", 50.0],
            ["2025-02-24 14:31:55", 90.9090909090909]
        ],
        "Behaviour": [
            ["2025-02-24 11:09:03", 0.8181818181818182],
            ["2025-02-24 11:29:39", 0.0],
            ["2025-02-24 11:30:28", 0.0],
            ["2025-02-24 11:31:35", 0.8181818181818182],
            ["2025-02-24 11:34:03", 0.5833333333333334],
            ["2025-02-24 11:38:26", 0.9],
            ["2025-02-24 12:08:28", 0.0],
            ["2025-02-24 12:11:57", 0.0],
            ["2025-02-24 12:13:39", 0.0],
            ["2025-02-24 12:16:24", 0.0],
            ["2025-02-24 12:19:08", 0.6363636363636364],
            ["2025-02-24 12:22:44", 0.5454545454545454],
            ["2025-02-24 12:34:16", 0.7272727272727273],
            ["2025-02-24 12:37:06", 0.9166666666666666],
            ["2025-02-24 12:39:09", 0.4],
            ["2025-02-24 12:50:24", 1.0]
        ]
    }
}
projects/default_project.json
ADDED
@@ -0,0 +1,13 @@
{
    "project_name": "default_project",
    "assertions": {
        "deterministic": [],
        "misc": [],
        "factual": false,
        "sql-only": false,
        "json-only": false

    },
    "log_history": [],
    "accuracy_history": []
}
projects/po.json
ADDED
@@ -0,0 +1,21 @@
{
    "project_name": "po",
    "assertions": {
        "deterministic": [],
        "misc": [],
        "factual": "",
        "sql-only": false,
        "json-only": false
    },
    "log_history": [],
    "accuracy_history": {
        "DSA": [],
        "Technical": [],
        "Behaviour": [
            ["2025-02-24 11:51:54", 0.7272727272727273]
        ]
    }
}
requirements.txt
ADDED
@@ -0,0 +1,13 @@
streamlit
python-docx
PyPDF2
groq
python-dotenv
nltk
pandas
scikit-learn==1.3.0
matplotlib==3.7.1
sentence-transformers==2.2.2
rake-nltk==1.0.6
spacy
textblob
src/modules/__init__.py
ADDED
File without changes
src/modules/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (199 Bytes).
src/modules/module1_question_generation/.env
ADDED
@@ -0,0 +1,2 @@
# GROQ_API_KEY="xai-eVZLU4OIrvTFco272DRuIyI1EoSd54eWjsKVZ4PrepUJ8WxZOwbfDnKbsoHSd96r9npTvsYdbtUzqd6x"
GROQ_API_KEY="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"
src/modules/module1_question_generation/__init__.py
ADDED
File without changes
src/modules/module1_question_generation/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (227 Bytes).
src/modules/module1_question_generation/__pycache__/file_processing.cpython-312.pyc
ADDED
Binary file (1.19 kB).
src/modules/module1_question_generation/__pycache__/groq_client.cpython-312.pyc
ADDED
Binary file (2.93 kB).
src/modules/module1_question_generation/__pycache__/project_controller.cpython-312.pyc
ADDED
Binary file (2.49 kB).
src/modules/module1_question_generation/__pycache__/tool_controller.cpython-312.pyc
ADDED
Binary file (1.48 kB).
src/modules/module1_question_generation/app.py
ADDED
@@ -0,0 +1,237 @@
import streamlit as st
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import datetime
import pandas as pd

# Adjust the system path to find project modules
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
sys.path.append(project_root)

from src.modules.module2_relevancy.relevance_analyzer import EnhancedRelevanceAnalyzer
from groq_client import GroqClient
from file_processing import extract_text_from_file
from src.modules.module3_compare.model import QuestionSimilarityModel
from src.modules.module4_bias.bias import screen_questions
from src.modules.module1_question_generation.project_controller import Project
from src.modules.module1_question_generation.tool_controller import *

DATASET_DIR = "dataset"
project_control = Project()
if 'page' not in st.session_state:
    st.session_state.page = 'main'
if 'accuracy_history' not in st.session_state:
    st.session_state['accuracy_history'] = {
        "DSA": [],
        "Technical": [],
        "Behaviour": []
    }

def main():

    if st.session_state.page == 'main':
        sidebar()
        if 'current_project' in st.session_state:
            if st.session_state['current_project']['project_name'] == 'default':
                st.title("Interview Question Generator & Analyzer")
                main_page()
        else:
            st.subheader('No project selected')
    elif st.session_state.page == 'configure':
        configure_page()

def sidebar():
    st.sidebar.title("Project Options")
    project_action = st.sidebar.selectbox("Select Action", ["Open Existing Project", "Create New Project"])
    if project_action == "Create New Project":
        new_project_name = st.sidebar.text_input("Enter Project Name")
        print('Title: ', new_project_name)
        if st.sidebar.button("Create Project") and new_project_name:
            if new_project_name in project_control.list_projects():
                st.sidebar.error("Project with this name already exists.")
            else:
                project_data = project_control.initialize_project(new_project_name)
                st.session_state["current_project"] = project_data
                st.success(f"Project '{new_project_name}' created successfully!")

    elif project_action == "Open Existing Project":
        existing_projects = project_control.list_projects()
        selected_project = st.sidebar.selectbox("Select Project", existing_projects)
        if st.sidebar.button("Open Project") and selected_project:
            project_data = project_control.load_project(selected_project)
            if project_data:
                st.session_state["current_project"] = project_data
            else:
                st.sidebar.error("Failed to load project.")
    if 'current_project' in st.session_state and st.sidebar.button('Configure Project'):
        st.session_state.page = 'configure'

def main_page():
    client = GroqClient()
    analyzer = EnhancedRelevanceAnalyzer()
    similarity_model = QuestionSimilarityModel('dataset/leetcode_dataset.csv')
    project = st.session_state["current_project"]

    st.subheader(f"Project: {project['project_name']}")

    job_role = st.text_input("Enter Job Role")
    question_type = st.selectbox("Type of questions", ["DSA", "Technical", "Behaviour"])
    jd_file = st.file_uploader("Upload Job Description (PDF/DOCX)", type=["pdf", "docx"])

    if jd_file and job_role and question_type and st.button('Get questions'):
        with st.spinner("Analyzing Job Description..."):
            jd_text = extract_text_from_file(jd_file)

            if not analyzer.check_title_jd_match(job_role, jd_text):
                st.error("⚠️ Job description doesn't match the job title! Upload a relevant JD.")
                st.stop()

            questions = client.generate_questions(job_role, jd_text, question_type)

            # Deterministic assertions
            d_results = verify_deterministic_assertions(questions, project["assertions"])
            df_results = pd.DataFrame(list(d_results.items()), columns=["Assertion Type", "Result"])
            st.table(df_results)
            question_lines = [q.strip() for q in questions.split('\n') if q.strip()]
            if question_lines and not question_lines[0][0].isdigit():
                question_lines = question_lines[1:]

            # first_five_questions = question_lines[:10]
            # remaining_questions = question_lines[5:15]
            scores = []

            if question_type == "DSA":
                similarity_results = similarity_model.check_similarity(question_lines)
                scores = similarity_results
                st.subheader("DSA questions with similarity analysis")
                score = 0
                for i, (question, result) in enumerate(zip(question_lines, similarity_results), 1):
                    st.write(f"{i}. {question}")
                    score += result["relevance_score"]
                    with st.expander(f"Similarity Analysis for Question {i}"):
                        st.write(f"Similarity Score: {result['relevance_score']:.2f}")
                        st.write(f"Best Match: {result['best_match']['title']}")
                        st.write(f"Difficulty: {result['best_match']['difficulty']}")
                        if result['matched_sources']:
                            st.write("\nSimilar Questions:")
                            for source in result['matched_sources']:
                                st.write(f"- {source['title']} (Difficulty: {source['difficulty']})")
                overall_similarity = score / len(question_lines)

                st.metric("Overall Relevance", f"{overall_similarity*100:.1f}%")
                timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                project['accuracy_history'][question_type].append((timestamp, overall_similarity))

            # if (question_type == "Technical" or question_type == "Behaviour"):

            if question_type == "Technical":
                for q in question_lines:
                    st.write(f"- {q}")
                scores = analyzer.calculate_question_scores(jd_text, question_lines)
                avg_score = sum(scores) / len(scores)

                half_avg = avg_score / 1.25
                count_above_half = sum(1 for s in scores if s > half_avg)
                overall_relevance = (count_above_half / len(scores)) * 100

                st.subheader("Analysis Results")
                st.metric("Overall Relevance", f"{overall_relevance:.1f}%")

                # Store accuracy with timestamp
                timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                project['accuracy_history'][question_type].append((timestamp, overall_relevance))

            if question_type == "Behaviour":
                valid_bias_questions, invalid_bias_questions, bias_accuracy, validity = screen_questions(question_lines)
                for i, q in enumerate(question_lines):
                    st.write(f"- {f'[Invalid {validity[i]:.2f}]' if validity[i] == 1 else f'[ Valid {validity[i]:.2f}]'} {q}")

                st.metric("Bias Accuracy", f"{bias_accuracy * 100:.1f}%")
                timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                project['accuracy_history'][question_type].append((timestamp, bias_accuracy))

            # Plot accuracy history
            if project['accuracy_history']:
                st.subheader("Accuracy History")
                timestamps, accuracies = zip(*project['accuracy_history'][question_type])
                fig, ax = plt.subplots()
                ax.plot(timestamps, accuracies, marker='o')
                ax.set_xlabel("Timestamp")
                ax.set_ylabel("Overall Relevance (%)")
                ax.set_title("Relevance Over Time")
                plt.xticks(rotation=45)
                st.pyplot(fig)

            export_data = []
            for i, (question, score) in enumerate(zip(question_lines, scores), 1):
                export_data.append(f"Q{i}. {question}")
                if question_type == "DSA":
                    export_data.append(f"Overall Score: {score['relevance_score']}")
                    export_data.append(f"Best Match: {score['best_match']['title']}")
                else:
                    export_data.append(f"Overall Score: {score}")
                export_data.append("")

            # for i, (question, score) in enumerate(zip(remaining_questions, scores[5:15]), 5):
            #     export_data.append(f"Q{i}. {question}")
            #     export_data.append("")
            project_control.save_project(project["project_name"], project)
            st.download_button(
                "Download Questions with Analysis",
                f"Job Role: {job_role}\n\n\n" + "\n".join(export_data),
                file_name=f"{job_role.replace(' ', '_')}_questions_analysis.txt",
                mime="text/plain"
            )

def configure_page():
    st.title("Project Configuration")
    project = st.session_state['current_project']
    assertion_type = st.selectbox("Select Assertion Type", ["deterministic", "factual", "misc"])
    if assertion_type == "deterministic":
        check_type = st.selectbox("Select Deterministic Check Type", ["regex", "json_format", "contains", "not-contains"])
        check_value = st.text_area("Enter pattern")
        if st.button("Add Deterministic Assertion") and check_value:
            assertion_data = {
                "check_type": check_type,
                "value": check_value,
            }
            project["assertions"]["deterministic"].append(assertion_data)

            st.success("Deterministic Assertion added.")

    elif assertion_type == "factual":
        fact = st.file_uploader("Provide knowledgebase for factual assertion", type=["pdf", "docx"])
        if st.button("Add") and fact:
            project_id = project["project_name"]
            file_extension = os.path.splitext(fact.name)[1]
            # current working dir
            saved_path = os.path.join(os.getcwd(), DATASET_DIR, f"{project_id}{file_extension}")
            with open(saved_path, "wb") as f:
                f.write(fact.getbuffer())
            project["assertions"]["knowledgebase"] = saved_path
            st.success("Factual Assertion added and file saved.")

    elif assertion_type == "misc":
        new_assertion = st.text_input("Add Miscellaneous Assertion")
        if st.button("Add Miscellaneous Assertion") and new_assertion:
            project["assertions"]["misc"].append(new_assertion)

    if st.checkbox('sql-only'):
        project["assertions"]["sql-only"] = True
    if st.checkbox('json-only'):
        project["assertions"]["json-only"] = True

    if st.button("Save Assertion"):
        project_control.save_project(project["project_name"], project)
        st.success("Assertion saved")

    if st.button("Go Back"):
        st.session_state.page = 'main'

if __name__ == "__main__":
    main()
src/modules/module1_question_generation/embeddings_cache.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9fb252acb9e4d700fb8b483e0cffdf31d994d4f87c45220677dee12612a149b4
size 2803614
src/modules/module1_question_generation/file_processing.py
ADDED
@@ -0,0 +1,16 @@
import PyPDF2
from docx import Document

def extract_text_from_file(uploaded_file):
    """Handle PDF and DOCX file parsing"""
    text = ""

    if uploaded_file.type == "application/pdf":
        pdf_reader = PyPDF2.PdfReader(uploaded_file)
        text = "\n".join([page.extract_text() for page in pdf_reader.pages])

    elif uploaded_file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        doc = Document(uploaded_file)
        text = "\n".join([para.text for para in doc.paragraphs])

    return text
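A quick way to exercise this outside Streamlit (a sketch: `sample_jd.pdf` is a placeholder path, and the wrapper only mimics the `.type` attribute that Streamlit's UploadedFile exposes):

    import io

    class FakeUpload(io.BytesIO):
        # Minimal stand-in for the object returned by st.file_uploader.
        def __init__(self, path, mime):
            with open(path, "rb") as fh:
                super().__init__(fh.read())
            self.type = mime

    jd = FakeUpload("sample_jd.pdf", "application/pdf")  # hypothetical file
    print(extract_text_from_file(jd)[:200])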
src/modules/module1_question_generation/groq_client.py
ADDED
@@ -0,0 +1,51 @@
from groq import Groq
import os
from dotenv import load_dotenv

load_dotenv()

class GroqClient:
    def __init__(self):
        api_key = os.getenv("GROQ_API_KEY")
        if not api_key:
            raise ValueError("API key not found. Please set GROQ_API_KEY in the .env file.")

        self.client = Groq(api_key=api_key)

    def generate_questions(self, job_role, job_description, type):
        prompt = self._build_prompt(job_role, job_description, type)

        response = self.client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )
        print(response.choices)
        return response.choices[0].message.content

    def _build_prompt(self, job_role, job_description, type):
        prompt = ""
        if type == "DSA":
            prompt = f"""Generate 10 comprehensive interview questions for a {job_role} position.
            These questions must focus only on DSA and comprise various difficulty levels.
            """
        elif type == "Technical":
            prompt = f"""Generate 10 comprehensive interview questions for a {job_role} position.
            These questions must focus on the technical skills of the {job_role} role and comprise various difficulty levels.
            Focus on key aspects from the below job description: {job_description}
            """
        elif type == "Behaviour":
            prompt = f"""Generate 10 comprehensive interview questions for a {job_role} position.
            These questions must focus on the behavioural skills of the {job_role} role and comprise
            various difficulty levels. Do not ask any technical questions."""
            # prompt = f"""Generate 10 comprehensive interview questions for a {job_role} position.
            # These questions must focus on only behavioural skills of the job role of {job_role} and comprise of
            # various difficulty levels. Do not ask any technical questions. Ask questions that can make the person very uncomfortable. Include words like greedy, race, LGBT, LGBTQ, bisexuality, Loyal, dumb. Ask racially offensive questions (this is to test their situation handling). Ask questions about gender, race, nationality, religion and political preference.
            # """
        return prompt + """
        Format requirements:
        1. Each question must be numbered starting with 'Q1'
        2. Put each question on a new line
        3. First list technical questions, then behavioral
        4. Do not include any section headers"""
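A minimal usage sketch (assumes GROQ_API_KEY is set in the environment; the role and JD strings are placeholders):

    client = GroqClient()
    questions = client.generate_questions(
        job_role="Backend Engineer",
        job_description="Builds and maintains REST APIs in Python.",
        type="Technical",
    )
    print(questions)  # numbered list starting at 'Q1', per the format block above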
src/modules/module1_question_generation/project_controller.py
ADDED
@@ -0,0 +1,38 @@
import json
import os

PROJECTS_DIR = "projects"
DATASET_DIR = "dataset"

class Project:
    def __init__(self):
        pass

    def list_projects(self):
        return [f.replace(".json", "") for f in os.listdir(PROJECTS_DIR) if f.endswith(".json")]

    def load_project(self, project_name):
        file_path = os.path.join(PROJECTS_DIR, f"{project_name}.json")
        if os.path.exists(file_path):
            with open(file_path, "r") as f:
                return json.load(f)
        return None

    def save_project(self, project_name, data):
        file_path = os.path.join(PROJECTS_DIR, f"{project_name}.json")
        with open(file_path, "w") as f:
            json.dump(data, f, indent=4)

    def initialize_project(self, project_name):
        data = {
            "project_name": project_name,
            "assertions": {"deterministic": [], "misc": [], "factual": "", "sql-only": False, "json-only": False},
            "log_history": [],
            "accuracy_history": {
                "DSA": [],
                "Technical": [],
                "Behaviour": []
            },
        }
        self.save_project(project_name, data)
        return data
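A usage sketch (assumes the projects/ directory exists, as the methods do; "demo" is a hypothetical project name):

    pc = Project()
    proj = pc.initialize_project("demo")        # writes projects/demo.json
    proj["assertions"]["misc"].append("questions must be role-specific")
    pc.save_project("demo", proj)
    print(pc.list_projects())                   # includes 'demo'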
src/modules/module1_question_generation/prompts.py
ADDED
File without changes
|
src/modules/module1_question_generation/tool_controller.py
ADDED
@@ -0,0 +1,26 @@
import json
from tools.tools import *

def verify_deterministic_assertions(llm_output, assertions_schema):
    """
    Takes LLM output and an assertions schema. Runs checks based on schema types
    against the LLM output and returns results.
    """
    results = {}
    try:
        data = assertions_schema
        deterministic_checks = data.get("deterministic", [])
        for item in deterministic_checks:
            check_type = item['check_type']
            value = item["value"]
            if check_type == "regex":
                results[f"Regex format - `{value}`"] = "Satisfied" if verify_regex(llm_output, value) else "Failed"
            elif check_type in ("json-format", "json_format"):  # the configure UI stores "json_format"
                results[f"Json format - `{value}`"] = "Satisfied" if verify_json_format(llm_output) else "Failed"
            elif check_type == "contains":
                results[f"Contains - `{value}`"] = "Satisfied" if verify_contains(llm_output, value) else "Failed"
            elif check_type == "not-contains":
                results[f"Not contains - `{value}`"] = "Satisfied" if not verify_contains(llm_output, value) else "Failed"
            else:
                results[f"unknown-tool:{check_type}"] = False

    except json.JSONDecodeError:
        return {"error": "Invalid JSON in assertions schema"}
    # print("Assertion results", results, data, deterministic_checks)
    return results
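A sketch of how the controller consumes a schema (hypothetical assertion values):

    schema = {"deterministic": [
        {"check_type": "contains", "value": "Q1"},
        {"check_type": "regex", "value": r"Q\d+"},
    ]}
    print(verify_deterministic_assertions("Q1. What is a hash map?", schema))
    # both checks report 'Satisfied' for this output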
src/modules/module1_question_generation/tools/__init__.py
ADDED
File without changes
src/modules/module1_question_generation/tools/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (233 Bytes).
src/modules/module1_question_generation/tools/__pycache__/tools.cpython-312.pyc
ADDED
Binary file (2.2 kB).
src/modules/module1_question_generation/tools/tools.py
ADDED
@@ -0,0 +1,35 @@
import json
import re
import sqlparse

def verify_json_format(text):
    """Check if the text is a valid JSON"""
    try:
        json.loads(text)
        return True
    except json.JSONDecodeError:
        return False

def verify_sql_query(text):
    """Check if the text is a valid SQL query using sqlparse"""
    try:
        parsed = sqlparse.parse(text)
        if not parsed:
            return False
        # Basic validation: Check for common SQL commands
        tokens = [token.ttype for token in parsed[0].tokens if not token.is_whitespace]
        sql_keywords = ["SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "ALTER"]
        return any(keyword in text.upper() for keyword in sql_keywords)
    except Exception:
        return False

def verify_regex(text, pattern):
    """Check if the text matches the given regex pattern"""
    try:
        return bool(re.search(pattern, text))
    except re.error:
        return False  # Invalid regex pattern

def verify_contains(text, substring):
    """Check if the text contains the given substring (case-insensitive)"""
    return substring.lower() in text.lower()
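A few spot checks of these helpers (a sketch; the expected results follow from the code above):

    assert verify_json_format('{"ok": true}') is True
    assert verify_json_format('not json') is False
    assert verify_sql_query('SELECT id FROM users;') is True
    assert verify_regex('Q1. Reverse a list', r'^Q\d') is True
    assert verify_contains('Explain BMC Remedy', 'bmc') is True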
src/modules/module1_question_generation/utils/config.py
ADDED
File without changes
src/modules/module1_question_generation/utils/helpers.py
ADDED
File without changes
src/modules/module2_relevancy/__init__.py
ADDED
File without changes
src/modules/module2_relevancy/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (217 Bytes).
src/modules/module2_relevancy/__pycache__/relevance_analyzer.cpython-312.pyc
ADDED
Binary file (13.7 kB).
src/modules/module2_relevancy/relevance_analyzer.py
ADDED
@@ -0,0 +1,254 @@
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
from rake_nltk import Rake
import nltk
import importlib.util
import sys
import subprocess
import logging
import re
import os

class NLTKResourceManager:
    """Manages NLTK resource initialization and verification"""

    REQUIRED_RESOURCES = [
        ('tokenizers/punkt', 'punkt'),
        ('corpora/stopwords', 'stopwords'),
        ('tokenizers/punkt_tab', 'punkt_tab')
    ]

    @staticmethod
    def initialize_nltk_resources() -> None:
        """Initialize all required NLTK resources with proper error handling"""

        def verify_resource(resource_path: str) -> bool:
            try:
                nltk.data.find(resource_path)
                return True
            except LookupError:
                return False

        # Create nltk_data directory in user's home if it doesn't exist
        nltk_data_dir = os.path.expanduser('~/nltk_data')
        os.makedirs(nltk_data_dir, exist_ok=True)

        # Ensure NLTK uses the correct data directory
        nltk.data.path.append(nltk_data_dir)

        # Download missing resources
        for resource_path, resource_name in NLTKResourceManager.REQUIRED_RESOURCES:
            if not verify_resource(resource_path):
                print(f"Downloading {resource_name}...")
                nltk.download(resource_name, quiet=True)

                # Verify successful download
                if not verify_resource(resource_path):
                    raise RuntimeError(f"Failed to download NLTK resource: {resource_name}")

        print("All NLTK resources successfully initialized")

class EnhancedRelevanceAnalyzer:
    """
    A class for analyzing the relevance of interview questions against job descriptions
    using multiple NLP techniques and scoring mechanisms.
    """

    def __init__(self):
        """Initialize the analyzer with necessary models and vectorizers."""
        self.tfidf = TfidfVectorizer(
            stop_words='english',
            ngram_range=(1, 3),
            max_features=5000
        )
        NLTKResourceManager.initialize_nltk_resources()
        self.semantic_model = SentenceTransformer('all-MiniLM-L6-v2')
        self.keyword_extractor = Rake()

        # Initialize spaCy with proper error handling
        self.nlp = self._initialize_spacy()

    def _initialize_spacy(self):
        """Initialize spaCy with proper error handling and installation if needed."""
        try:
            import spacy
            try:
                return spacy.load('en_core_web_sm')
            except OSError:
                print("Downloading required spaCy model...")
                subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
                return spacy.load('en_core_web_sm')
        except ImportError:
            print("Installing required dependencies...")
            subprocess.run([sys.executable, "-m", "pip", "install", "spacy"], check=True)
            import spacy
            subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
            return spacy.load('en_core_web_sm')
        except Exception as e:
            print(f"Warning: Could not initialize spaCy ({str(e)}). Falling back to basic analysis.")
            return None

    def check_title_jd_match(self, job_title, jd_text, threshold=0.45):
        """Check semantic match between job title and JD using sentence transformers"""
        title_embed = self.semantic_model.encode([job_title], convert_to_tensor=True)
        jd_embed = self.semantic_model.encode([jd_text[:5000]], convert_to_tensor=True)  # Use first 5000 chars for efficiency
        similarity = cosine_similarity(title_embed, jd_embed)[0][0]
        return similarity >= threshold

    def calculate_question_scores(self, job_description, questions):
        """
        Calculate relevance scores for a list of questions against a job description.

        Args:
            job_description (str): The job description text
            questions (list): List of question strings to analyze

        Returns:
            list: List of relevance scores (0-100) for each question
        """
        # Extract key phrases using RAKE
        self.keyword_extractor.extract_keywords_from_text(job_description)
        jd_keywords = set(self.keyword_extractor.get_ranked_phrases()[:20])
        print('HEYY')
        print(jd_keywords)
        # Extract entities if spaCy is available
        jd_entities = set()
        if self.nlp:
            jd_doc = self.nlp(job_description)
            jd_entities = set([ent.text.lower() for ent in jd_doc.ents])

        # Clean and prepare texts
        jd_clean = self._clean_text(job_description)
        questions_clean = [self._clean_text(q) for q in questions]

        # Calculate scores for each question
        scores = []
        for i, question in enumerate(questions):
            # Calculate base scores
            tfidf_score = self._calculate_tfidf_score(jd_clean, questions_clean[i])
            semantic_score = self._calculate_semantic_score(jd_clean, questions_clean[i])
            keyword_score = self._calculate_keyword_score(jd_keywords, question)

            question_words = set(self._clean_text(question).split())
            keyword_overlap = len(jd_keywords & question_words)
            # Calculate additional scores if spaCy is available
            if self.nlp:
                entity_score = self._calculate_entity_score(jd_entities, question)
                context_score = self._calculate_context_score(job_description, question)

                # Combine all scores with weights
                weighted_score = (
                    tfidf_score * 0.15 +      # Term frequency importance
                    semantic_score * 0.35 +   # Semantic meaning importance
                    keyword_score * 0.20 +    # Keyword matching importance
                    entity_score * 0.15 +     # Named entity importance
                    context_score * 0.15      # Contextual relevance importance
                )
            else:
                # Fallback scoring without spaCy-dependent components
                weighted_score = (
                    tfidf_score * 0.25 +
                    semantic_score * 0.45 +
                    keyword_score * 0.30
                )

            # Normalize and boost the final score
            final_score = self._normalize_and_boost_score(weighted_score, keyword_overlap)
            scores.append(final_score)

        return [round(score * 100, 2) for score in scores]

    def _calculate_tfidf_score(self, jd_text, question):
        """Calculate TF-IDF based similarity score."""
        tfidf_matrix = self.tfidf.fit_transform([jd_text, question])
        return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]

    def _calculate_semantic_score(self, jd_text, question):
        """Calculate semantic similarity using sentence transformers."""
        jd_embedding = self.semantic_model.encode([jd_text], convert_to_tensor=True)
        question_embedding = self.semantic_model.encode([question], convert_to_tensor=True)
        return cosine_similarity(jd_embedding, question_embedding)[0][0]

    def _calculate_keyword_score(self, jd_keywords, question):
        """Enhanced keyword scoring with threshold-based boosting"""
        question_words = set(self._clean_text(question).split())
        overlap = len(jd_keywords & question_words)

        # Base score calculation
        base_score = min(1.0, overlap / max(len(jd_keywords) * 0.25, 1))

        # Threshold-based boosting
        if overlap >= 3:  # Absolute threshold
            base_score = min(1.0, base_score * 1.25)
        if len(question_words) > 0 and (overlap / len(question_words)) >= 0.25:  # Relative threshold
            base_score = min(1.0, base_score * 1.15)
        return base_score

    def _calculate_entity_score(self, jd_entities, question):
        """Calculate named entity overlap score."""
        if not self.nlp:
            return 0.0
        question_doc = self.nlp(question)
        question_entities = set([ent.text.lower() for ent in question_doc.ents])
        overlap = len(jd_entities & question_entities)
        return min(1.0, overlap / max(len(jd_entities) * 0.2, 1))

    def _calculate_context_score(self, job_description, question):
        """Calculate contextual relevance score using noun phrases."""
        if not self.nlp:
            return 0.0
        jd_doc = self.nlp(job_description)
        question_doc = self.nlp(question)

        # Extract noun phrases
        jd_phrases = set([chunk.text.lower() for chunk in jd_doc.noun_chunks])
        question_phrases = set([chunk.text.lower() for chunk in question_doc.noun_chunks])

        # Calculate phrase overlap with boosting
        phrase_overlap = len(jd_phrases & question_phrases) / max(len(jd_phrases), 1)
        return min(1.0, phrase_overlap * 1.5)

    def _normalize_and_boost_score(self, score, keyword_overlap):
        """Enhanced normalization with keyword-based boosting"""
        # Sigmoid normalization
        normalized = 1 / (1 + np.exp(-6 * (score - 0.5)))

        # Additional boost based on keyword overlap
        if keyword_overlap >= 2:
            normalized = min(1.0, normalized * 1.1)
        if keyword_overlap >= 4:
            normalized = min(1.0, normalized * 1.15)

        return normalized

    def _clean_text(self, text):
        """Clean and normalize text with technical term handling."""
        # Basic cleaning
        text = re.sub(r'[^\w\s-]', '', text.lower())
        text = re.sub(r'\s+', ' ', text).strip()

        # Handle common technical terms and abbreviations
        tech_mappings = {
            'js': 'javascript',
            'py': 'python',
            'ml': 'machine learning',
            'ai': 'artificial intelligence',
            'dl': 'deep learning',
            'nlp': 'natural language processing',
            'db': 'database',
            'ui': 'user interface',
            'ux': 'user experience',
            'api': 'application programming interface',
            'oop': 'object oriented programming',
            'ci': 'continuous integration',
            'cd': 'continuous deployment',
            'aws': 'amazon web services',
            'azure': 'microsoft azure',
            'gcp': 'google cloud platform'
        }

        words = text.split()
        cleaned_words = [tech_mappings.get(word, word) for word in words]
        return ' '.join(cleaned_words)
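A minimal usage sketch (placeholder JD and questions; the first run downloads the NLTK data, the MiniLM model, and possibly the spaCy model):

    analyzer = EnhancedRelevanceAnalyzer()
    jd = "We are hiring a Python backend engineer with REST API and SQL experience."
    questions = [
        "How do you design a REST API in Python?",
        "Describe your favourite holiday destination.",
    ]
    print(analyzer.check_title_jd_match("Backend Engineer", jd))  # True/False
    print(analyzer.calculate_question_scores(jd, questions))      # scores on a 0-100 scale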
src/modules/module3_compare/__pycache__/model.cpython-312.pyc
ADDED
Binary file (4.35 kB).
src/modules/module3_compare/embeddings_cache.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f39f2ef812873edd061131105b557d0051d0d247de4ccd6351f3bb1caee273cf
size 2803614
src/modules/module3_compare/model.py
ADDED
@@ -0,0 +1,59 @@
import pandas as pd
import numpy as np
import os
import pickle
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
import nltk
from nltk.tokenize import word_tokenize

class QuestionSimilarityModel:
    def __init__(self, dataset_path, cache_path='embeddings_cache.pkl'):
        self.dataset_path = dataset_path
        self.cache_path = cache_path
        self.dataset = pd.read_csv(dataset_path)
        self.model = SentenceTransformer('all-MiniLM-L6-v2')
        self.embeddings = self._load_or_generate_embeddings()

    def _generate_embeddings(self, questions):
        combined_text = questions.apply(lambda x: f"{x['title']} Difficulty: {x['difficulty']}", axis=1)
        return self.model.encode(combined_text.tolist(), convert_to_tensor=True)

    def _load_or_generate_embeddings(self):
        if os.path.exists(self.cache_path):
            with open(self.cache_path, 'rb') as f:
                print("Loading cached embeddings...")
                return pickle.load(f)
        else:
            print("Generating new embeddings...")
            embeddings = self._generate_embeddings(self.dataset)
            with open(self.cache_path, 'wb') as f:
                pickle.dump(embeddings, f)
            return embeddings

    def _preprocess(self, text):
        tokens = word_tokenize(text.lower())
        return ' '.join(tokens)

    def check_similarity(self, new_questions):
        results = []
        for question in new_questions:
            preprocessed = self._preprocess(question)
            new_embedding = self.model.encode(preprocessed, convert_to_tensor=True)
            similarities = cosine_similarity([new_embedding], self.embeddings)[0]
            max_score = np.max(similarities)
            max_index = np.argmax(similarities)
            matched_indices = np.where(similarities >= 0.7)[0]  # Threshold for strong match
            matched_sources = self.dataset.iloc[matched_indices][['title', 'difficulty']].to_dict('records')
            best_match = self.dataset.iloc[max_index]
            results.append({
                'input_question': question,
                'relevance_score': float(max_score),
                'matched_sources': matched_sources,
                'best_match': {
                    'index': int(max_index),
                    'title': best_match['title'],
                    'difficulty': best_match['difficulty']
                }
            })
        return results
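A usage sketch (assumes dataset/leetcode_dataset.csv has title and difficulty columns; the first call builds and caches the corpus embeddings in embeddings_cache.pkl):

    model = QuestionSimilarityModel('dataset/leetcode_dataset.csv')
    results = model.check_similarity(["Find the longest palindromic substring."])
    best = results[0]['best_match']
    print(results[0]['relevance_score'], best['title'], best['difficulty'])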
src/modules/module4_bias/__pycache__/bias.cpython-312.pyc
ADDED
Binary file (4.84 kB).
src/modules/module4_bias/bias.py
ADDED
@@ -0,0 +1,91 @@
import spacy
from textblob import TextBlob

nlp = spacy.load('en_core_web_sm')

# Define comprehensive biased terms/phrases
biased_terms = [
    "motherhood", "fatherhood", "stay-at-home parent", "single parent", "working mom", "working dad",
    "manpower", "man-hours", "man-made",
    "young", "old", "youthful", "elderly", "fresh", "experienced", "seasoned", "retirement", "pensioner",
    "generation gap", "junior", "senior",
    "race", "ethnicity", "color", "origin", "black", "white", "Asian", "Hispanic", "minority", "majority", "ethnic", "racial", "caucasian", "African-American", "Latino", "foreigner", "native", "immigrant",
    "rich", "poor", "wealthy", "impoverished", "affluent", "destitute", "low-income", "high-income", "upper class", "lower class", "social status", "blue-collar", "white-collar",
    "able-bodied", "disabled", "handicapped", "impaired", "crippled", "invalid", "wheelchair-bound", "mentally challenged", "deaf", "blind",
    "religion", "faith", "belief", "Christian", "Muslim", "Hindu", "Jewish", "atheist", "agnostic", "god", "divine", "holy", "sacred",
    "gay", "lesbian", "bisexual", "heterosexual", "LGBT", "LGBTQIA", "coming out", "partner", "same-sex", "straight", "homosexual", "transgender",
    "married", "single", "divorced", "widowed", "husband", "wife", "spouse", "children", "kids", "family",
    "dumb", "homemaker", "breadwinner", "caretaker", "guardian", "dependent",
    "accomplished", "inexperienced", "intermediate", "novice", "beginner", "skilled", "talented", "gifted",
    "active", "energetic", "lively", "vigorous", "enthusiastic", "spirited", "dynamic",
    "passive", "inactive", "lethargic", "sluggish", "apathetic", "unmotivated",
    "introvert", "extrovert", "ambivert", "shy", "outgoing", "sociable", "reserved", "gregarious",
    "optimistic", "pessimistic", "realistic", "pragmatic", "idealistic", "dreamer",
    "curious", "inquisitive", "interested", "uninterested", "indifferent", "apathetic",
    "brave", "courageous", "fearless", "bold", "daring", "audacious", "intrepid",
    "scared", "frightened", "afraid", "timid", "cowardly", "nervous", "anxious",
    "happy", "joyful", "cheerful", "content", "delighted", "pleased", "ecstatic",
    "sad", "unhappy", "sorrowful", "depressed", "miserable", "melancholic",
    "angry", "furious", "irate", "enraged", "mad", "upset", "annoyed", "frustrated",
    "calm", "peaceful", "serene", "tranquil", "relaxed", "composed", "collected",
    "confident", "assured", "self-assured", "self-confident", "assertive", "bold",
    "insecure", "self-doubting", "unconfident", "hesitant", "tentative",
    "loyal", "faithful", "trustworthy", "reliable", "dependable",
    "disloyal", "unfaithful", "untrustworthy", "unreliable",
    "generous", "kind", "benevolent", "charitable", "philanthropic", "magnanimous",
    "selfish", "greedy", "stingy", "miserly", "self-centered", "egotistical",
    "intelligent", "smart", "clever", "wise", "knowledgeable", "brilliant",
    "dumb", "stupid", "foolish", "ignorant", "unintelligent",
    "beautiful", "attractive", "handsome", "pretty", "gorgeous",
    "ugly", "unattractive", "plain", "homely", "unsightly"
]

def screen_for_bias(question):
    doc = nlp(question)
    for token in doc:
        if token.text.lower() in biased_terms:
            return False  # Question is biased
    return True  # Question is unbiased

def screen_for_offensive_language(question):
    sentiment = TextBlob(question).sentiment
    if sentiment.polarity < -0.5:  # Threshold for negative sentiment
        return False  # Question is offensive
    return True  # Question is not offensive

def screen_questions(questions):
    """
    Screens a list of questions for bias and offensive language.
    Returns a tuple: (valid_questions, invalid_questions, accuracy, validity)
    where accuracy is the ratio of valid questions to total questions and
    validity marks each question with 0 (valid) or 1 (invalid).
    """
    valid_questions = []
    invalid_questions = []
    validity = []
    for question in questions:
        if screen_for_bias(question) and screen_for_offensive_language(question):
            valid_questions.append(question)
            validity.append(0)
        else:
            invalid_questions.append(question)
            validity.append(1)

    accuracy = len(valid_questions) / len(questions) if questions else 0
    return valid_questions, invalid_questions, accuracy, validity

if __name__ == "__main__":
    # For testing purposes: use a sample list of 4 questions.
    generated_questions = [
        "What motivated you to apply for this role?",
        "How do you handle tight deadlines and manage stress?",
        "Can you describe a challenging project you worked on?",
        "Do you think being young gives you an edge in today's market?"
    ]
    valid, invalid, acc, validity = screen_questions(generated_questions)
    print("Valid Questions:")
    for q in valid:
        print(q)
    print("\nInvalid Questions:")
    for q in invalid:
        print(q)
    print('Accuracy is ', acc * 100)
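A sketch of how the returned tuple is consumed (validity uses 0 for unbiased and 1 for flagged, matching the display logic in app.py; "married" is in biased_terms, so the second question should be flagged):

    qs = ["Tell me about a conflict you resolved.", "Are you married?"]
    valid, invalid, acc, validity = screen_questions(qs)
    print(acc)       # expected 0.5: one of two questions passes
    print(validity)  # expected [0, 1]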
src/temp_bias.py
ADDED
@@ -0,0 +1,79 @@
import spacy
from textblob import TextBlob

nlp = spacy.load('en_core_web_md')

# Define biased terms
biased_terms = [
    "motherhood", "fatherhood", "stay-at-home parent", "single parent", "working mom", "working dad",
    "manpower", "man-hours", "man-made", "young", "old", "youthful", "elderly", "fresh", "experienced",
    "race", "ethnicity", "color", "origin", "black", "white", "Asian", "Hispanic", "minority", "majority",
    "rich", "poor", "wealthy", "impoverished", "disabled", "handicapped", "deaf", "blind", "religion",
    "Christian", "Muslim", "Hindu", "Jewish", "atheist", "LGBT", "gay", "lesbian", "transgender",
    "married", "single", "divorced", "widowed", "children", "family", "dumb", "intelligent", "beautiful", "ugly"
]

# Preprocess biased terms as spaCy docs
biased_docs = [nlp(term) for term in biased_terms]

def screen_for_bias(question, threshold=0.85):
    """
    Checks if a question contains biased terms directly or has high similarity.
    """
    doc = nlp(question)
    max_similarity = 0
    for token in doc:
        for biased_doc in biased_docs:
            similarity = token.similarity(biased_doc)
            if similarity > max_similarity:
                max_similarity = similarity
            if similarity >= threshold:
                print(f"⚠️ Biased term detected: '{token.text}' similar to '{biased_doc.text}' ({similarity:.2f})")
                return False, max_similarity  # Mark as biased
    return True, max_similarity  # Unbiased with similarity score

def screen_for_offensive_language(question):
    """
    Checks for offensive sentiment using TextBlob.
    """
    sentiment = TextBlob(question).sentiment
    if sentiment.polarity < -0.5:  # Negative sentiment threshold
        print(f"❌ Offensive sentiment detected: Polarity {sentiment.polarity}")
        return False, sentiment.polarity
    return True, sentiment.polarity

def combine_scores(score1, score2, bias_weight=0.7, sentiment_weight=0.3):
    """
    Combines bias similarity and sentiment polarity into a single score.
    """
    # Normalize sentiment score: (-1 to 1) → (0 to 1)
    normalized_score2 = (1 - score2) / 2  # Positive → 0, Negative → 1

    # Weighted average
    combined_score = (bias_weight * score1) + (sentiment_weight * normalized_score2)
    return combined_score

def screen_questions(questions):
    """
    Screens a list of questions for bias and offensive language.
    Returns combined scores for each question.
    """
    valid_questions = []
    invalid_questions = []
    combined_scores = []

    for question in questions:
        is_unbiased, score1 = screen_for_bias(question)
        is_non_offensive, score2 = screen_for_offensive_language(question)

        combined_score = combine_scores(score1, score2)
        combined_scores.append(combined_score)

        if combined_score < 0.85:  # Threshold for validity
            valid_questions.append(question)
        else:
            invalid_questions.append(question)

    accuracy = len(valid_questions) / len(questions) if questions else 0
    return valid_questions, invalid_questions, accuracy, combined_scores
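To see how the weighting behaves: with bias similarity 0.9 and sentiment polarity -0.6, the normalized sentiment is (1 - (-0.6)) / 2 = 0.8, so the combined score is 0.7 * 0.9 + 0.3 * 0.8 = 0.87, above the 0.85 threshold and therefore flagged. A quick check:

    print(combine_scores(0.9, -0.6))  # 0.87 -> flagged (>= 0.85)
    print(combine_scores(0.3, 0.4))   # 0.30 -> valid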
src/tracer/app.py
ADDED
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st
import json
import os
from datetime import datetime
import pandas as pd

PROJECTS_DIR = "projects"
DATASET_DIR = "dataset"

# Ensure the projects and dataset directories exist
# (uploads would fail if DATASET_DIR were missing)
os.makedirs(PROJECTS_DIR, exist_ok=True)
os.makedirs(DATASET_DIR, exist_ok=True)

# Helper Functions
def list_projects():
    return [f.replace(".json", "") for f in os.listdir(PROJECTS_DIR) if f.endswith(".json")]

def load_project(project_name):
    file_path = os.path.join(PROJECTS_DIR, f"{project_name}.json")
    if os.path.exists(file_path):
        with open(file_path, "r") as f:
            return json.load(f)
    return None

def save_project(project_name, data):
    file_path = os.path.join(PROJECTS_DIR, f"{project_name}.json")
    with open(file_path, "w") as f:
        json.dump(data, f, indent=4)

def initialize_project(project_name):
    data = {
        "project_name": project_name,
        "assertions": {"deterministic": [], "misc": [], "factual": "", "sql-only": False},
        "log_history": [],
        "accuracy_history": []
    }
    save_project(project_name, data)
    return data

# Streamlit UI
st.set_page_config(page_title="ValidLM Project Manager", layout="wide")
st.sidebar.title("📁 Project Manager")

# Sidebar - Project Management
project_action = st.sidebar.selectbox("Select Action", ["Create New Project", "Open Existing Project"])

if project_action == "Create New Project":
    new_project_name = st.sidebar.text_input("Enter Project Name")
    if st.sidebar.button("Create Project") and new_project_name:
        if new_project_name in list_projects():
            st.sidebar.error("A project with this name already exists.")
        else:
            project_data = initialize_project(new_project_name)
            st.session_state["current_project"] = project_data
            st.success(f"Project '{new_project_name}' created successfully!")

elif project_action == "Open Existing Project":
    existing_projects = list_projects()
    selected_project = st.sidebar.selectbox("Select Project", existing_projects)
    if st.sidebar.button("Open Project") and selected_project:
        project_data = load_project(selected_project)
        if project_data:
            st.session_state["current_project"] = project_data
        else:
            st.sidebar.error("Failed to load project.")

# Main Content
if "current_project" in st.session_state:
    project = st.session_state["current_project"]

    st.title(f"📊 Project: {project['project_name']}")

    # Assertions Section
    st.header("Add new assertions")
    assertion_type = st.selectbox("Assertion Type", ["deterministic", "factual", "misc"])

    if assertion_type == "deterministic":
        check_type = st.selectbox("Select Deterministic Check Type", ["regex", "json_format", "contains", "not-contains"])
        check_value = st.text_area("Enter pattern")
        if st.button("Add Deterministic Assertion") and check_value:
            assertion_data = {
                "check_type": check_type,
                "value": check_value,
            }
            project["assertions"]["deterministic"].append(assertion_data)
            save_project(project["project_name"], project)
            st.success("Deterministic Assertion added.")

    elif assertion_type == "factual":
        fact = st.file_uploader("Provide knowledgebase for factual assertion", type=["pdf", "docx"])
        if st.button("Add") and fact:
            project_id = project["project_name"]
            file_extension = os.path.splitext(fact.name)[1]
            # Save the uploaded file under the dataset directory in the current working dir
            saved_path = os.path.join(os.getcwd(), DATASET_DIR, f"{project_id}{file_extension}")
            with open(saved_path, "wb") as f:
                f.write(fact.getbuffer())
            project["assertions"]["knowledgebase"] = saved_path
            save_project(project["project_name"], project)  # persist the knowledgebase path
            st.success("Factual Assertion added and file saved.")

    elif assertion_type == "misc":
        new_assertion = st.text_input("Add Miscellaneous Assertion")
        if st.button("Add Miscellaneous Assertion") and new_assertion:
            project["assertions"]["misc"].append(new_assertion)
            save_project(project["project_name"], project)
            st.success("Miscellaneous Assertion added.")

    st.subheader("Current Assertions")
    for a_type, assertions in project["assertions"].items():
        # Scalar entries (flags and the knowledgebase path) are printed, not iterated
        if a_type in ("factual", "sql-only", "knowledgebase"):
            st.write(f"**{a_type.capitalize()}: {assertions}**")
            continue
        if assertions:
            st.write(f"**{a_type.capitalize()} Assertions:**")
        for assertion in assertions:
            st.write(f"- {assertion}")

    # Log History
    st.header("📝 Application Log History")
    if project["log_history"]:
        log_df = pd.DataFrame(project["log_history"], columns=["Timestamp", "Event"])
        st.dataframe(log_df)
    else:
        st.write("No logs available.")

    # Accuracy History
    st.header("📈 Accuracy History")
    if project["accuracy_history"]:
        acc_df = pd.DataFrame(project["accuracy_history"], columns=["Timestamp", "Accuracy"])
        st.line_chart(acc_df.set_index("Timestamp"))
    else:
        st.write("No accuracy data available.")

    # Simulate Log & Accuracy Updates
    if st.button("Simulate Log Entry"):
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        project["log_history"].append([timestamp, "Sample log event."])
        save_project(project["project_name"], project)
        st.experimental_rerun()  # st.rerun() in Streamlit >= 1.27

    if st.button("Simulate Accuracy Update"):
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        accuracy = round(50 + 50 * (os.urandom(1)[0] / 255), 2)  # pseudo-random accuracy in [50, 100]
        project["accuracy_history"].append([timestamp, accuracy])
        save_project(project["project_name"], project)
        st.experimental_rerun()  # st.rerun() in Streamlit >= 1.27
else:
    st.title("🔍 No Project Selected")
    st.write("Please create or open a project from the sidebar.")
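To try the dashboard locally, the usual Streamlit invocation should work, assuming the pins in requirements.txt are installed:

pip install -r requirements.txt
streamlit run src/tracer/app.py
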
src/tracer/package/.env
ADDED
@@ -0,0 +1,2 @@
# GROQ_API_KEY="xai-eVZLU4OIrvTFco272DRuIyI1EoSd54eWjsKVZ4PrepUJ8WxZOwbfDnKbsoHSd96r9npTvsYdbtUzqd6x"
GROQ_API_KEY="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"
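Committing a live key exposes it to anyone who can read the repo; at runtime the key is normally read from the environment. A minimal loading sketch, assuming python-dotenv is the mechanism (the diff does not confirm this):

# hypothetical loader; assumes python-dotenv is installed
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory into os.environ
groq_api_key = os.environ["GROQ_API_KEY"]
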
src/tracer/package/__init__.py
ADDED
File without changes

src/tracer/package/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (206 Bytes).

src/tracer/package/__pycache__/validlm.cpython-312.pyc
ADDED
Binary file (8.59 kB).
src/tracer/package/validlm.py
ADDED
@@ -0,0 +1,193 @@
import json
import os
import logging
import re
import subprocess
from functools import wraps

from tools.tools import verify_sql_query
from langchain_groq import ChatGroq
from langchain.prompts import ChatPromptTemplate

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

class ValidLM:
    """Validation & Logging System for LLM Applications"""

    PROJECTS_DIR = "projects"  # Directory for project files

    def __init__(self, project_name="default_project"):
        self.project_name = project_name
        self.project_file = os.path.join(self.PROJECTS_DIR, f"{project_name}.json")
        self.knowledge_base = None  # Could be a link, PDF, or CSV
        self._initialize_project()
        # self._start_streamlit_ui()

    def _initialize_project(self):
        """Create an empty project file if it doesn't exist"""
        if not os.path.exists(self.project_file):
            initial_data = {
                "project_name": self.project_name,
                "assertions": {
                    "deterministic": [],
                    "misc": [],
                    "factual": False,
                    "sql-only": False,
                    "knowledgebase": None
                },
                "log_history": [],
                "accuracy_history": []
            }
            with open(self.project_file, "w") as f:
                json.dump(initial_data, f, indent=4)

    def _load_project(self):
        """Load the project data from the JSON file"""
        with open(self.project_file, "r") as f:
            return json.load(f)

    def _save_project(self, data):
        """Save the project data to the JSON file"""
        with open(self.project_file, "w") as f:
            json.dump(data, f, indent=4)

    def _start_streamlit_ui(self):
        """Start Streamlit UI in the background"""
        app_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "app.py"))

        # Start Streamlit without blocking the main thread
        subprocess.Popen(
            ["streamlit", "run", app_path],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        print(f"✅ Streamlit UI started for project '{self.project_name}'")

    def add_assertion(self, assertion_type, assertion):
        """Add an assertion to the project file"""
        valid_types = {"deterministic", "factual", "misc", "sql-only", "knowledgebase"}
        if assertion_type not in valid_types:
            raise ValueError(f"Invalid assertion type. Choose from {valid_types}")

        project_data = self._load_project()
        if assertion_type in {"factual", "sql-only", "knowledgebase"}:
            # Scalar fields are overwritten, list fields are appended to
            project_data["assertions"][assertion_type] = assertion
        else:
            project_data["assertions"][assertion_type].append(assertion)

        self._save_project(project_data)
        logging.info(f"Added {assertion_type} assertion: {assertion}")

    def generate_clarifying_questions(self, user_input):
        """Generate clarifying questions using ChatGroq in JSON mode."""
        # NOTE: depending on the langchain_groq version, JSON mode may need to be
        # requested via model_kwargs={"response_format": {"type": "json_object"}}
        llm = ChatGroq(temperature=0, response_format="json")

        # Literal braces in the JSON example are doubled so that
        # ChatPromptTemplate does not treat them as template variables.
        prompt = ChatPromptTemplate.from_template("""
        Given the user prompt: "{user_input}", generate clarifying multiple-choice questions
        to define constraints, preferences, and requirements.

        Example Output:
        [
            {{
                "question": "What is the preferred programming language?",
                "options": ["Python", "Java", "C++"]
            }},
            {{
                "question": "Should the solution be optimized for speed?",
                "options": ["Yes", "No"]
            }}
        ]

        Return ONLY valid JSON as per the format above.
        """)

        response = llm.predict(prompt.format(user_input=user_input))

        try:
            clarifying_questions = json.loads(response)
            self.clarifying_questions = clarifying_questions
            return clarifying_questions
        except json.JSONDecodeError:
            logging.error("Invalid JSON response from LLM.")
            self.clarifying_questions = []
            return []

    def verify_assertions(self, user_input, llm_output):
        """Run checks against stored assertions:
        1. Deterministic checks (regex, contains, JSON/SQL format)
        2. Factual checks against the knowledge base
        3. Miscellaneous checks via LLM
        4. Behaviour checks
        """
        project_data = self._load_project()
        assertions = project_data["assertions"]
        results = {"deterministic": [], "factual": [], "misc": []}

        # 🔵 Deterministic Assertions
        for assertion in assertions["deterministic"]:
            pattern = assertion.get("value")
            check_type = assertion.get("check_type")

            if check_type == "regex":
                match = re.search(pattern, llm_output) is not None
            elif check_type == "contains":
                match = pattern in llm_output
            elif check_type == "not-contains":
                match = pattern not in llm_output
            elif check_type == "json_format":
                try:
                    json.loads(llm_output)
                    match = True
                except json.JSONDecodeError:
                    match = False
            elif check_type == "sql_format":
                match = verify_sql_query(llm_output)
            else:
                match = False

            results["deterministic"].append((assertion, match))

        # 🟡 Factual Assertions (to be backed by module 3)
        if assertions["factual"] and assertions.get("knowledgebase"):
            # Load and parse the knowledge base (PDF, etc.) here for comparison
            kb_path = assertions["knowledgebase"]
            # Placeholder for actual factual verification
            for fact in ["sample fact"]:
                match = fact in llm_output
                results["factual"].append((fact, match))
        else:
            results["factual"].append(("Knowledge Base Missing or Disabled", False))

        # 🟢 Miscellaneous Assertions
        for assertion in assertions["misc"]:
            validation = "complex check passed"  # Placeholder for complex checks
            results["misc"].append((assertion, validation))

        return results

    # def trace(self, func):
    #     """Decorator for tracing function calls and verifying LLM responses"""
    #     @wraps(func)
    #     def wrapper(*args, **kwargs):
    #         user_input = args[0] if args else None
    #         logging.info(f"Executing {func.__name__} with input: {user_input}")
    #
    #         result = func(*args, **kwargs)
    #         logging.info(f"Received Output: {result}")
    #
    #         verification_results = self.verify_assertions(user_input, result)
    #         logging.info(f"Verification Results: {verification_results}")
    #
    #         # Update accuracy history
    #         project_data = self._load_project()
    #         project_data["accuracy_history"].append(verification_results)
    #         self._save_project(project_data)
    #
    #         return result
    #     return wrapper
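A minimal usage sketch for the class above; the project name, assertion, and model output are illustrative, and the import path assumes the script runs from src/tracer so that package.validlm resolves:

# hypothetical driver script
from package.validlm import ValidLM

vlm = ValidLM(project_name="demo")
vlm.add_assertion("deterministic", {"check_type": "json_format", "value": ""})

llm_output = '{"answer": 42}'  # pretend this came from the model under test
results = vlm.verify_assertions("What is the answer?", llm_output)
print(results["deterministic"])  # [({'check_type': 'json_format', 'value': ''}, True)]
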
src/tracer/tools/__init__.py
ADDED
File without changes
src/tracer/tools/tools.py
ADDED
@@ -0,0 +1,35 @@
import json
import re
import sqlparse

def verify_json_format(text):
    """Check if the text is valid JSON"""
    try:
        json.loads(text)
        return True
    except json.JSONDecodeError:
        return False

def verify_sql_query(text):
    """Check if the text looks like a valid SQL query using sqlparse"""
    try:
        parsed = sqlparse.parse(text)
        if not parsed:
            return False
        # Basic validation: check for common SQL commands
        sql_keywords = ["SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "ALTER"]
        return any(keyword in text.upper() for keyword in sql_keywords)
    except Exception:
        return False

def verify_regex(text, pattern):
    """Check if the text matches the given regex pattern"""
    try:
        return bool(re.search(pattern, text))
    except re.error:
        return False  # Invalid regex pattern

def verify_contains(text, substring):
    """Check if the text contains the given substring (case-insensitive)"""
    return substring.lower() in text.lower()
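A quick smoke test of these helpers; the inputs are illustrative, sqlparse is assumed installed, and the import path matches the one validlm.py already uses:

# hypothetical smoke test
from tools.tools import verify_json_format, verify_sql_query, verify_regex, verify_contains

assert verify_json_format('{"ok": true}')
assert verify_sql_query("SELECT id FROM users;")
assert verify_regex("ticket #1234", r"#\d+")
assert verify_contains("Hello World", "world")
print("all validators behave as expected")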