EssamDad committed
Commit 5e275e0 · verified · 1 parent(s): 2fd598d

Upload 9 files
Semantic_Engine_V0/LoadVectorDB.py ADDED
@@ -0,0 +1,90 @@
+ from langchain.vectorstores import Chroma
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import RetrievalQA
+ import os
+ import openai
+
+ # Sample query: "What is the Blu Platinum card?"
+ query = "ما هي بطاقة بلو البلاتينية"
+
+ os.environ["OPENAI_API_KEY"] = "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"
+ llm_name = "gpt-3.5-turbo-1106"
+
+ # Open the Chroma store persisted by SemanticEngin.py.
+ persist_directory = 'docs/chroma/'
+ embedding = OpenAIEmbeddings()
+ vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
+ # print(vectordb._collection.count())
+
+ llm = ChatOpenAI(model_name=llm_name, temperature=0)
+
+ qa_chain = RetrievalQA.from_chain_type(
+     llm,
+     retriever=vectordb.as_retriever()
+ )
+
+ # Module-level smoke test; runs once when this file is imported.
+ result = qa_chain({"query": query})
+
+ context1 = ""
+ context2 = ""
+
+
+ def response_to_query(query, context1="", context2=""):
+     """Answer `query` from the top-k retrieved chunks plus any prior context."""
+     docs2 = vectordb.similarity_search(query, k=4)
+     qdocs = " ".join(doc.page_content for doc in docs2)
+     qdocs = qdocs + "\n" + context1 + "\n" + context2
+
+     # Style instruction: "Answer as a human call-centre agent speaking Iraqi
+     # dialect, very short and concise, with warmth and respect for the caller."
+     cond1 = "اجعل الاجابة و كأنك موظف بشري يتحدث اللهجة العراقية في مركز اتصالات و بشكل قصير ومختصر جدا وبحب و تقدير للسائل"
+     # Unused alternative guardrail prompts ("answer only from the context;
+     # if unsure, say 'I don't know'"), kept for experimentation:
+     dont2 = "يرجى الإجابة وفقًا للسياق، وإذا كنت لا تعلم، قل 'لا أعلم"
+     dont3 = "Answer based on the context provided; if you do not know the answer, please respond with 'I do not know.'"
+     dont4 = "Stick to the provided information and answer based solely on that context. If you can't answer with certainty, simply say 'I don't know.'"
+     dont5 = "Assume you have no prior knowledge beyond the information I provide. Answer my questions using only that context and clearly state 'I don't know' for any uncertainties."
+     # Unused rewrite prompt: "Rewrite the following as a human Iraqi-dialect
+     # call-centre agent, briefly, with warmth and respect for the caller."
+     rewrite = ": اعد كتابة مايلي و كأنك موظف بشري يتحدث اللهجة العراقية في مركز اتصالات و بشكل قصير ومختصر جدا وبحب و تقدير للسائل"
+
+     prompt = qdocs + "\n\n" + query + "\n\n" + cond1 + "\n\n"
+     response1 = llm.call_as_llm(prompt)
+     print()
+     print(response1)
+     print()
+
+     # Retrieve the single closest chunk to the answer and carry it forward
+     # as conversational context for the next turn.
+     docs3 = vectordb.similarity_search(response1, k=1)
+     response2 = " ".join(doc.page_content for doc in docs3)
+     context1 = context2
+     context2 = query + "\n" + response2
+     return response1
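
Assuming docs/chroma/ has already been built (see SemanticEngin.py below) and the OpenAI key is valid, a minimal sketch of calling this module from elsewhere; the follow-up question and the use of context2 are purely illustrative, not part of the commit:

```python
# Hypothetical usage sketch. Importing LoadVectorDB also runs its
# module-level RetrievalQA smoke test once.
from LoadVectorDB import response_to_query

# "What is the Blu Platinum card?" -- same sample query as above.
first = response_to_query("ما هي بطاقة بلو البلاتينية")

# Optionally feed the previous turn back in via context2.
follow_up = response_to_query("وما هي رسومها السنوية؟",  # "And what is its annual fee?" (illustrative)
                              context1="", context2=first)
print(follow_up)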
Semantic_Engine_V0/STT.py ADDED
@@ -0,0 +1,198 @@
+ import os
+ import re
+ import sys
+ import queue
+ import threading
+ import time
+ import pyaudio
+ import pygame
+ import torch
+ from google.cloud import speech_v1p1beta1 as speech
+ from google.cloud import texttospeech
+ from TTS.api import TTS
+
+ import LoadVectorDB
+ from LoadVectorDB import response_to_query
+ import TTSnew
+
+ # Local Coqui-TTS voice cloning, currently disabled in favour of TTSnew:
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+ # tts = TTS(model_path=r"C:\Users\essam.aldaoud\Desktop\Dahna\Project\new", config_path=r"C:\Users\essam.aldaoud\Desktop\Dahna\Project\new\config.json", progress_bar=False).to(device)
+
+ os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'modular-truck-412708-f03a74a9a717.json'
+
+ RATE = 16000
+ CHUNK = int(RATE / 10)  # 100 ms of audio per buffer
+
+
+ class MicrophoneStream:
+     """Opens a PyAudio input stream and yields audio chunks as a generator."""
+
+     # Class-level state shared with listen_print_loop: time of the last
+     # transcript and whether speech has been detected in this session.
+     counter = 0
+     flag = False
+
+     def __init__(self, rate, chunk):
+         self._rate = rate
+         self._chunk = chunk
+         self._buff = queue.Queue()
+         self.closed = True
+         MicrophoneStream.counter = time.time()
+         MicrophoneStream.flag = False
+
+     def __enter__(self):
+         self._audio_interface = pyaudio.PyAudio()
+         self._audio_stream = self._audio_interface.open(
+             format=pyaudio.paInt16,
+             channels=1,
+             rate=self._rate,
+             input=True,
+             frames_per_buffer=self._chunk,
+             stream_callback=self._fill_buffer,
+         )
+         self.closed = False
+         return self
+
+     def __exit__(self, type, value, traceback):
+         self._audio_stream.stop_stream()
+         self._audio_stream.close()
+         self.closed = True
+         self._buff.put(None)
+         self._audio_interface.terminate()
+
+     def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
+         self._buff.put(in_data)
+         return None, pyaudio.paContinue
+
+     def generator(self):
+         # Block until the first chunk arrives (its contents are discarded).
+         chunk = self._buff.get()
+         while not self.closed:
+             chunk = self._buff.get()
+             if chunk is None:
+                 return
+             data = [chunk]
+
+             # Drain whatever else is already buffered.
+             while True:
+                 try:
+                     chunk = self._buff.get(block=False)
+                     if chunk is None:
+                         return
+                     data.append(chunk)
+                 except queue.Empty:
+                     break
+             yield b"".join(data)
+             # End this streaming session after 2 s of silence once speech
+             # has been heard, so a response can be generated.
+             if time.time() - MicrophoneStream.counter > 2 and MicrophoneStream.flag:
+                 MicrophoneStream.counter = time.time()
+                 break
+
+
+ def listen_print_loop(responses):
+     num_chars_printed = 0
+     for response in responses:
+         if not response.results:
+             continue
+
+         result = response.results[0]
+         if not result.alternatives:
+             continue
+
+         transcript = result.alternatives[0].transcript
+         overwrite_chars = " " * (num_chars_printed - len(transcript))
+         MicrophoneStream.counter = time.time()
+         MicrophoneStream.flag = True
+
+         if not result.is_final:
+             num_chars_printed = len(transcript)
+         else:
+             # "توقف" = "stop" ("توقع" covers a common misrecognition of it).
+             if re.search(r"\b(exit|quit|توقف|توقع)\b", transcript, re.I):
+                 print("Exiting...")
+                 break
+             print("\033[41m" + transcript + overwrite_chars + "\033[0m")
+             query = (transcript + overwrite_chars).strip()
+             num_chars_printed = 0
+
+             if len(query) < 10:
+                 # Canned clarification for very short utterances:
+                 # "Could you please clarify what you mean?"
+                 # Earlier drafts: "لا تقلق، أنا هنا لمساعدتك" ("Don't worry, I'm here to help you"),
+                 # "ما هي المعلوماتْ التي تبحث عنها بالتحديدْ؟" ("What information exactly are you looking for?")
+                 response = "هل يمكنك توضيح ما تَقصده من فضلكْ؟"
+             else:
+                 response = response_to_query(query, context1="", context2="")
+
+             print()
+             print("\033[44m" + response + "\033[0m")
+             print()
+             MicrophoneStream.flag = False
+             if response == "":
+                 print("Empty response; skipping TTS.")
+                 continue
+
+             # Alternative playback path (local Coqui TTS + pygame) is left
+             # commented out; Google Cloud TTS via TTSnew is used instead.
+             TTSnew.say_it(response)
+
+
+ def main():
+     language_code = "ar-XA"
+     client = speech.SpeechClient()
+     config = speech.RecognitionConfig(
+         encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
+         sample_rate_hertz=RATE,
+         language_code=language_code,
+     )
+     streaming_config = speech.StreamingRecognitionConfig(
+         config=config, interim_results=True
+     )
+
+     # Greeting: "Welcome to our call centre. My name is Noor. How can I help you?"
+     say = "مرحبًا بك في مركز الاتصال لدينا. إسمي نورْ. كيف ممكنْ أن أُساعدكْ"
+     print("\033[44m" + say + "\033[0m")
+     TTSnew.say_it(say)
+
+     # Run up to 100 streaming sessions, one per user turn.
+     for j in range(100):
+         with MicrophoneStream(RATE, CHUNK) as stream:
+             audio_generator = stream.generator()
+             requests = (speech.StreamingRecognizeRequest(audio_content=content) for content in audio_generator)
+             responses = client.streaming_recognize(streaming_config, requests)
+             listen_print_loop(responses)
+
+
+ if __name__ == "__main__":
+     main()
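
End to end, the loop above is microphone → Google streaming STT → response_to_query → TTSnew.say_it. A text-only smoke test of the same chain, useful when no microphone is attached, might look like the sketch below (purely illustrative; it assumes the Chroma store and both the Google and OpenAI credentials are already in place):

```python
# Illustrative text-only pass through the pipeline, skipping audio capture.
from LoadVectorDB import response_to_query
import TTSnew

reply = response_to_query("ما هي بطاقة بلو البلاتينية")  # sample query from LoadVectorDB.py
print(reply)
TTSnew.say_it(reply)  # synthesize and play the Arabic answer
```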
Semantic_Engine_V0/SemanticEngin.py ADDED
@@ -0,0 +1,87 @@
+ import os
+ import sys
+ import datetime
+ import openai
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.vectorstores import Chroma
+
+ os.environ["OPENAI_API_KEY"] = "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"
+
+ current_date = datetime.datetime.now().date()
+ llm_name = "gpt-3.5-turbo-1106"
+ print(llm_name)
+
+ # Load the source PDF(s).
+ loaders = [
+     # PyPDFLoader("arabic100.pdf")
+     PyPDFLoader("p18.pdf")
+ ]
+
+ docs = []
+ for loader in loaders:
+     docs.extend(loader.load())
+
+ # Split the documents into overlapping chunks.
+ text_splitter = RecursiveCharacterTextSplitter(
+     chunk_size=1500,
+     chunk_overlap=150
+ )
+ splits = text_splitter.split_documents(docs)
+ print("Number of splits:", len(splits))
+
+ # Embed the chunks with the default OpenAI embedding model.
+ # Alternatives tried: text-embedding-curie-001, text-embedding-Babbage-001.
+ embedding = OpenAIEmbeddings()
+
+ # Build the Chroma store; delete ./docs/chroma first to rebuild from scratch.
+ persist_directory = 'docs/chroma/'
+ vectordb = Chroma.from_documents(
+     documents=splits,
+     embedding=embedding,
+     persist_directory=persist_directory
+ )
+ # print(vectordb._collection.count())
+
+ # Save the vectors to disk so LoadVectorDB.py can reopen them.
+ vectordb.persist()
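
Since LoadVectorDB.py and STT.py open docs/chroma/ at import time, this indexing script presumably needs to run once beforehand. A minimal sketch of that build step (assumes the working directory is Semantic_Engine_V0/ with p18.pdf present):

```python
# Illustrative one-off build step, not part of the commit.
import runpy

runpy.run_path("SemanticEngin.py")  # chunks p18.pdf, embeds it, persists docs/chroma/
```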
Semantic_Engine_V0/TTSnew.py ADDED
@@ -0,0 +1,62 @@
+ import os
+ from google.cloud import texttospeech
+ import pygame
+
+ # Set up authentication
+ os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'modular-truck-412708-f03a74a9a717.json'
+
+ # Create a client
+ client = texttospeech.TextToSpeechClient()
+
+ # Sample texts (module-level defaults, unused by say_it):
+ # 1) "...who developed a method for computing how information is encoded inside neurons"
+ # 2) "Welcome to our call centre. My name is Raghad, how can I help you?"
+ say = " الذي طور اسلوبا يسمح بحساب كيفية تشفير المعلومات داخل الخلايا العصبيةا"
+ say = "مرحبًا بك في مركز الاتصال لدينا. إسمي رغدا ، كيف ممكنْ أن أُساعدكْ"
+
+ # Voice parameters: female Arabic WaveNet voice.
+ voice = texttospeech.VoiceSelectionParams(
+     language_code="ar-XA",
+     name="ar-XA-Wavenet-A",
+     ssml_gender=texttospeech.SsmlVoiceGender.FEMALE
+ )
+
+ # Audio parameters: MP3 output, slightly faster than normal speaking rate.
+ audio_config = texttospeech.AudioConfig(
+     audio_encoding=texttospeech.AudioEncoding.MP3,
+     # audio_encoding=texttospeech.AudioEncoding.LINEAR16,
+     pitch=1,
+     speaking_rate=1.1
+ )
+
+
+ def say_it(say):
+     """Synthesize `say` with Google Cloud TTS and play it via pygame."""
+     synthesis_input = texttospeech.SynthesisInput(text=say)
+     response = client.synthesize_speech(input=synthesis_input, voice=voice, audio_config=audio_config)
+
+     # unload() fails if the mixer has not been initialised yet; in that case
+     # fall back to the same write/init/play sequence.
+     try:
+         pygame.mixer.music.unload()
+         with open("output2.mp3", "wb") as out:
+             out.write(response.audio_content)
+         pygame.mixer.init()
+         pygame.mixer.music.load("output2.mp3")
+         pygame.mixer.music.play()
+     except Exception:
+         with open("output2.mp3", "wb") as out:
+             out.write(response.audio_content)
+         pygame.mixer.init()
+         pygame.mixer.music.load("output2.mp3")
+         pygame.mixer.music.play()
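
A minimal sketch of reusing the synthesizer on its own (assumes GOOGLE_APPLICATION_CREDENTIALS points at a valid service-account file and an audio output device exists; output2.mp3 is written next to the script):

```python
# Hypothetical standalone caller, not part of the commit.
import time
import TTSnew

TTSnew.say_it("مرحبًا بك في مركز الاتصال لدينا")  # "Welcome to our call centre"
time.sleep(5)  # pygame.mixer.music.play() is non-blocking; keep the process alive
```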
Semantic_Engine_V0/__pycache__/LoadVectorDB.cpython-311.pyc ADDED
Binary file (3.96 kB).
 
Semantic_Engine_V0/__pycache__/TTSnew.cpython-311.pyc ADDED
Binary file (2.93 kB).
 
Semantic_Engine_V0/modular-truck-412708-f03a74a9a717.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "type": "service_account",
+ "project_id": "modular-truck-412708",
+ "private_key_id": "f03a74a9a717032d62ec434bc6d18110a8f96a68",
+ "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5Ni4AJV4czfA8\n5uUUHAW3vNxUrcVdxveOw4s4OOZQhgXmTS83wBkHkMyGarBqDNzg24XXQPF98knZ\ncId78EXglzSL1aua+We4iE81s6eLrROaV6wKkt5cnfX3dqTcj4ZNQFcKu3ifs6yb\njZvG7X2dRTUaUNlu8Ubjslx3kWlkP6mDh+TZtSuNcl4JJIZm6ClX9Suc7Stnv1+X\nF9HFKzCuiInp+skJ/K/IgVTzG8l12Lfe/Ts5HMr+yKELJUGzgaGF0YagDToDHyCw\nygastZMITXY7JIYVizwpcJCt1axlVb+xwDzSniGZDe4/AdgH82pPj2QMqQtYkZCm\nWmAhWwZNAgMBAAECggEAHyDcxuAA6xDut2GsLz7NdDXdBytkkFAUQ+2YHMUtamu4\nGp6u2K0j18ptoYrVZyGNzwn+OvEalWsvH8lCsCEwMWDgbz8kdinEVbatL26oMnjO\nomJpUyafkkvnLuYFK9/AeAPgDTj4e0josgUxIRRByDi9RdTAUp3hJvxPf7YUE0m5\nDk9EML08ramrVEyFSytYFSa+apq6jqukmwWn1PUMgV2nbpLZS+Ov1jCoAyrGTqbv\nlzcuDygufuOZ+39/VL9UgoqxFzY1D4fkd0y1OSmo+yMcbYz5D5b/TT8Omcju5+im\nuyRgi/93Vb2Ek3cHQ0dz+p2yNpHhI9fVHyEjvU8j6QKBgQD30PQz4txlE2QPbHso\n5JLmYGIE7yj87w8YwcWFsZ+3rCllyo3XiBXos4EVAB/OhQ528RyVu/QEfDYQj4E7\naDSZu+rvp9oBrfwng1SPcGDYxfeRwoyEcIiLOnp5A6j/d/vYNhpj6anA6L2RswlD\n2IjMbUkXxqVbc8buPkGrzEL/lQKBgQC/U/bzPkUQwlc3TqYh7kZ5XrCmHCeR//2Q\n3wrAXOXgn0QTP/mC6ahzPa98icfPMKb3oKsnyO7LueQXzIEgI43YXIbuIKyUxrIr\npeGG4VfMRS4zPp07sdmhsuEb3oxwG49VVU7xVX4Nj3uZ1FBSc3CDm5HXweJLszHW\n4ZL2wgqd2QKBgC00pcdtQ80AW4lgvZq5D38M5/SEeBKjvTpD5rc7rqn/stGJwJQn\nk++OBycAjczLgtb5psA9uAa1bdtZgjaHDMlaeUHcea1CwrOmhl9gHZqNilrBhYaH\nTLExKrQ2kbaAeaV4QRomvruc+S/B3BfPQS0hZmr65qEnHovJOYuQSEx9AoGAJlrc\nvLKVAeETvuCjaVXtBtO2sfYe8KPS30H3U8zLwJXs/bbrfBT/UerhzSOJzL1CG444\nAYfEZ07irOqwUBMQknkqC369Wi3i9eERLwrpoOeVdWxt9NOYf1Pu0Llyid7cgwzR\nAIxU8r761kG7wGun+JotSIVzqh1INn2OCq2nlzECgYEAsHcDLSiihqrHwXgvtoJU\n321UMn6PlkpMr3XHVe3VJl7iI0nm1Yq0j6D9EbJGgLBKnMGm8oRY7B229gjs3hFi\n0/hFgg6NekAIgTPIc+lJZaTZIWYpe9hls4nGy8XuuX6oIYlMUNvJSpDjA/OmkP6H\nLdyOzqx5kikBIQspw6svRwY=\n-----END PRIVATE KEY-----\n",
+ "client_email": "[email protected]",
+ "client_id": "107035152631291440440",
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://oauth2.googleapis.com/token",
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/dahna-388%40modular-truck-412708.iam.gserviceaccount.com",
+ "universe_domain": "googleapis.com"
+ }
Semantic_Engine_V0/output2.mp3 ADDED
Binary file (21.4 kB).
 
Semantic_Engine_V0/p18.pdf ADDED
Binary file (610 kB).