willwade commited on
Commit
f5b302e
·
0 Parent(s):

Initial commit

Browse files
Files changed (15) hide show
  1. .gitattributes +2 -0
  2. .gitignore +160 -0
  3. LICENSE +21 -0
  4. README-HF.md +63 -0
  5. README.md +114 -0
  6. app.py +176 -0
  7. app_simple.py +148 -0
  8. demo.py +27 -0
  9. huggingface-space-metadata.json +11 -0
  10. requirements-spaces.txt +6 -0
  11. requirements.txt +6 -0
  12. social_graph.json +236 -0
  13. test_components.py +83 -0
  14. to-do.md +92 -0
  15. utils.py +188 -0
.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Auto detect text files and perform LF normalization
2
+ * text=auto
.gitignore ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/#use-with-ide
110
+ .pdm.toml
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .env
124
+ .venv
125
+ env/
126
+ venv/
127
+ ENV/
128
+ env.bak/
129
+ venv.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+
149
+ # pytype static type analyzer
150
+ .pytype/
151
+
152
+ # Cython debug symbols
153
+ cython_debug/
154
+
155
+ # PyCharm
156
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
159
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160
+ #.idea/
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 will wade
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README-HF.md ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AAC Social Graph Assistant for MND
2
+
3
+ An Augmentative and Alternative Communication (AAC) system that uses a social graph to provide contextually relevant suggestions for users with Motor Neurone Disease (MND).
4
+
5
+ ## About
6
+
7
+ This demo showcases an AAC system that uses a social graph to provide contextually relevant suggestions for users with MND. The system allows users to select who they are talking to and provides suggestions based on the relationship and common topics of conversation, tailored to the British context with NHS healthcare terminology.
8
+
9
+ ## Features
10
+
11
+ - **Person-Specific Suggestions**: Select who you're talking to and get suggestions tailored to that relationship
12
+ - **Context-Aware**: Uses a social graph to understand relationships and common topics
13
+ - **Multiple Suggestion Types**: Get suggestions from a language model, common phrases, or predefined utterance categories
14
+ - **British Context**: Designed with British English and NHS healthcare context in mind
15
+ - **MND-Specific**: Tailored for the needs of someone with Motor Neurone Disease
16
+ - **Expandable**: Easily improve the system by enhancing the social graph JSON file
17
+
18
+ ## Current Social Graph Context
19
+
20
+ The current social graph represents a British person with MND who:
21
+
22
+ - Lives in Manchester with their wife Emma and two children (Mabel, 4 and Billy, 7)
23
+ - Was diagnosed with MND 5 months ago
24
+ - Works as a computer programmer
25
+ - Has friends from Scout days growing up in South East London
26
+ - Enjoys cycling and hiking in the Peak District and Lake District
27
+ - Has a healthcare team including a neurologist, MND nurse specialist, physiotherapist, and speech therapist
28
+ - Is supported by work colleagues with flexible arrangements
29
+ - Has family in South East London
30
+
31
+ ## How to Use
32
+
33
+ 1. Select a person from the dropdown menu
34
+ 2. View their context information
35
+ 3. Optionally enter current conversation context
36
+ 4. Choose a suggestion type
37
+ 5. Click "Generate Suggestions" to get contextually relevant phrases
38
+ 6. Click "Speak Selected Text" to simulate speaking the selected phrase
39
+
40
+ ## Customizing the Social Graph
41
+
42
+ You can customize the system by editing the `social_graph.json` file. The file has the following structure:
43
+
44
+ ```json
45
+ {
46
+ "people": {
47
+ "person_id": {
48
+ "name": "Person Name",
49
+ "role": "Relationship",
50
+ "topics": ["Topic 1", "Topic 2"],
51
+ "frequency": "daily/weekly/monthly",
52
+ "common_phrases": ["Phrase 1", "Phrase 2"],
53
+ "context": "Detailed context about the relationship"
54
+ }
55
+ },
56
+ "places": ["Place 1", "Place 2"],
57
+ "topics": ["Topic 1", "Topic 2"],
58
+ "common_utterances": {
59
+ "category1": ["Utterance 1", "Utterance 2"],
60
+ "category2": ["Utterance 3", "Utterance 4"]
61
+ }
62
+ }
63
+ ```
README.md ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AAC Social Graph Assistant
2
+
3
+ An Augmentative and Alternative Communication (AAC) system that uses a social graph to provide contextually relevant suggestions for users with Motor Neurone Disease (MND). This demo is designed to be hosted on Hugging Face Spaces using Gradio.
4
+
5
+ ## Features
6
+
7
+ - **Person-Specific Suggestions**: Select who you're talking to and get suggestions tailored to that relationship
8
+ - **Context-Aware**: Uses a social graph to understand relationships and common topics
9
+ - **Multiple Suggestion Types**: Get suggestions from a language model, common phrases, or predefined utterance categories
10
+ - **British Context**: Designed with British English and NHS healthcare context in mind
11
+ - **MND-Specific**: Tailored for the needs of someone with Motor Neurone Disease
12
+ - **Expandable**: Easily improve the system by enhancing the social graph JSON file
13
+
14
+ ## Getting Started
15
+
16
+ ### Prerequisites
17
+
18
+ - Python 3.8+
19
+ - Dependencies listed in `requirements.txt`
20
+
21
+ ### Installation
22
+
23
+ 1. Clone this repository
24
+ 2. Install the required dependencies:
25
+
26
+ ```bash
27
+ pip install -r requirements.txt
28
+ ```
29
+
30
+ 3. Run the application:
31
+
32
+ ```bash
33
+ python app.py
34
+ ```
35
+
36
+ 4. Open your browser and navigate to the URL shown in the terminal (typically http://127.0.0.1:7860)
37
+
38
+ ## How It Works
39
+
40
+ 1. **Social Graph**: The system uses a JSON-based social graph (`social_graph.json`) that contains information about people, their relationships, common topics, and phrases.
41
+
42
+ 2. **Context Retrieval**: When you select a person, the system retrieves relevant context information from the social graph.
43
+
44
+ 3. **Suggestion Generation**: Based on the selected person and optional conversation context, the system generates suggestions using:
45
+ - A language model (Flan-T5)
46
+ - Common phrases associated with the person
47
+ - General utterance categories (greetings, needs, emotions, questions)
48
+
49
+ 4. **User Interface**: The Gradio interface provides an intuitive way to interact with the system, select people, and get suggestions.
50
+
51
+ ## Customizing the Social Graph
52
+
53
+ You can customize the system by editing the `social_graph.json` file. The file has the following structure:
54
+
55
+ ```json
56
+ {
57
+ "people": {
58
+ "person_id": {
59
+ "name": "Person Name",
60
+ "role": "Relationship",
61
+ "topics": ["Topic 1", "Topic 2"],
62
+ "frequency": "daily/weekly/monthly",
63
+ "common_phrases": ["Phrase 1", "Phrase 2"],
64
+ "context": "Detailed context about the relationship"
65
+ }
66
+ },
67
+ "places": ["Place 1", "Place 2"],
68
+ "topics": ["Topic 1", "Topic 2"],
69
+ "common_utterances": {
70
+ "category1": ["Utterance 1", "Utterance 2"],
71
+ "category2": ["Utterance 3", "Utterance 4"]
72
+ }
73
+ }
74
+ ```
75
+
76
+ ## Current Social Graph Context
77
+
78
+ The current social graph represents a British person with MND who:
79
+
80
+ - Lives in Manchester with their wife Emma and two children (Mabel, 4 and Billy, 7)
81
+ - Was diagnosed with MND 5 months ago
82
+ - Works as a computer programmer
83
+ - Has friends from Scout days growing up in South East London
84
+ - Enjoys cycling and hiking in the Peak District and Lake District
85
+ - Has a healthcare team including a neurologist, MND nurse specialist, physiotherapist, and speech therapist
86
+ - Is supported by work colleagues with flexible arrangements
87
+ - Has family in South East London
88
+
89
+ ## Deployment to Hugging Face Spaces
90
+
91
+ To deploy this application to Hugging Face Spaces:
92
+
93
+ 1. Create a new Space on Hugging Face
94
+ 2. Select Gradio as the SDK
95
+ 3. Upload the files from this repository
96
+ 4. The Space will automatically build and deploy the application
97
+
98
+ ## Future Improvements
99
+
100
+ - Add speech recognition for input
101
+ - Implement text-to-speech for output
102
+ - Add a visual representation of the social graph
103
+ - Support for multiple users with different social graphs
104
+ - Add emotion/sentiment detection for more contextually appropriate suggestions
105
+
106
+ ## License
107
+
108
+ This project is open source and available under the MIT License.
109
+
110
+ ## Acknowledgments
111
+
112
+ - Built with [Gradio](https://www.gradio.app/)
113
+ - Uses [Hugging Face Transformers](https://huggingface.co/transformers/) for language models
114
+ - Uses [Sentence Transformers](https://www.sbert.net/) for semantic matching
app.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import whisper
3
+ import tempfile
4
+ import os
5
+ from utils import SocialGraphManager, SuggestionGenerator
6
+
7
# Wire up the social-graph backend and the suggestion engine at import time.
social_graph = SocialGraphManager("social_graph.json")
suggestion_generator = SuggestionGenerator()

# Load the smallest Whisper model for speed; degrade gracefully when the
# model (or its weights) cannot be loaded so the rest of the app still runs.
try:
    whisper_model = whisper.load_model("tiny")
    whisper_loaded = True
except Exception as exc:
    print(f"Warning: Could not load Whisper model: {exc}")
    whisper_loaded = False
18
+
19
+
20
def format_person_display(person):
    """Render a person record as the dropdown label ``"Name (role)"``."""
    name, role = person["name"], person["role"]
    return f"{name} ({role})"
23
+
24
+
25
def get_people_choices():
    """Build the dropdown mapping of display label -> person id."""
    choices = {}
    for person in social_graph.get_people_list():
        choices[format_person_display(person)] = person["id"]
    return choices
29
+
30
+
31
def get_suggestion_categories():
    """Return the utterance category names defined in the social graph."""
    # Guard clause: graphs without a common_utterances section yield no categories.
    if "common_utterances" not in social_graph.graph:
        return []
    return list(social_graph.graph["common_utterances"])
36
+
37
+
38
def on_person_change(person_id):
    """Handle person selection change.

    Returns a markdown context summary and the person's common phrases
    (one per paragraph) for the two output widgets.
    """
    if not person_id:
        return "", ""

    ctx = social_graph.get_person_context(person_id)

    # Assemble the markdown summary shown next to the dropdown.
    sections = [
        f"**{ctx.get('name', '')}** - {ctx.get('role', '')}",
        f"**Topics:** {', '.join(ctx.get('topics', []))}",
        f"**Frequency:** {ctx.get('frequency', '')}",
        f"**Context:** {ctx.get('context', '')}",
    ]
    context_info = "\n\n".join(sections)

    # Common phrases stored for this person, blank-line separated.
    phrases_text = "\n\n".join(ctx.get("common_phrases", []))

    return context_info, phrases_text
56
+
57
+
58
def generate_suggestions(person_id, user_input, suggestion_type):
    """Produce suggestion text for the selected person and conversation input.

    The suggestion source depends on *suggestion_type*: the language model,
    the person's stored common phrases, or a predefined utterance category.
    """
    if not person_id:
        return "Please select a person first."

    person_context = social_graph.get_person_context(person_id)

    if suggestion_type == "model":
        # Free-form suggestion from the language model.
        return suggestion_generator.generate_suggestion(person_context, user_input)

    if suggestion_type == "common_phrases":
        # Phrases stored against this person, ranked against the input.
        return "\n\n".join(social_graph.get_relevant_phrases(person_id, user_input))

    if suggestion_type in get_suggestion_categories():
        # Predefined utterances for a category such as greetings or needs.
        return "\n\n".join(social_graph.get_common_utterances(suggestion_type))

    # Unknown suggestion type: nothing to offer.
    return "No suggestions available."
84
+
85
+
86
def transcribe_audio(audio_path):
    """Return the Whisper transcription of the audio file at *audio_path*.

    Falls back to a user-facing error string when the model is unavailable
    or transcription fails, so the UI never sees an exception.
    """
    if not whisper_loaded:
        return "Whisper model not loaded. Please check your installation."

    try:
        return whisper_model.transcribe(audio_path)["text"]
    except Exception as exc:
        print(f"Error transcribing audio: {exc}")
        return "Could not transcribe audio. Please try again."
98
+
99
+
100
def speak_text(text):
    """Placeholder 'speech' handler until real TTS is integrated.

    Mirrors the helper of the same name in app_simple.py so both apps
    behave identically.
    """
    return f"Speaking: {text}"


# Create the Gradio interface
with gr.Blocks(title="AAC Social Graph Assistant") as demo:
    gr.Markdown("# AAC Social Graph Assistant")
    gr.Markdown(
        "Select who you're talking to, and get contextually relevant suggestions."
    )

    with gr.Row():
        with gr.Column(scale=1):
            # Person selection
            person_dropdown = gr.Dropdown(
                choices=get_people_choices(), label="Who are you talking to?"
            )

            # Context display
            context_display = gr.Markdown(label="Context Information")

            # User input section
            with gr.Row():
                user_input = gr.Textbox(
                    label="Your current conversation (optional)",
                    placeholder="Type or paste current conversation context here...",
                    lines=3,
                )

            # Audio input
            with gr.Row():
                audio_input = gr.Audio(
                    label="Or record your conversation",
                    type="filepath",
                    sources=["microphone"],
                )
                transcribe_btn = gr.Button("Transcribe", variant="secondary")

            # Suggestion type selection
            suggestion_type = gr.Radio(
                choices=["model", "common_phrases"] + get_suggestion_categories(),
                value="model",
                label="Suggestion Type",
            )

            # Generate button
            generate_btn = gr.Button("Generate Suggestions", variant="primary")

        with gr.Column(scale=1):
            # Common phrases
            common_phrases = gr.Textbox(
                label="Common Phrases",
                placeholder="Common phrases will appear here...",
                lines=5,
            )

            # Suggestions output
            suggestions_output = gr.Textbox(
                label="Suggested Phrases",
                placeholder="Suggestions will appear here...",
                lines=8,
            )

            # BUG FIX: speak_btn and speech_output were referenced by the
            # event wiring below but never created here (they existed only in
            # app_simple.py), which raised NameError when this module loaded.
            speak_btn = gr.Button("Speak Selected Text", variant="secondary")
            speech_output = gr.Textbox(
                label="Speech Output",
                placeholder="Speech output will appear here...",
                lines=2,
            )

    # Set up event handlers
    person_dropdown.change(
        on_person_change,
        inputs=[person_dropdown],
        outputs=[context_display, common_phrases],
    )

    # BUG FIX: the Transcribe button was created but never wired up; route the
    # recorded audio through Whisper into the conversation textbox.
    transcribe_btn.click(
        transcribe_audio,
        inputs=[audio_input],
        outputs=[user_input],
    )

    generate_btn.click(
        generate_suggestions,
        inputs=[person_dropdown, user_input, suggestion_type],
        outputs=[suggestions_output],
    )

    speak_btn.click(speak_text, inputs=[suggestions_output], outputs=[speech_output])

# Launch the app
if __name__ == "__main__":
    demo.launch()
app_simple.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from utils import SocialGraphManager, SuggestionGenerator
3
+
4
+ # Initialize the social graph manager and suggestion generator
5
+ social_graph = SocialGraphManager("social_graph.json")
6
+ suggestion_generator = SuggestionGenerator()
7
+
8
def format_person_display(person):
    """Build the dropdown label for *person*: ``"Name (role)"``."""
    return "{} ({})".format(person["name"], person["role"])
11
+
12
def get_people_choices():
    """Map each person's display label to their id for the dropdown."""
    people = social_graph.get_people_list()
    labels = [format_person_display(p) for p in people]
    ids = [p["id"] for p in people]
    return dict(zip(labels, ids))
16
+
17
def get_suggestion_categories():
    """List the utterance category names present in the social graph."""
    # No categories when the graph lacks a common_utterances section.
    if "common_utterances" not in social_graph.graph:
        return []
    return list(social_graph.graph["common_utterances"])
22
+
23
def on_person_change(person_id):
    """Handle person selection change.

    Returns:
        A pair of strings: the markdown context summary and the person's
        common phrases (blank-line separated) for the two output widgets.
    """
    if not person_id:
        # BUG FIX: both outputs feed string components (Markdown/Textbox) and
        # the non-empty branch returns two strings; returning ("", []) here was
        # type-inconsistent and disagreed with app.py, which returns ("", "").
        return "", ""

    person_context = social_graph.get_person_context(person_id)

    # Markdown summary of who the person is and how you talk to them.
    context_info = f"**{person_context.get('name', '')}** - {person_context.get('role', '')}\n\n"
    context_info += f"**Topics:** {', '.join(person_context.get('topics', []))}\n\n"
    context_info += f"**Frequency:** {person_context.get('frequency', '')}\n\n"
    context_info += f"**Context:** {person_context.get('context', '')}"

    # Common phrases stored for this person, one per paragraph.
    phrases = person_context.get("common_phrases", [])
    phrases_text = "\n\n".join(phrases)

    return context_info, phrases_text
39
+
40
def generate_suggestions(person_id, user_input, suggestion_type):
    """Produce suggestion text for the selected person.

    Dispatches on *suggestion_type*: language model, the person's stored
    common phrases, or a predefined utterance category.
    """
    if not person_id:
        return "Please select a person first."

    person_context = social_graph.get_person_context(person_id)

    if suggestion_type == "model":
        # Free-form suggestion from the language model.
        return suggestion_generator.generate_suggestion(person_context, user_input)

    if suggestion_type == "common_phrases":
        # Person-specific phrases ranked against the conversation input.
        relevant = social_graph.get_relevant_phrases(person_id, user_input)
        return "\n\n".join(relevant)

    if suggestion_type in get_suggestion_categories():
        # Canned utterances for a category (greetings, needs, ...).
        return "\n\n".join(social_graph.get_common_utterances(suggestion_type))

    # Unknown suggestion type: nothing to offer.
    return "No suggestions available."
64
+
65
def speak_text(text):
    """Placeholder for TTS integration: echo what would be spoken."""
    return "Speaking: {}".format(text)
68
+
69
# Build the Gradio UI: inputs (person, conversation, suggestion type) on the
# left, outputs (phrases, suggestions, speech placeholder) on the right.
with gr.Blocks(title="AAC Social Graph Assistant") as demo:
    gr.Markdown("# AAC Social Graph Assistant")
    gr.Markdown("Select who you're talking to, and get contextually relevant suggestions.")

    with gr.Row():
        with gr.Column(scale=1):
            # Who the user is talking to.
            person_dropdown = gr.Dropdown(
                choices=get_people_choices(),
                label="Who are you talking to?",
            )

            # Markdown summary of the selected person.
            context_display = gr.Markdown(label="Context Information")

            # Optional free-text conversation context.
            user_input = gr.Textbox(
                label="Your current conversation (optional)",
                placeholder="Type or paste current conversation context here...",
                lines=3,
            )

            # Which suggestion source to use.
            suggestion_type = gr.Radio(
                choices=["model", "common_phrases"] + get_suggestion_categories(),
                value="model",
                label="Suggestion Type",
            )

            generate_btn = gr.Button("Generate Suggestions", variant="primary")

        with gr.Column(scale=1):
            # Person-specific phrases, filled on selection change.
            common_phrases = gr.Textbox(
                label="Common Phrases",
                placeholder="Common phrases will appear here...",
                lines=5,
            )

            # Generated suggestions.
            suggestions_output = gr.Textbox(
                label="Suggested Phrases",
                placeholder="Suggestions will appear here...",
                lines=8,
            )

            # Placeholder speech pipeline: button plus echo textbox.
            speak_btn = gr.Button("Speak Selected Text", variant="secondary")
            speech_output = gr.Textbox(
                label="Speech Output",
                placeholder="Speech output will appear here...",
                lines=2,
            )

    # Event wiring.
    person_dropdown.change(
        on_person_change,
        inputs=[person_dropdown],
        outputs=[context_display, common_phrases],
    )
    generate_btn.click(
        generate_suggestions,
        inputs=[person_dropdown, user_input, suggestion_type],
        outputs=[suggestions_output],
    )
    speak_btn.click(
        speak_text,
        inputs=[suggestions_output],
        outputs=[speech_output],
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()
demo.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import json

# Load the generation model once at startup.
model_name = "google/flan-t5-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
rag_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

# Load the social graph ("knowledge graph").
with open("social_graph.json", "r") as f:
    kg = json.load(f)

# BUG FIX: the original did kg["people"]["bob"], which raises KeyError when
# that id is absent (the bundled graph's people are emma, billy, mabel, ...),
# and then ignored the loaded record in favour of a hard-coded string.
# Look the person up defensively and build the context from the graph itself.
person_id = "billy"  # the son in the bundled social_graph.json
person = kg["people"].get(person_id)
if person is None:
    # Fall back to any person in the graph so the demo still runs.
    person_id, person = next(iter(kg["people"].items()))

context = (
    f"{person['name']} is the user's {person['role']}. "
    f"They talk about {', '.join(person['topics'])} ({person['frequency']})."
)

# RAG-style prompt: supply the graph-derived context, then ask for a phrase.
prompt = f"""Context: {context}
User wants to say something appropriate to {person['name']}. Suggest a phrase:"""

# Generate and print the model's suggestion.
response = rag_pipeline(prompt, max_length=50)
print(response[0]["generated_text"])
huggingface-space-metadata.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "title": "AAC Social Graph Assistant",
3
+ "emoji": "💬",
4
+ "colorFrom": "blue",
5
+ "colorTo": "indigo",
6
+ "sdk": "gradio",
7
+ "sdk_version": "4.0.0",
8
+ "app_file": "app.py",
9
+ "pinned": false,
10
+ "license": "mit"
11
+ }
requirements-spaces.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ transformers>=4.30.0
3
+ sentence-transformers>=2.2.2
4
+ torch>=2.0.0
5
+ numpy>=1.24.0
6
+ openai-whisper>=20231117
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ gradio>=4.0.0
2
+ transformers>=4.30.0
3
+ sentence-transformers>=2.2.2
4
+ torch>=2.0.0
5
+ numpy>=1.24.0
6
+ openai-whisper>=20231117
social_graph.json ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "people": {
3
+ "emma": {
4
+ "name": "Emma",
5
+ "role": "wife",
6
+ "topics": ["family plans", "children's activities", "home organization", "weekend outings", "mutual friends"],
7
+ "frequency": "daily",
8
+ "common_phrases": [
9
+ "Have you taken your medication?",
10
+ "I love you",
11
+ "Shall we watch something tonight?",
12
+ "The kids miss you when you're resting",
13
+ "Do you need anything from the shops?",
14
+ "Your mum called earlier"
15
+ ],
16
+ "context": "Emma is your wife and partner of 12 years. You met at work where she was in HR. She's been incredibly supportive since your MND diagnosis 5 months ago. She's trying to balance caring for you, the children, and maintaining some normalcy in family life. She's originally from York but has embraced Manchester life."
17
+ },
18
+ "mabel": {
19
+ "name": "Mabel",
20
+ "role": "daughter",
21
+ "topics": ["nursery school", "drawing", "dinosaurs", "bedtime stories", "playground"],
22
+ "frequency": "daily",
23
+ "common_phrases": [
24
+ "Daddy, look what I made!",
25
+ "Can you read me a story?",
26
+ "I love dinosaurs",
27
+ "Why can't you play chase anymore?",
28
+ "When will you get better?",
29
+ "Can we go to the park?"
30
+ ],
31
+ "context": "Mabel is your 4-year-old daughter who's full of energy and curiosity. She's just starting to understand that you're ill but doesn't grasp the full implications. She loves when you read to her and is obsessed with dinosaurs. She's very affectionate and often wants cuddles."
32
+ },
33
+ "billy": {
34
+ "name": "Billy",
35
+ "role": "son",
36
+ "topics": ["school", "football", "video games", "science experiments", "cycling"],
37
+ "frequency": "daily",
38
+ "common_phrases": [
39
+ "Dad, can you help with my homework?",
40
+ "Did you see the United match?",
41
+ "Can I play on the iPad?",
42
+ "When can we go cycling again?",
43
+ "I scored a goal at football practice!",
44
+ "I miss our bike rides"
45
+ ],
46
+ "context": "Billy is your 7-year-old son who's becoming more aware of your condition. He's a bright boy who loves football (Manchester United), science, and cycling. He often asks when you'll be able to do activities together again. He's protective of his little sister despite their occasional squabbles."
47
+ },
48
+ "dr_patel": {
49
+ "name": "Dr. Patel",
50
+ "role": "neurologist",
51
+ "topics": ["MND progression", "symptom management", "medication", "clinical trials", "assistive technology"],
52
+ "frequency": "fortnightly",
53
+ "common_phrases": [
54
+ "How have your symptoms been progressing?",
55
+ "Are you experiencing any new difficulties?",
56
+ "Let's discuss adjusting your medication",
57
+ "Have you been using the assistive devices?",
58
+ "There's a new trial I think you might be eligible for",
59
+ "How are you coping mentally?"
60
+ ],
61
+ "context": "Dr. Patel is your neurologist at Manchester Royal Infirmary who's been managing your MND since diagnosis 5 months ago. She's direct but compassionate, and always takes time to explain treatment options. She's particularly interested in getting you enrolled in clinical trials that might help slow progression."
62
+ },
63
+ "jenny": {
64
+ "name": "Jenny",
65
+ "role": "MND nurse specialist",
66
+ "topics": ["daily management", "equipment needs", "NHS services", "symptom control", "practical advice"],
67
+ "frequency": "weekly",
68
+ "common_phrases": [
69
+ "How are you managing at home?",
70
+ "Let's review what equipment might help",
71
+ "I can refer you to occupational therapy",
72
+ "Have you considered voice banking?",
73
+ "Are you having any issues with the new equipment?",
74
+ "How is Emma coping?"
75
+ ],
76
+ "context": "Jenny is your MND specialist nurse who visits weekly to help manage your condition. She's a practical problem-solver who knows all the NHS systems and how to get things done quickly. She's been invaluable in getting you the right equipment and support services."
77
+ },
78
+ "dave": {
79
+ "name": "Dave",
80
+ "role": "best mate",
81
+ "topics": ["programming", "tech news", "football", "old scout adventures", "pub quizzes"],
82
+ "frequency": "weekly",
83
+ "common_phrases": [
84
+ "Fancy watching the match this weekend?",
85
+ "Have you seen this new tech?",
86
+ "Remember that camping disaster in the Lake District?",
87
+ "How's the voice banking going?",
88
+ "The lads are asking about you",
89
+ "You're still better at coding than me, even with one hand!"
90
+ ],
91
+ "context": "Dave has been your best mate since Scout days in South East London. He also moved to Manchester for work and has been your rock since the diagnosis. He treats you exactly the same as before, which you appreciate. He's a software developer too and brings industry gossip and tech news when he visits."
92
+ },
93
+ "mum": {
94
+ "name": "Mum",
95
+ "role": "mother",
96
+ "topics": ["family news", "childhood memories", "South London gossip", "health advice", "grandchildren"],
97
+ "frequency": "twice weekly",
98
+ "common_phrases": [
99
+ "Are you eating properly?",
100
+ "The grandkids are growing so fast",
101
+ "Have you tried that treatment I read about?",
102
+ "Do you need me to come up for a few days?",
103
+ "Your sister sends her love",
104
+ "Remember when you used to climb that tree in the garden?"
105
+ ],
106
+ "context": "Your mum still lives in the family home in South East London where you grew up. She's worried sick about your diagnosis but tries to stay positive. She visits monthly and calls twice a week. She tends to fuss and offer well-meaning but sometimes unhelpful health advice. She adores her grandchildren."
107
+ },
108
+ "alex": {
109
+ "name": "Alex",
110
+ "role": "work colleague",
111
+ "topics": ["programming projects", "work gossip", "flexible working", "accessibility tech", "industry news"],
112
+ "frequency": "twice weekly",
113
+ "common_phrases": [
114
+ "The project is still on track",
115
+ "Don't worry about the deadline",
116
+ "We've implemented those accessibility features you suggested",
117
+ "The team misses you in the office",
118
+ "Can I get your input on this code?",
119
+ "HR is being supportive about your situation"
120
+ ],
121
+ "context": "Alex is your team lead at work who's been incredibly supportive, setting up remote working and flexible hours as your condition progresses. They visit occasionally and call twice weekly to keep you in the loop. They're making efforts to implement accessibility features you've suggested that help you continue working."
122
+ },
123
+ "physio": {
124
+ "name": "Claire",
125
+ "role": "physiotherapist",
126
+ "topics": ["exercises", "mobility", "equipment", "muscle maintenance", "pain management"],
127
+ "frequency": "weekly",
128
+ "common_phrases": [
129
+ "Let's try these new exercises",
130
+ "How's your range of motion today?",
131
+ "Are you doing the daily stretches?",
132
+ "This equipment might help with that",
133
+ "Tell me where the discomfort is",
134
+ "You're making good progress with these movements"
135
+ ],
136
+ "context": "Claire is your NHS physiotherapist who comes weekly to help maintain muscle function and mobility. She's practical and encouraging, always pushing you to do a bit more than you think you can. She's knowledgeable about MND and adapts exercises as your condition changes."
137
+ },
138
+ "speech_therapist": {
139
+ "name": "Mark",
140
+ "role": "speech and language therapist",
141
+ "topics": ["speech exercises", "voice banking", "communication devices", "swallowing techniques", "AAC options"],
142
+ "frequency": "fortnightly",
143
+ "common_phrases": [
144
+ "Let's practice those vocal exercises",
145
+ "How's the voice banking coming along?",
146
+ "Have you tried the new communication app?",
147
+ "These techniques might help with swallowing",
148
+ "Your speech clarity is holding up well",
149
+ "Let's explore some AAC options for the future"
150
+ ],
151
+ "context": "Mark is your speech and language therapist who's helping you maintain speech function and prepare for future communication needs. He's tech-savvy and always researching the latest AAC solutions. He's been instrumental in getting your voice banking set up and finding communication solutions that fit your tech background."
152
+ }
153
+ },
154
+ "places": [
155
+ "home in Manchester",
156
+ "Manchester Royal Infirmary",
157
+ "MND clinic",
158
+ "local park",
159
+ "children's school",
160
+ "parents' house in London",
161
+ "Peak District",
162
+ "Lake District",
163
+ "home office",
164
+ "physiotherapy center"
165
+ ],
166
+ "topics": [
167
+ "MND progression",
168
+ "family activities",
169
+ "children's development",
170
+ "programming projects",
171
+ "assistive technology",
172
+ "Manchester United",
173
+ "cycling memories",
174
+ "hiking in the Peaks",
175
+ "childhood in London",
176
+ "voice banking",
177
+ "future planning",
178
+ "symptom management",
179
+ "accessibility",
180
+ "Scout memories",
181
+ "work accommodations"
182
+ ],
183
+ "common_utterances": {
184
+ "greetings": [
185
+ "Hello",
186
+ "Alright?",
187
+ "Good to see you",
188
+ "How's it going?",
189
+ "Cheers for coming"
190
+ ],
191
+ "needs": [
192
+ "I'm parched, could I get some water?",
193
+ "Need to use the loo",
194
+ "I'm not comfortable, can you help me adjust?",
195
+ "Could you pass me my tablet?",
196
+ "I'm feeling a bit tired now"
197
+ ],
198
+ "emotions": [
199
+ "I'm having a good day today",
200
+ "Feeling a bit knackered",
201
+ "I'm chuffed to bits about that",
202
+ "I'm worried about the kids",
203
+ "Just feeling a bit frustrated with all this",
204
+ "I miss being able to cycle"
205
+ ],
206
+ "questions": [
207
+ "What's been happening?",
208
+ "How are the kids doing at school?",
209
+ "Any news from work?",
210
+ "Did United win at the weekend?",
211
+ "Have you heard from my mum?",
212
+ "When's my next appointment?"
213
+ ],
214
+ "tech_talk": [
215
+ "Have you tried the new JavaScript framework?",
216
+ "My voice banking needs more samples",
217
+ "This AAC interface could be more intuitive",
218
+ "I've been coding a small project to keep busy",
219
+ "The predictive text on this needs improvement"
220
+ ],
221
+ "reminiscing": [
222
+ "Remember our cycling trip in the Lakes?",
223
+ "The kids have grown so fast",
224
+ "I miss those Scout camping trips",
225
+ "South London's changed so much since we left",
226
+ "Remember when we first moved to Manchester?"
227
+ ],
228
+ "organization": [
229
+ "What's the plan for today?",
230
+ "I've forgotten what time the appointment is",
231
+ "Where did I save that file?",
232
+ "Have we sorted out the school forms?",
233
+ "Did I take my medication already?"
234
+ ]
235
+ }
236
+ }
test_components.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Test script for AAC Social Graph Assistant components.
3
+ Run this script to verify that the components work correctly.
4
+ """
5
+
6
+ import json
7
+ from utils import SocialGraphManager, SuggestionGenerator
8
+
9
def test_social_graph_manager():
    """Exercise SocialGraphManager loading, lookups, and utterance retrieval."""
    print("\n=== Testing SocialGraphManager ===")

    # Build the manager from the default graph file.
    manager = SocialGraphManager("social_graph.json")

    # Report how many people were loaded from the graph.
    print(f"Loaded graph with {len(manager.graph.get('people', {}))} people")

    # List everyone found in the graph by display name.
    people = manager.get_people_list()
    print(f"People in the graph: {', '.join([p['name'] for p in people])}")

    if people:
        # Inspect the first person's context in detail.
        first_id = people[0]['id']
        context = manager.get_person_context(first_id)
        display_name = context.get('name', first_id)
        print(f"\nContext for {display_name}:")
        print(f"  Role: {context.get('role', '')}")
        print(f"  Topics: {', '.join(context.get('topics', []))}")
        print(f"  Frequency: {context.get('frequency', '')}")

        # Show the phrases associated with that person.
        print(f"\nCommon phrases for {display_name}:")
        for line in manager.get_relevant_phrases(first_id):
            print(f"  - {line}")

    # Sample utterances from the first available category, if any.
    categories = list(manager.graph.get("common_utterances", {}).keys())
    if categories:
        chosen = categories[0]
        print(f"\nCommon utterances in category '{chosen}':")
        for line in manager.get_common_utterances(chosen):
            print(f"  - {line}")

    return manager
48
+
49
def test_suggestion_generator(graph_manager):
    """Exercise SuggestionGenerator using people from the given graph manager."""
    print("\n=== Testing SuggestionGenerator ===")

    try:
        generator = SuggestionGenerator()
        people = graph_manager.get_people_list()

        # Skip conditions first: the model check takes priority over an
        # empty people list, matching the intended diagnostics.
        if not generator.model_loaded:
            print("Model not loaded. Skipping suggestion generation test.")
        elif not people:
            print("No people in the graph. Skipping suggestion generation test.")
        else:
            target_id = people[0]['id']
            context = graph_manager.get_person_context(target_id)
            display_name = context.get('name', target_id)

            # Suggestion without any conversational input.
            print(f"\nGenerating suggestion for {display_name}...")
            print(f"Suggestion: {generator.generate_suggestion(context)}")

            # Suggestion conditioned on a snippet of prior conversation.
            user_input = "We were talking about the weather yesterday."
            print(f"\nGenerating suggestion with user input: '{user_input}'")
            print(f"Suggestion: {generator.generate_suggestion(context, user_input)}")
    except Exception as e:
        print(f"Error testing suggestion generator: {e}")
78
+
79
def _run_all():
    """Run both component tests in order, sharing the graph manager."""
    print("Testing AAC Social Graph Assistant components...")
    manager = test_social_graph_manager()
    test_suggestion_generator(manager)
    print("\nTests completed.")


if __name__ == "__main__":
    _run_all()
to-do.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AAC Context-Aware Demo: To-Do Document
2
+
3
+ ## Goal
4
+
5
+ Create a proof-of-concept offline-capable RAG (Retrieval-Augmented Generation) system for ALS AAC users that:
6
+
7
+ * Uses a lightweight knowledge graph (JSON)
8
+ * Supports utterance suggestion and correction
9
+ * Uses local/offline LLMs (e.g., Gemma, Flan-T5)
10
+ * Includes a semantic retriever to match context (e.g. conversation partner, topics)
11
+ * Provides a Gradio-based UI for deployment on HuggingFace
12
+
13
+ ---
14
+
15
+ ## Phase 1: Environment Setup
16
+
17
+ * [ ] Install Gradio, Transformers, Sentence-Transformers
18
+ * [ ] Choose and install inference backends:
19
+
20
+ * [ ] `google/flan-t5-base` (via HuggingFace Transformers)
21
+ * [ ] Gemma 2B via Ollama or Transformers (check support for offline use)
22
+ * [ ] Sentence similarity model (`sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2` or similar)
23
+
24
+ ---
25
+
26
+ ## Phase 2: Knowledge Graph
27
+
28
+ * [ ] Create example `social_graph.json` (people, topics, relationships)
29
+ * [ ] Define function to extract relevant context given a selected person
30
+
31
+ * Name, relationship, typical topics, frequency
32
+ * [ ] Format for prompt injection: inline context for LLM use
33
+
34
+ ---
35
+
36
+ ## Phase 3: Semantic Retriever
37
+
38
+ * [ ] Load sentence-transformer model
39
+ * [ ] Create index from the social graph topics/descriptions
40
+ * [ ] Match transcript to closest node(s) in the graph
41
+ * [ ] Retrieve context for prompt generation
42
+
43
+ ---
44
+
45
+ ## Phase 4: Gradio UI
46
+
47
+ * [ ] Simple interface:
48
+
49
+ * Dropdown: Select "Who is speaking?" (Bob, Alice, etc.)
50
+ * Record Button: Capture audio input
51
+ * Text area: Show transcript
52
+ * Toggle tabs:
53
+
54
+ * [ ] "Suggest Utterance"
55
+ * [ ] "Correct Message"
56
+ * Output: Generated message
57
+ * [ ] Implement Whisper transcription (use `whisper`, `faster-whisper`, or `whisper.cpp`)
58
+ * [ ] Pass transcript + retrieved context to LLM model
59
+
60
+ ---
61
+
62
+ ## Phase 5: Model Comparison
63
+
64
+ * [ ] Test both Flan-T5 and Gemma:
65
+
66
+ * [ ] Evaluate speed/quality tradeoffs
67
+ * [ ] Compare correction accuracy and context-specific generation
68
+
69
+ ---
70
+
71
+ ## Optional Phase 6: HuggingFace Deployment
72
+
73
+ * [ ] Clean up UI and remove dependencies requiring GPU-only execution
74
+ * [ ] Upload Gradio demo to HuggingFace Spaces
75
+ * [ ] Add documentation and example graphs/transcripts
76
+
77
+ ---
78
+
79
+ ## Notes
80
+
81
+ * Keep user privacy and safety in mind (no cloud transcription if Whisper offline is available)
82
+ * Keep JSON editable for later expansion (add sessions, emotional tone, etc.)
83
+ * Option to cache LLM suggestions for fast recall
84
+
85
+ ---
86
+
87
+ ## Future Features (Post-Proof of Concept)
88
+
89
+ * Add visualisation of social graph (D3 or static SVG)
90
+ * Add editable profile page for caregivers
91
+ * Add chat history / rolling transcript viewer
92
+ * Add emotion/sentiment detection for tone-aware suggestions
utils.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import random
3
+ from typing import Dict, List, Any, Optional, Tuple
4
+ from sentence_transformers import SentenceTransformer
5
+ import numpy as np
6
+ from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
7
+
8
class SocialGraphManager:
    """Manages the social graph and provides context for the AAC system.

    Loads a JSON social graph (people, places, topics, common utterances)
    and, when a sentence-transformer model is available, supports semantic
    matching of user input against cached phrase/topic embeddings.
    """

    def __init__(self, graph_path: str = "social_graph.json"):
        """Initialize the social graph manager.

        Args:
            graph_path: Path to the social graph JSON file
        """
        self.graph_path = graph_path
        self.graph = self._load_graph()

        # Fix: initialize the cache unconditionally so attribute access is
        # safe even when the sentence-transformer model fails to load below
        # (previously it was only assigned inside the try block).
        self.embeddings_cache: Dict[str, Any] = {}

        # Initialize sentence transformer for semantic matching; fall back
        # to non-semantic behavior (random sampling) if unavailable.
        try:
            self.sentence_model = SentenceTransformer(
                'sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2'
            )
            self._initialize_embeddings()
        except Exception as e:
            print(f"Warning: Could not load sentence transformer model: {e}")
            self.sentence_model = None

    def _load_graph(self) -> Dict[str, Any]:
        """Load the social graph from the JSON file.

        Returns:
            The parsed graph dict, or a minimal empty graph on any error.
        """
        try:
            # Explicit encoding so behavior is consistent across platforms.
            with open(self.graph_path, "r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            print(f"Error loading social graph: {e}")
            return {"people": {}, "places": [], "topics": []}

    def _initialize_embeddings(self):
        """Pre-compute embeddings for topics, phrases, and utterances."""
        if not self.sentence_model:
            return

        # Gather every text we may later need to compare against.
        texts: List[str] = list(self.graph.get("topics", []))
        for person_data in self.graph.get("people", {}).values():
            texts.extend(person_data.get("common_phrases", []))
        for utterances in self.graph.get("common_utterances", {}).values():
            texts.extend(utterances)

        for text in texts:
            if text not in self.embeddings_cache:
                self.embeddings_cache[text] = self.sentence_model.encode(text)

    def get_people_list(self) -> List[Dict[str, str]]:
        """Get a list of people from the social graph with their names and roles.

        Returns:
            A list of dicts with keys ``id``, ``name``, and ``role``.
        """
        return [
            {
                "id": person_id,
                "name": person_data.get("name", person_id),
                "role": person_data.get("role", ""),
            }
            for person_id, person_data in self.graph.get("people", {}).items()
        ]

    def get_person_context(self, person_id: str) -> Dict[str, Any]:
        """Get context information for a specific person.

        Returns an empty dict when the person is unknown.
        """
        if person_id not in self.graph.get("people", {}):
            return {}

        return self.graph["people"][person_id]

    def get_relevant_phrases(self, person_id: str, user_input: Optional[str] = None) -> List[str]:
        """Get relevant phrases for a specific person based on user input.

        Without user input (or without a loaded model) up to three random
        phrases are returned; otherwise the three phrases most semantically
        similar to ``user_input`` are returned.
        """
        if person_id not in self.graph.get("people", {}):
            return []

        person_data = self.graph["people"][person_id]
        phrases = person_data.get("common_phrases", [])

        # No semantic signal available: return a random sample.
        if not user_input or not self.sentence_model:
            return random.sample(phrases, min(3, len(phrases)))

        # Rank phrases by cosine similarity to the user's input.
        user_embedding = self.sentence_model.encode(user_input)
        phrase_scores: List[Tuple[str, float]] = []

        for phrase in phrases:
            phrase_embedding = self.embeddings_cache.get(phrase)
            if phrase_embedding is None:
                phrase_embedding = self.sentence_model.encode(phrase)
                self.embeddings_cache[phrase] = phrase_embedding

            similarity = np.dot(user_embedding, phrase_embedding) / (
                np.linalg.norm(user_embedding) * np.linalg.norm(phrase_embedding)
            )
            phrase_scores.append((phrase, similarity))

        phrase_scores.sort(key=lambda x: x[1], reverse=True)
        return [phrase for phrase, _ in phrase_scores[:3]]

    def get_common_utterances(self, category: Optional[str] = None) -> List[str]:
        """Get common utterances, optionally filtered by category.

        With a valid category, all utterances in that category are returned;
        otherwise up to two random utterances are sampled from each category.
        """
        utterances: List[str] = []

        if "common_utterances" not in self.graph:
            return utterances

        if category and category in self.graph["common_utterances"]:
            return self.graph["common_utterances"][category]

        for category_utterances in self.graph["common_utterances"].values():
            utterances.extend(
                random.sample(category_utterances, min(2, len(category_utterances)))
            )

        return utterances
127
+
128
class SuggestionGenerator:
    """Produces contextual phrase suggestions for the AAC system."""

    def __init__(self, model_name: str = "google/flan-t5-base"):
        """Load the seq2seq model and build the generation pipeline.

        Args:
            model_name: Name of the HuggingFace model to use
        """
        # Assume failure until the pipeline is fully constructed.
        self.model_loaded = False
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
            self.generator = pipeline(
                "text2text-generation",
                model=self.model,
                tokenizer=self.tokenizer,
            )
            self.model_loaded = True
        except Exception as e:
            print(f"Warning: Could not load model {model_name}: {e}")

    def generate_suggestion(self,
                            person_context: Dict[str, Any],
                            user_input: Optional[str] = None,
                            max_length: int = 50) -> str:
        """Generate a contextually appropriate suggestion.

        Args:
            person_context: Context information about the person
            user_input: Optional user input to consider
            max_length: Maximum length of the generated suggestion

        Returns:
            A generated suggestion string
        """
        if not self.model_loaded:
            return "Model not loaded. Please check your installation."

        # Pull the profile fields used to condition the model.
        person_name = person_context.get("name", "")
        person_role = person_context.get("role", "")
        topic_list = ", ".join(person_context.get("topics", []))
        background = person_context.get("context", "")

        # Assemble the prompt line by line, optionally including the
        # current conversation snippet before the final instruction.
        parts = [
            f"Context: {background}",
            f"Person: {person_name} ({person_role})",
            f"Topics of interest: {topic_list}",
        ]
        if user_input:
            parts.append(f"Current conversation: {user_input}")
        prompt = "\n".join(parts) + "\n" + \
            "Generate an appropriate phrase to say to this person:"

        try:
            outputs = self.generator(prompt, max_length=max_length)
            return outputs[0]["generated_text"]
        except Exception as e:
            print(f"Error generating suggestion: {e}")
            return "Could not generate a suggestion. Please try again."