willwade committed on
Commit b5cc3fd · 1 Parent(s): 1d94515

add in history

Files changed (8)
  1. README.md +8 -0
  2. app.py +342 -7
  3. llm_interface.py +29 -0
  4. simple_test.py +80 -0
  5. social_graph.json +276 -12
  6. test_conversation_history.py +223 -0
  7. test_llm_with_history.py +146 -0
  8. utils.py +151 -0
README.md CHANGED
@@ -101,6 +101,14 @@ You can customize the system by editing the `social_graph.json` file. The file h
101
  }
102
  ```
103
 
104
  ## Current Social Graph Context
105
 
106
  The current social graph represents a British person with MND who:
 
101
  }
102
  ```
103
 
104
+ ## Plan
105
+
106
+ Look at using a Structured Knowledge Format (SKF) – a compact, machine-optimized format designed for efficient AI parsing rather than human readability.
107
+
108
+ We should create an SKF <-> JSON converter so the social graph JSON can be stored in a more compact form. This will reduce the size of the social graph and make it easier for AI models to parse (a rough converter sketch follows this README diff).
109
+
110
+ See also https://github.com/marv1nnnnn/llm-min.txt
111
+
112
  ## Current Social Graph Context
113
 
114
  The current social graph represents a British person with MND who:
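
The SKF <-> JSON converter described in the plan is not part of this commit. As a rough sketch only (the exact SKF layout would come from llm-min.txt, so the pipe-delimited format, header line, and function names below are invented for illustration), a round-trip converter for the social graph could start like this:

```python
import json

# Hypothetical field order for one person record; not the llm-min.txt spec.
FIELDS = ["name", "role", "frequency"]


def json_to_skf(graph: dict) -> str:
    """Flatten the social graph into compact pipe-delimited records.

    One line per person: id|name|role|frequency|topic1;topic2;...
    Long free-text context, common phrases, and conversation history are
    deliberately dropped here to keep the sketch small.
    """
    lines = ["#SKF:people:v0"]  # header so skf_to_json can sanity-check input
    for person_id, person in graph.get("people", {}).items():
        cells = [person_id] + [str(person.get(f, "")) for f in FIELDS]
        cells.append(";".join(person.get("topics", [])))
        lines.append("|".join(cells))
    return "\n".join(lines)


def skf_to_json(skf: str) -> dict:
    """Rebuild a minimal social-graph dict from the compact format."""
    lines = skf.strip().splitlines()
    if not lines or not lines[0].startswith("#SKF:people"):
        raise ValueError("Not a recognised SKF payload")
    people = {}
    for line in lines[1:]:
        person_id, name, role, frequency, topics = line.split("|")
        people[person_id] = {
            "name": name,
            "role": role,
            "frequency": frequency,
            "topics": [t for t in topics.split(";") if t],
        }
    return {"people": people}


if __name__ == "__main__":
    with open("social_graph.json") as f:
        graph = json.load(f)
    compact = json_to_skf(graph)
    print(f"JSON: {len(json.dumps(graph))} chars, SKF: {len(compact)} chars")
    roundtrip = skf_to_json(compact)
    assert set(roundtrip["people"]) == set(graph["people"])
```

A real converter would also need to carry `context`, `common_phrases`, and `conversation_history`, which this sketch drops for brevity.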
app.py CHANGED
@@ -119,6 +119,9 @@ def get_people_choices():
119
  display_name = format_person_display(person)
120
  person_id = person["id"]
121
  choices[display_name] = person_id
122
  return choices
123
 
124
 
@@ -152,9 +155,19 @@ def get_suggestion_categories():
152
  def on_person_change(person_id):
153
  """Handle person selection change."""
154
  if not person_id:
155
- return "", "", []
156
 
157
- person_context = social_graph.get_person_context(person_id)
158
 
159
  # Create a more user-friendly context display
160
  name = person_context.get("name", "")
@@ -176,7 +189,45 @@ def on_person_change(person_id):
176
  # Get topics for this person
177
  topics = person_context.get("topics", [])
178
 
179
- return context_info, phrases_text, topics
180
 
181
 
182
  def change_model(model_name, progress=gr.Progress()):
@@ -541,6 +592,129 @@ def transcribe_audio(audio_path):
541
  return "Could not transcribe audio. Please try again."
542
 
543
 
544
  # Create the Gradio interface
545
  with gr.Blocks(title="Will's AAC Communication Aid", css="custom.css") as demo:
546
  gr.Markdown("# Will's AAC Communication Aid")
@@ -679,6 +853,13 @@ with gr.Blocks(title="Will's AAC Communication Aid", css="custom.css") as demo:
679
  lines=5,
680
  )
681
 
682
  # Suggestions output
683
  suggestions_output = gr.Markdown(
684
  label="My Suggested Responses",
@@ -686,16 +867,22 @@ with gr.Blocks(title="Will's AAC Communication Aid", css="custom.css") as demo:
686
  elem_id="suggestions_output", # Add an ID for easier debugging
687
  )
688
 
689
  # Set up event handlers
690
  def handle_person_change(person_id):
691
  """Handle person selection change and update UI elements."""
692
- context_info, phrases_text, _ = on_person_change(person_id)
693
 
694
  # Get topics for this person
695
  topics = get_filtered_topics(person_id)
696
 
697
- # Update the context, phrases, and topic dropdown
698
- return context_info, phrases_text, gr.update(choices=topics)
699
 
700
  def handle_model_change(model_name):
701
  """Handle model selection change."""
@@ -706,7 +893,7 @@ with gr.Blocks(title="Will's AAC Communication Aid", css="custom.css") as demo:
706
  person_dropdown.change(
707
  handle_person_change,
708
  inputs=[person_dropdown],
709
- outputs=[context_display, common_phrases, topic_dropdown],
710
  )
711
 
712
  # Set up the model change event
@@ -738,6 +925,154 @@ with gr.Blocks(title="Will's AAC Communication Aid", css="custom.css") as demo:
738
  outputs=[user_input],
739
  )
740
 
741
  # Launch the app
742
  if __name__ == "__main__":
743
  print("Starting application...")
 
119
  display_name = format_person_display(person)
120
  person_id = person["id"]
121
  choices[display_name] = person_id
122
+
123
+ # Debug the choices
124
+ print(f"People choices: {choices}")
125
  return choices
126
 
127
 
 
155
  def on_person_change(person_id):
156
  """Handle person selection change."""
157
  if not person_id:
158
+ return "", "", [], ""
159
 
160
+ # Get the people choices dictionary
161
+ people_choices = get_people_choices()
162
+
163
+ # Extract the actual ID if it's in the format "Name (role)"
164
+ actual_person_id = person_id
165
+ if person_id in people_choices:
166
+ # If the person_id is a display name, get the actual ID
167
+ actual_person_id = people_choices[person_id]
168
+ print(f"on_person_change: Extracted actual person ID: {actual_person_id}")
169
+
170
+ person_context = social_graph.get_person_context(actual_person_id)
171
 
172
  # Create a more user-friendly context display
173
  name = person_context.get("name", "")
 
189
  # Get topics for this person
190
  topics = person_context.get("topics", [])
191
 
192
+ # Get conversation history for this person
193
+ conversation_history = person_context.get("conversation_history", [])
194
+ history_text = ""
195
+
196
+ if conversation_history:
197
+ # Sort by timestamp (most recent first)
198
+ sorted_history = sorted(
199
+ conversation_history, key=lambda x: x.get("timestamp", ""), reverse=True
200
+ )[
201
+ :2
202
+ ] # Get only the 2 most recent conversations
203
+
204
+ history_text = "### Recent Conversations:\n\n"
205
+
206
+ for i, conversation in enumerate(sorted_history):
207
+ # Format the timestamp
208
+ timestamp = conversation.get("timestamp", "")
209
+ try:
210
+ import datetime
211
+
212
+ dt = datetime.datetime.fromisoformat(timestamp)
213
+ formatted_date = dt.strftime("%B %d, %Y at %I:%M %p")
214
+ except (ValueError, TypeError):
215
+ formatted_date = timestamp
216
+
217
+ history_text += f"**Conversation on {formatted_date}:**\n\n"
218
+
219
+ # Add the messages
220
+ messages = conversation.get("messages", [])
221
+ for message in messages:
222
+ speaker = message.get("speaker", "Unknown")
223
+ text = message.get("text", "")
224
+ history_text += f"*{speaker}*: {text}\n\n"
225
+
226
+ # Add a separator between conversations
227
+ if i < len(sorted_history) - 1:
228
+ history_text += "---\n\n"
229
+
230
+ return context_info, phrases_text, topics, history_text
231
 
232
 
233
  def change_model(model_name, progress=gr.Progress()):
 
592
  return "Could not transcribe audio. Please try again."
593
 
594
 
595
+ def save_conversation(person_id, user_input, selected_response):
596
+ """Save a conversation to the social graph.
597
+
598
+ Args:
599
+ person_id: ID of the person in the conversation
600
+ user_input: What the person said to Will
601
+ selected_response: Will's response
602
+
603
+ Returns:
604
+ True if successful, False otherwise
605
+ """
606
+ print(f"Saving conversation for person_id: {person_id}")
607
+ print(f"User input: {user_input}")
608
+ print(f"Selected response: {selected_response}")
609
+
610
+ if not person_id:
611
+ print("Error: No person_id provided")
612
+ return False
613
+
614
+ if not (user_input or selected_response):
615
+ print("Error: No user input or selected response provided")
616
+ return False
617
+
618
+ # Create message objects
619
+ messages = []
620
+
621
+ # Get the person's name
622
+ person_context = social_graph.get_person_context(person_id)
623
+ if not person_context:
624
+ print(f"Error: Could not get person context for {person_id}")
625
+ return False
626
+
627
+ person_name = person_context.get("name", "Person")
628
+ print(f"Person name: {person_name}")
629
+
630
+ # Add the user's message if provided
631
+ if user_input:
632
+ messages.append({"speaker": person_name, "text": user_input})
633
+ print(f"Added user message: {user_input}")
634
+
635
+ # Add Will's response
636
+ if selected_response:
637
+ messages.append({"speaker": "Will", "text": selected_response})
638
+ print(f"Added Will's response: {selected_response}")
639
+
640
+ # Save the conversation
641
+ if messages:
642
+ print(f"Saving {len(messages)} messages to conversation history")
643
+ try:
644
+ success = social_graph.add_conversation(person_id, messages)
645
+ print(f"Save result: {success}")
646
+ if success:
647
+ # Manage the conversation history (keep only the most recent ones)
648
+ manage_result = manage_conversation_history(person_id)
649
+ print(f"Manage conversation history result: {manage_result}")
650
+ return success
651
+ except Exception as e:
652
+ print(f"Error saving conversation: {e}")
653
+ return False
654
+ else:
655
+ print("No messages to save")
656
+
657
+ return False
658
+
659
+
660
+ def manage_conversation_history(person_id, max_conversations=5):
661
+ """Manage the conversation history for a person.
662
+
663
+ Args:
664
+ person_id: ID of the person
665
+ max_conversations: Maximum number of conversations to keep in the social graph
666
+
667
+ Returns:
668
+ True if successful, False otherwise
669
+ """
670
+ if not person_id:
671
+ return False
672
+
673
+ # Get the person's conversation history
674
+ person_context = social_graph.get_person_context(person_id)
675
+ conversation_history = person_context.get("conversation_history", [])
676
+
677
+ # If we have more than the maximum number of conversations, summarize the oldest ones
678
+ if len(conversation_history) > max_conversations:
679
+ # Sort by timestamp (oldest first)
680
+ sorted_history = sorted(
681
+ conversation_history, key=lambda x: x.get("timestamp", "")
682
+ )
683
+
684
+ # Keep the most recent conversations
685
+ keep_conversations = sorted_history[-max_conversations:]
686
+
687
+ # Summarize the older conversations
688
+ older_conversations = sorted_history[:-max_conversations]
689
+
690
+ # Create summaries for the older conversations
691
+ summaries = []
692
+ for conversation in older_conversations:
693
+ summary = social_graph.summarize_conversation(conversation)
694
+ summaries.append(
695
+ {"timestamp": conversation.get("timestamp", ""), "summary": summary}
696
+ )
697
+
698
+ # Update the person's conversation history
699
+ social_graph.graph["people"][person_id][
700
+ "conversation_history"
701
+ ] = keep_conversations
702
+
703
+ # Add summaries if they don't exist
704
+ if "conversation_summaries" not in social_graph.graph["people"][person_id]:
705
+ social_graph.graph["people"][person_id]["conversation_summaries"] = []
706
+
707
+ # Add the new summaries
708
+ social_graph.graph["people"][person_id]["conversation_summaries"].extend(
709
+ summaries
710
+ )
711
+
712
+ # Save the updated graph
713
+ return social_graph._save_graph()
714
+
715
+ return True
716
+
717
+
718
  # Create the Gradio interface
719
  with gr.Blocks(title="Will's AAC Communication Aid", css="custom.css") as demo:
720
  gr.Markdown("# Will's AAC Communication Aid")
 
853
  lines=5,
854
  )
855
 
856
+ # Conversation history display
857
+ conversation_history = gr.Markdown(
858
+ label="Recent Conversations",
859
+ value="Select a person to see recent conversations...",
860
+ elem_id="conversation_history",
861
+ )
862
+
863
  # Suggestions output
864
  suggestions_output = gr.Markdown(
865
  label="My Suggested Responses",
 
867
  elem_id="suggestions_output", # Add an ID for easier debugging
868
  )
869
 
870
+ # Add buttons to select and use a specific response
871
+ with gr.Row():
872
+ use_response_1 = gr.Button("Use Response 1", variant="secondary")
873
+ use_response_2 = gr.Button("Use Response 2", variant="secondary")
874
+ use_response_3 = gr.Button("Use Response 3", variant="secondary")
875
+
876
  # Set up event handlers
877
  def handle_person_change(person_id):
878
  """Handle person selection change and update UI elements."""
879
+ context_info, phrases_text, _, history_text = on_person_change(person_id)
880
 
881
  # Get topics for this person
882
  topics = get_filtered_topics(person_id)
883
 
884
+ # Update the context, phrases, conversation history, and topic dropdown
885
+ return context_info, phrases_text, gr.update(choices=topics), history_text
886
 
887
  def handle_model_change(model_name):
888
  """Handle model selection change."""
 
893
  person_dropdown.change(
894
  handle_person_change,
895
  inputs=[person_dropdown],
896
+ outputs=[context_display, common_phrases, topic_dropdown, conversation_history],
897
  )
898
 
899
  # Set up the model change event
 
925
  outputs=[user_input],
926
  )
927
 
928
+ # Function to extract a response from the suggestions output
929
+ def extract_response(suggestions_text, response_number):
930
+ """Extract a specific response from the suggestions output.
931
+
932
+ Args:
933
+ suggestions_text: The text containing all suggestions
934
+ response_number: Which response to extract (1, 2, or 3)
935
+
936
+ Returns:
937
+ The extracted response or None if not found
938
+ """
939
+ print(
940
+ f"Extracting response {response_number} from suggestions text: {suggestions_text[:100]}..."
941
+ )
942
+
943
+ if not suggestions_text:
944
+ print("Suggestions text is empty")
945
+ return None
946
+
947
+ if "AI-Generated Responses" not in suggestions_text:
948
+ print("AI-Generated Responses not found in suggestions text")
949
+ # Try to extract from any numbered list
950
+ try:
951
+ import re
952
+
953
+ pattern = rf"{response_number}\.\s+(.*?)(?=\n\n\d+\.|\n\n$|$)"
954
+ match = re.search(pattern, suggestions_text)
955
+ if match:
956
+ extracted = match.group(1).strip()
957
+ print(f"Found response using generic pattern: {extracted[:50]}...")
958
+ return extracted
959
+ except Exception as e:
960
+ print(f"Error extracting response with generic pattern: {e}")
961
+ return None
962
+
963
+ try:
964
+ # Look for numbered responses like "1. Response text"
965
+ import re
966
+
967
+ pattern = rf"{response_number}\.\s+(.*?)(?=\n\n\d+\.|\n\n$|$)"
968
+ match = re.search(pattern, suggestions_text)
969
+ if match:
970
+ extracted = match.group(1).strip()
971
+ print(f"Successfully extracted response: {extracted[:50]}...")
972
+ return extracted
973
+ else:
974
+ print(f"No match found for response {response_number}")
975
+ # Try a more lenient pattern
976
+ pattern = rf"{response_number}\.\s+(.*)"
977
+ match = re.search(pattern, suggestions_text)
978
+ if match:
979
+ extracted = match.group(1).strip()
980
+ print(f"Found response using lenient pattern: {extracted[:50]}...")
981
+ return extracted
982
+ except Exception as e:
983
+ print(f"Error extracting response: {e}")
984
+
985
+ print(f"Failed to extract response {response_number}")
986
+ return None
987
+
988
+ # Function to handle using a response
989
+ def use_response(suggestions_text, response_number, person_id, user_input_text):
990
+ """Handle using a specific response.
991
+
992
+ Args:
993
+ suggestions_text: The text containing all suggestions
994
+ response_number: Which response to use (1, 2, or 3)
995
+ person_id: ID of the person in the conversation
996
+ user_input_text: What the person said to Will
997
+
998
+ Returns:
999
+ Updated conversation history
1000
+ """
1001
+ print(f"\n=== Using Response {response_number} ===")
1002
+ print(f"Person ID: {person_id}")
1003
+ print(f"User input: {user_input_text}")
1004
+
1005
+ # Check if person_id is valid
1006
+ if not person_id:
1007
+ print("Error: No person_id provided")
1008
+ return "Please select a person first."
1009
+
1010
+ # Get the people choices dictionary
1011
+ people_choices = get_people_choices()
1012
+ print(f"People choices: {people_choices}")
1013
+
1014
+ # Extract the actual ID if it's in the format "Name (role)"
1015
+ actual_person_id = person_id
1016
+ if person_id in people_choices:
1017
+ # If the person_id is a display name, get the actual ID
1018
+ actual_person_id = people_choices[person_id]
1019
+ print(f"Extracted actual person ID: {actual_person_id}")
1020
+
1021
+ print(
1022
+ f"People in social graph: {list(social_graph.graph.get('people', {}).keys())}"
1023
+ )
1024
+
1025
+ # Check if person exists in social graph
1026
+ if actual_person_id not in social_graph.graph.get("people", {}):
1027
+ print(f"Error: Person {actual_person_id} not found in social graph")
1028
+ return f"Error: Person {actual_person_id} not found in social graph."
1029
+
1030
+ # Extract the selected response
1031
+ selected_response = extract_response(suggestions_text, response_number)
1032
+
1033
+ if not selected_response:
1034
+ print("Error: Could not extract response")
1035
+ return "Could not find the selected response. Please try generating responses again."
1036
+
1037
+ # Save the conversation
1038
+ print(f"Saving conversation with response: {selected_response[:50]}...")
1039
+ success = save_conversation(
1040
+ actual_person_id, user_input_text, selected_response
1041
+ )
1042
+
1043
+ if success:
1044
+ print("Successfully saved conversation")
1045
+ # Get updated conversation history
1046
+ try:
1047
+ _, _, _, updated_history = on_person_change(actual_person_id)
1048
+ print("Successfully retrieved updated conversation history")
1049
+ return updated_history
1050
+ except Exception as e:
1051
+ print(f"Error retrieving updated conversation history: {e}")
1052
+ return "Conversation saved, but could not retrieve updated history."
1053
+ else:
1054
+ print("Failed to save conversation")
1055
+ return "Failed to save the conversation. Please try again."
1056
+
1057
+ # Set up the response selection button events
1058
+ use_response_1.click(
1059
+ lambda text, person, input_text: use_response(text, 1, person, input_text),
1060
+ inputs=[suggestions_output, person_dropdown, user_input],
1061
+ outputs=[conversation_history],
1062
+ )
1063
+
1064
+ use_response_2.click(
1065
+ lambda text, person, input_text: use_response(text, 2, person, input_text),
1066
+ inputs=[suggestions_output, person_dropdown, user_input],
1067
+ outputs=[conversation_history],
1068
+ )
1069
+
1070
+ use_response_3.click(
1071
+ lambda text, person, input_text: use_response(text, 3, person, input_text),
1072
+ inputs=[suggestions_output, person_dropdown, user_input],
1073
+ outputs=[conversation_history],
1074
+ )
1075
+
1076
  # Launch the app
1077
  if __name__ == "__main__":
1078
  print("Starting application...")
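
For reference, the numbered-response pattern used by `extract_response` above behaves like this on a toy suggestions string (the example text is invented):

```python
import re

# Invented example of what suggestions_output might contain.
suggestions = (
    "## AI-Generated Responses\n\n"
    "1. Yes, I'd love that.\n\n"
    "2. Maybe tomorrow instead.\n\n"
    "3. Can we make it earlier?\n"
)

response_number = 2
# Same pattern as extract_response: capture the text after "2." up to the
# next numbered item or the end of the string.
pattern = rf"{response_number}\.\s+(.*?)(?=\n\n\d+\.|\n\n$|$)"
match = re.search(pattern, suggestions)
print(match.group(1).strip() if match else None)  # Maybe tomorrow instead.
```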
llm_interface.py CHANGED
@@ -177,6 +177,35 @@ My current mood: {mood_description}
177
  if selected_topic:
178
  prompt += f"\nWe are currently discussing {selected_topic}.\n"
179
 
180
  # Add the user's message if provided, or set up for conversation initiation
181
  if user_input:
182
  # If user input is provided, we're responding to something
 
177
  if selected_topic:
178
  prompt += f"\nWe are currently discussing {selected_topic}.\n"
179
 
180
+ # Add conversation history if available
181
+ conversation_history = person_context.get("conversation_history", [])
182
+ if conversation_history:
183
+ # Get the two most recent conversations
184
+ recent_conversations = sorted(
185
+ conversation_history, key=lambda x: x.get("timestamp", ""), reverse=True
186
+ )[:2]
187
+
188
+ if recent_conversations:
189
+ prompt += "\nOur recent conversations:\n"
190
+
191
+ for i, conversation in enumerate(recent_conversations):
192
+ # Format the timestamp
193
+ timestamp = conversation.get("timestamp", "")
194
+ try:
195
+ dt = datetime.datetime.fromisoformat(timestamp)
196
+ formatted_date = dt.strftime("%B %d at %I:%M %p")
197
+ except (ValueError, TypeError):
198
+ formatted_date = timestamp
199
+
200
+ prompt += f"\nConversation on {formatted_date}:\n"
201
+
202
+ # Add the messages
203
+ messages = conversation.get("messages", [])
204
+ for message in messages:
205
+ speaker = message.get("speaker", "Unknown")
206
+ text = message.get("text", "")
207
+ prompt += f'{speaker}: "{text}"\n'
208
+
209
  # Add the user's message if provided, or set up for conversation initiation
210
  if user_input:
211
  # If user input is provided, we're responding to something
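
With the Emma history added to social_graph.json in this commit, the block this code appends to the prompt comes out roughly as follows (dates formatted with `%B %d at %I:%M %p`; only the two most recent conversations are included, and the doubled quotes on the first line come from quotes stored inside the saved message text):

```text
Our recent conversations:

Conversation on May 15 at 05:53 PM:
Will: ""Hey Emma, Just thinking about that lovely Yorkshire pudding recipe we talked about the other day. Fancy trying it again this weekend?""

Conversation on May 15 at 05:20 PM:
Emma: "How are you feeling this afternoon?"
Will: "A bit tired, but the new medication seems to be helping with the muscle stiffness."
Emma: "That's good to hear. Do you want me to bring you anything?"
Will: "A cup of tea would be lovely, thanks."
```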
simple_test.py ADDED
@@ -0,0 +1,80 @@
1
+ """
2
+ Simple test script for conversation history.
3
+ """
4
+
5
+ from utils import SocialGraphManager
6
+
7
+ # Initialize the social graph manager
8
+ graph_manager = SocialGraphManager("social_graph.json")
9
+
10
+ # Get a person with conversation history
11
+ person_id = "emma" # Emma has conversation history
12
+ person_context = graph_manager.get_person_context(person_id)
13
+
14
+ # Print the person's conversation history
15
+ print(f"\nConversation history for {person_context.get('name')}:")
16
+ conversation_history = person_context.get("conversation_history", [])
17
+
18
+ if not conversation_history:
19
+ print("No conversation history found.")
20
+ else:
21
+ for i, conversation in enumerate(conversation_history):
22
+ print(f"\nConversation {i+1}:")
23
+
24
+ # Print the timestamp
25
+ timestamp = conversation.get("timestamp", "")
26
+ print(f"Timestamp: {timestamp}")
27
+
28
+ # Print the messages
29
+ messages = conversation.get("messages", [])
30
+ for message in messages:
31
+ speaker = message.get("speaker", "Unknown")
32
+ text = message.get("text", "")
33
+ print(f" {speaker}: \"{text}\"")
34
+
35
+ # Test adding a new conversation
36
+ print("\nAdding a new conversation...")
37
+ new_messages = [
38
+ {"speaker": "Emma", "text": "How are you feeling this afternoon?"},
39
+ {"speaker": "Will", "text": "A bit tired, but the new medication seems to be helping with the muscle stiffness."},
40
+ {"speaker": "Emma", "text": "That's good to hear. Do you want me to bring you anything?"},
41
+ {"speaker": "Will", "text": "A cup of tea would be lovely, thanks."}
42
+ ]
43
+
44
+ success = graph_manager.add_conversation(person_id, new_messages)
45
+ if success:
46
+ print("New conversation added successfully.")
47
+ else:
48
+ print("Failed to add new conversation.")
49
+
50
+ # Get the updated person context
51
+ updated_person_context = graph_manager.get_person_context(person_id)
52
+ updated_conversation_history = updated_person_context.get("conversation_history", [])
53
+
54
+ # Print the updated conversation history
55
+ print("\nUpdated conversation history:")
56
+ if not updated_conversation_history:
57
+ print("No conversation history found.")
58
+ else:
59
+ # Count the conversations
60
+ print(f"Found {len(updated_conversation_history)} conversations.")
61
+
62
+ # Get the most recent conversation
63
+ most_recent = sorted(
64
+ updated_conversation_history,
65
+ key=lambda x: x.get("timestamp", ""),
66
+ reverse=True
67
+ )[0]
68
+
69
+ # Print the timestamp
70
+ timestamp = most_recent.get("timestamp", "")
71
+ print(f"Most recent timestamp: {timestamp}")
72
+
73
+ # Print the messages
74
+ messages = most_recent.get("messages", [])
75
+ for message in messages:
76
+ speaker = message.get("speaker", "Unknown")
77
+ text = message.get("text", "")
78
+ print(f" {speaker}: \"{text}\"")
79
+
80
+ print("\nTest completed.")
social_graph.json CHANGED
@@ -10,7 +10,13 @@
10
  "emma": {
11
  "name": "Emma",
12
  "role": "wife",
13
- "topics": ["family plans", "children's activities", "home organization", "weekend outings", "mutual friends"],
14
  "frequency": "daily",
15
  "common_phrases": [
16
  "Have you taken your medication?",
@@ -20,12 +26,114 @@
20
  "Do you need anything from the shops?",
21
  "Your mum called earlier"
22
  ],
23
- "context": "Emma is your wife and partner of 12 years. You met at work where she was in HR. She's been incredibly supportive since your MND diagnosis 5 months ago. She's trying to balance caring for you, the children, and maintaining some normalcy in family life. She's originally from York but has embraced Manchester life."
24
  },
25
  "mabel": {
26
  "name": "Mabel",
27
  "role": "daughter",
28
- "topics": ["nursery school", "drawing", "dinosaurs", "bedtime stories", "playground"],
29
  "frequency": "daily",
30
  "common_phrases": [
31
  "Daddy, look what I made!",
@@ -40,7 +148,13 @@
40
  "billy": {
41
  "name": "Billy",
42
  "role": "son",
43
- "topics": ["school", "football", "video games", "science experiments", "cycling"],
44
  "frequency": "daily",
45
  "common_phrases": [
46
  "Dad, can you help with my homework?",
@@ -55,7 +169,13 @@
55
  "dr_patel": {
56
  "name": "Dr. Patel",
57
  "role": "neurologist",
58
- "topics": ["MND progression", "symptom management", "medication", "clinical trials", "assistive technology"],
59
  "frequency": "fortnightly",
60
  "common_phrases": [
61
  "How have your symptoms been progressing?",
@@ -65,12 +185,70 @@
65
  "There's a new trial I think you might be eligible for",
66
  "How are you coping mentally?"
67
  ],
68
  "context": "Dr. Patel is your neurologist at Manchester Royal Infirmary who's been managing your MND since diagnosis 5 months ago. She's direct but compassionate, and always takes time to explain treatment options. She's particularly interested in getting you enrolled in clinical trials that might help slow progression."
69
  },
70
  "jenny": {
71
  "name": "Jenny",
72
  "role": "MND nurse specialist",
73
- "topics": ["daily management", "equipment needs", "NHS services", "symptom control", "practical advice"],
74
  "frequency": "weekly",
75
  "common_phrases": [
76
  "How are you managing at home?",
@@ -85,7 +263,13 @@
85
  "dave": {
86
  "name": "Dave",
87
  "role": "best mate",
88
- "topics": ["programming", "tech news", "football", "old scout adventures", "pub quizzes"],
89
  "frequency": "weekly",
90
  "common_phrases": [
91
  "Fancy watching the match this weekend?",
@@ -95,12 +279,62 @@
95
  "The lads are asking about you",
96
  "You're still better at coding than me, even with one hand!"
97
  ],
98
  "context": "Dave has been your best mate since Scout days in South East London. He also moved to Manchester for work and has been your rock since the diagnosis. He treats you exactly the same as before, which you appreciate. He's a software developer too and brings industry gossip and tech news when he visits."
99
  },
100
  "mum": {
101
  "name": "Mum",
102
  "role": "mother",
103
- "topics": ["family news", "childhood memories", "South London gossip", "health advice", "grandchildren"],
 
  "frequency": "twice weekly",
105
  "common_phrases": [
106
  "Are you eating properly?",
@@ -110,12 +344,30 @@
110
  "Your sister sends her love",
111
  "Remember when you used to climb that tree in the garden?"
112
  ],
113
- "context": "Your mum still lives in the family home in South East London where you grew up. She's worried sick about your diagnosis but tries to stay positive. She visits monthly and calls twice a week. She tends to fuss and offer well-meaning but sometimes unhelpful health advice. She adores her grandchildren."
114
  },
115
  "alex": {
116
  "name": "Alex",
117
  "role": "work colleague",
118
- "topics": ["programming projects", "work gossip", "flexible working", "accessibility tech", "industry news"],
119
  "frequency": "twice weekly",
120
  "common_phrases": [
121
  "The project is still on track",
@@ -130,7 +382,13 @@
130
  "physio": {
131
  "name": "Claire",
132
  "role": "physiotherapist",
133
- "topics": ["exercises", "mobility", "equipment", "muscle maintenance", "pain management"],
134
  "frequency": "weekly",
135
  "common_phrases": [
136
  "Let's try these new exercises",
@@ -145,7 +403,13 @@
145
  "speech_therapist": {
146
  "name": "Mark",
147
  "role": "speech and language therapist",
148
- "topics": ["speech exercises", "voice banking", "communication devices", "swallowing techniques", "AAC options"],
149
  "frequency": "fortnightly",
150
  "common_phrases": [
151
  "Let's practice those vocal exercises",
 
10
  "emma": {
11
  "name": "Emma",
12
  "role": "wife",
13
+ "topics": [
14
+ "family plans",
15
+ "children's activities",
16
+ "home organization",
17
+ "weekend outings",
18
+ "mutual friends"
19
+ ],
20
  "frequency": "daily",
21
  "common_phrases": [
22
  "Have you taken your medication?",
 
26
  "Do you need anything from the shops?",
27
  "Your mum called earlier"
28
  ],
29
+ "conversation_history": [
30
+ {
31
+ "timestamp": "2025-05-14T18:30:00",
32
+ "messages": [
33
+ {
34
+ "speaker": "Emma",
35
+ "text": "Have you taken your medication today?"
36
+ },
37
+ {
38
+ "speaker": "Will",
39
+ "text": "Yes, I took it about an hour ago. Thanks for checking."
40
+ },
41
+ {
42
+ "speaker": "Emma",
43
+ "text": "Great. How are you feeling now?"
44
+ },
45
+ {
46
+ "speaker": "Will",
47
+ "text": "A bit tired but otherwise okay. The new dosage seems to be working better."
48
+ }
49
+ ]
50
+ },
51
+ {
52
+ "timestamp": "2025-05-15T08:15:00",
53
+ "messages": [
54
+ {
55
+ "speaker": "Emma",
56
+ "text": "Morning love, did you sleep well?"
57
+ },
58
+ {
59
+ "speaker": "Will",
60
+ "text": "Not too bad. Woke up a couple of times but got back to sleep."
61
+ },
62
+ {
63
+ "speaker": "Emma",
64
+ "text": "That's good. I've got to take the kids to school, need anything before I go?"
65
+ },
66
+ {
67
+ "speaker": "Will",
68
+ "text": "I'm all set, thanks. Have a good day."
69
+ }
70
+ ]
71
+ },
72
+ {
73
+ "timestamp": "2025-05-15T17:19:41.821319",
74
+ "messages": [
75
+ {
76
+ "speaker": "Emma",
77
+ "text": "How are you feeling this afternoon?"
78
+ },
79
+ {
80
+ "speaker": "Will",
81
+ "text": "A bit tired, but the new medication seems to be helping with the muscle stiffness."
82
+ },
83
+ {
84
+ "speaker": "Emma",
85
+ "text": "That's good to hear. Do you want me to bring you anything?"
86
+ },
87
+ {
88
+ "speaker": "Will",
89
+ "text": "A cup of tea would be lovely, thanks."
90
+ }
91
+ ]
92
+ },
93
+ {
94
+ "timestamp": "2025-05-15T17:20:31.191371",
95
+ "messages": [
96
+ {
97
+ "speaker": "Emma",
98
+ "text": "How are you feeling this afternoon?"
99
+ },
100
+ {
101
+ "speaker": "Will",
102
+ "text": "A bit tired, but the new medication seems to be helping with the muscle stiffness."
103
+ },
104
+ {
105
+ "speaker": "Emma",
106
+ "text": "That's good to hear. Do you want me to bring you anything?"
107
+ },
108
+ {
109
+ "speaker": "Will",
110
+ "text": "A cup of tea would be lovely, thanks."
111
+ }
112
+ ]
113
+ },
114
+ {
115
+ "timestamp": "2025-05-15T17:53:03.681562",
116
+ "messages": [
117
+ {
118
+ "speaker": "Will",
119
+ "text": "\"Hey Emma, Just thinking about that lovely Yorkshire pudding recipe we talked about the other day. Fancy trying it again this weekend?\""
120
+ }
121
+ ]
122
+ }
123
+ ],
124
+ "context": "Emma is your wife and partner of 12 years. You met at work where she was in HR. She's been incredibly supportive since your MND diagnosis 5 months ago. She's trying to balance caring for you, the children, and maintaining some normalcy in family life. She's originally from York but has embraced Manchester life.",
125
+ "mood": 3
126
  },
127
  "mabel": {
128
  "name": "Mabel",
129
  "role": "daughter",
130
+ "topics": [
131
+ "nursery school",
132
+ "drawing",
133
+ "dinosaurs",
134
+ "bedtime stories",
135
+ "playground"
136
+ ],
137
  "frequency": "daily",
138
  "common_phrases": [
139
  "Daddy, look what I made!",
 
148
  "billy": {
149
  "name": "Billy",
150
  "role": "son",
151
+ "topics": [
152
+ "school",
153
+ "football",
154
+ "video games",
155
+ "science experiments",
156
+ "cycling"
157
+ ],
158
  "frequency": "daily",
159
  "common_phrases": [
160
  "Dad, can you help with my homework?",
 
169
  "dr_patel": {
170
  "name": "Dr. Patel",
171
  "role": "neurologist",
172
+ "topics": [
173
+ "MND progression",
174
+ "symptom management",
175
+ "medication",
176
+ "clinical trials",
177
+ "assistive technology"
178
+ ],
179
  "frequency": "fortnightly",
180
  "common_phrases": [
181
  "How have your symptoms been progressing?",
 
185
  "There's a new trial I think you might be eligible for",
186
  "How are you coping mentally?"
187
  ],
188
+ "conversation_history": [
189
+ {
190
+ "timestamp": "2025-05-08T14:00:00",
191
+ "messages": [
192
+ {
193
+ "speaker": "Dr. Patel",
194
+ "text": "How have you been since our last appointment, Will?"
195
+ },
196
+ {
197
+ "speaker": "Will",
198
+ "text": "The fatigue has been worse, especially in the afternoons."
199
+ },
200
+ {
201
+ "speaker": "Dr. Patel",
202
+ "text": "I see. And how about the new medication regimen?"
203
+ },
204
+ {
205
+ "speaker": "Will",
206
+ "text": "It seems to help with the muscle stiffness, but not so much with the fatigue."
207
+ },
208
+ {
209
+ "speaker": "Dr. Patel",
210
+ "text": "Let's adjust the dosage and timing. I'd like you to take it earlier in the day."
211
+ }
212
+ ]
213
+ },
214
+ {
215
+ "timestamp": "2025-04-24T15:30:00",
216
+ "messages": [
217
+ {
218
+ "speaker": "Dr. Patel",
219
+ "text": "I've reviewed your latest test results. Your respiratory function is stable."
220
+ },
221
+ {
222
+ "speaker": "Will",
223
+ "text": "That's good news. I've been doing the breathing exercises daily."
224
+ },
225
+ {
226
+ "speaker": "Dr. Patel",
227
+ "text": "Excellent. There's a new clinical trial starting that might be suitable for you."
228
+ },
229
+ {
230
+ "speaker": "Will",
231
+ "text": "I'd be interested in learning more about it."
232
+ },
233
+ {
234
+ "speaker": "Dr. Patel",
235
+ "text": "I'll send you the details. It involves a new medication that may help slow progression."
236
+ }
237
+ ]
238
+ }
239
+ ],
240
  "context": "Dr. Patel is your neurologist at Manchester Royal Infirmary who's been managing your MND since diagnosis 5 months ago. She's direct but compassionate, and always takes time to explain treatment options. She's particularly interested in getting you enrolled in clinical trials that might help slow progression."
241
  },
242
  "jenny": {
243
  "name": "Jenny",
244
  "role": "MND nurse specialist",
245
+ "topics": [
246
+ "daily management",
247
+ "equipment needs",
248
+ "NHS services",
249
+ "symptom control",
250
+ "practical advice"
251
+ ],
252
  "frequency": "weekly",
253
  "common_phrases": [
254
  "How are you managing at home?",
 
263
  "dave": {
264
  "name": "Dave",
265
  "role": "best mate",
266
+ "topics": [
267
+ "programming",
268
+ "tech news",
269
+ "football",
270
+ "old scout adventures",
271
+ "pub quizzes"
272
+ ],
273
  "frequency": "weekly",
274
  "common_phrases": [
275
  "Fancy watching the match this weekend?",
 
279
  "The lads are asking about you",
280
  "You're still better at coding than me, even with one hand!"
281
  ],
282
+ "conversation_history": [
283
+ {
284
+ "timestamp": "2025-05-13T19:45:00",
285
+ "messages": [
286
+ {
287
+ "speaker": "Dave",
288
+ "text": "Did you see that new JavaScript framework everyone's talking about?"
289
+ },
290
+ {
291
+ "speaker": "Will",
292
+ "text": "Yeah, seems like overkill for most projects. Classic JavaScript hype cycle."
293
+ },
294
+ {
295
+ "speaker": "Dave",
296
+ "text": "Exactly! But the boss wants us to evaluate it for the next project."
297
+ },
298
+ {
299
+ "speaker": "Will",
300
+ "text": "Let me know how it goes. I'll help review the code if you need another pair of eyes."
301
+ }
302
+ ]
303
+ },
304
+ {
305
+ "timestamp": "2025-05-10T16:20:00",
306
+ "messages": [
307
+ {
308
+ "speaker": "Dave",
309
+ "text": "United match was incredible yesterday! Did you watch it?"
310
+ },
311
+ {
312
+ "speaker": "Will",
313
+ "text": "Every minute! That last-minute goal had me shouting so loud I scared the kids."
314
+ },
315
+ {
316
+ "speaker": "Dave",
317
+ "text": "Haha! Classic. We should watch the next one together if you're up for it."
318
+ },
319
+ {
320
+ "speaker": "Will",
321
+ "text": "Definitely. Bring some of that craft beer you mentioned last time."
322
+ }
323
+ ]
324
+ }
325
+ ],
326
  "context": "Dave has been your best mate since Scout days in South East London. He also moved to Manchester for work and has been your rock since the diagnosis. He treats you exactly the same as before, which you appreciate. He's a software developer too and brings industry gossip and tech news when he visits."
327
  },
328
  "mum": {
329
  "name": "Mum",
330
  "role": "mother",
331
+ "topics": [
332
+ "family news",
333
+ "childhood memories",
334
+ "South London gossip",
335
+ "health advice",
336
+ "grandchildren"
337
+ ],
338
  "frequency": "twice weekly",
339
  "common_phrases": [
340
  "Are you eating properly?",
 
344
  "Your sister sends her love",
345
  "Remember when you used to climb that tree in the garden?"
346
  ],
347
+ "context": "Your mum still lives in the family home in South East London where you grew up. She's worried sick about your diagnosis but tries to stay positive. She visits monthly and calls twice a week. She tends to fuss and offer well-meaning but sometimes unhelpful health advice. She adores her grandchildren.",
348
+ "mood": 3,
349
+ "conversation_history": [
350
+ {
351
+ "timestamp": "2025-05-15T17:44:21.814155",
352
+ "messages": [
353
+ {
354
+ "speaker": "Will",
355
+ "text": "\"Hi Mum, just checking in. How's your week been so far?\""
356
+ }
357
+ ]
358
+ }
359
+ ]
360
  },
361
  "alex": {
362
  "name": "Alex",
363
  "role": "work colleague",
364
+ "topics": [
365
+ "programming projects",
366
+ "work gossip",
367
+ "flexible working",
368
+ "accessibility tech",
369
+ "industry news"
370
+ ],
371
  "frequency": "twice weekly",
372
  "common_phrases": [
373
  "The project is still on track",
 
382
  "physio": {
383
  "name": "Claire",
384
  "role": "physiotherapist",
385
+ "topics": [
386
+ "exercises",
387
+ "mobility",
388
+ "equipment",
389
+ "muscle maintenance",
390
+ "pain management"
391
+ ],
392
  "frequency": "weekly",
393
  "common_phrases": [
394
  "Let's try these new exercises",
 
403
  "speech_therapist": {
404
  "name": "Mark",
405
  "role": "speech and language therapist",
406
+ "topics": [
407
+ "speech exercises",
408
+ "voice banking",
409
+ "communication devices",
410
+ "swallowing techniques",
411
+ "AAC options"
412
+ ],
413
  "frequency": "fortnightly",
414
  "common_phrases": [
415
  "Let's practice those vocal exercises",
test_conversation_history.py ADDED
@@ -0,0 +1,223 @@
1
+ """
2
+ Test script to demonstrate the conversation history functionality.
3
+ """
4
+
5
+ import json
6
+ import datetime
7
+ from utils import SocialGraphManager
8
+ from llm_interface import LLMInterface
9
+
10
+ def test_conversation_history():
11
+ """Test the conversation history functionality."""
12
+ print("\n=== Testing Conversation History ===")
13
+
14
+ # Initialize the social graph manager
15
+ graph_manager = SocialGraphManager("social_graph.json")
16
+
17
+ # Get a person with conversation history
18
+ person_id = "emma" # Emma has conversation history
19
+ person_context = graph_manager.get_person_context(person_id)
20
+
21
+ # Print the person's conversation history
22
+ print(f"\nConversation history for {person_context.get('name')}:")
23
+ conversation_history = person_context.get("conversation_history", [])
24
+
25
+ if not conversation_history:
26
+ print("No conversation history found.")
27
+ else:
28
+ # Sort by timestamp (most recent first)
29
+ sorted_history = sorted(
30
+ conversation_history,
31
+ key=lambda x: x.get("timestamp", ""),
32
+ reverse=True
33
+ )
34
+
35
+ for i, conversation in enumerate(sorted_history):
36
+ # Format the timestamp
37
+ timestamp = conversation.get("timestamp", "")
38
+ try:
39
+ dt = datetime.datetime.fromisoformat(timestamp)
40
+ formatted_date = dt.strftime("%B %d, %Y at %I:%M %p")
41
+ except (ValueError, TypeError):
42
+ formatted_date = timestamp
43
+
44
+ print(f"\nConversation {i+1} on {formatted_date}:")
45
+
46
+ # Print the messages
47
+ messages = conversation.get("messages", [])
48
+ for message in messages:
49
+ speaker = message.get("speaker", "Unknown")
50
+ text = message.get("text", "")
51
+ print(f" {speaker}: \"{text}\"")
52
+
53
+ # Test adding a new conversation
54
+ print("\nAdding a new conversation...")
55
+ new_messages = [
56
+ {"speaker": "Emma", "text": "How are you feeling this afternoon?"},
57
+ {"speaker": "Will", "text": "A bit tired, but the new medication seems to be helping with the muscle stiffness."},
58
+ {"speaker": "Emma", "text": "That's good to hear. Do you want me to bring you anything?"},
59
+ {"speaker": "Will", "text": "A cup of tea would be lovely, thanks."}
60
+ ]
61
+
62
+ success = graph_manager.add_conversation(person_id, new_messages)
63
+ if success:
64
+ print("New conversation added successfully.")
65
+ else:
66
+ print("Failed to add new conversation.")
67
+
68
+ # Get the updated person context
69
+ updated_person_context = graph_manager.get_person_context(person_id)
70
+ updated_conversation_history = updated_person_context.get("conversation_history", [])
71
+
72
+ # Print the updated conversation history
73
+ print("\nUpdated conversation history:")
74
+ if not updated_conversation_history:
75
+ print("No conversation history found.")
76
+ else:
77
+ # Get the most recent conversation
78
+ most_recent = sorted(
79
+ updated_conversation_history,
80
+ key=lambda x: x.get("timestamp", ""),
81
+ reverse=True
82
+ )[0]
83
+
84
+ # Format the timestamp
85
+ timestamp = most_recent.get("timestamp", "")
86
+ try:
87
+ dt = datetime.datetime.fromisoformat(timestamp)
88
+ formatted_date = dt.strftime("%B %d, %Y at %I:%M %p")
89
+ except (ValueError, TypeError):
90
+ formatted_date = timestamp
91
+
92
+ print(f"\nMost recent conversation on {formatted_date}:")
93
+
94
+ # Print the messages
95
+ messages = most_recent.get("messages", [])
96
+ for message in messages:
97
+ speaker = message.get("speaker", "Unknown")
98
+ text = message.get("text", "")
99
+ print(f" {speaker}: \"{text}\"")
100
+
101
+ # Test generating a suggestion with conversation history
102
+ print("\nGenerating a suggestion with conversation history...")
103
+ llm_interface = LLMInterface()
104
+
105
+ if llm_interface.model_loaded:
106
+ # Store the original generate_suggestion method
107
+ original_method = llm_interface.generate_suggestion
108
+
109
+ # Create a mock method to print the prompt
110
+ def mock_generate_suggestion(*args, **kwargs):
111
+ """Mock method to print the prompt instead of sending it to the LLM."""
112
+ # Call the original method up to the point where it builds the prompt
113
+ person_context = args[0]
114
+ user_input = args[1] if len(args) > 1 else kwargs.get("user_input")
115
+
116
+ # Extract context information
117
+ name = person_context.get("name", "")
118
+ role = person_context.get("role", "")
119
+ topics = person_context.get("topics", [])
120
+ context = person_context.get("context", "")
121
+ selected_topic = person_context.get("selected_topic", "")
122
+ frequency = person_context.get("frequency", "")
123
+ mood = person_context.get("mood", 3)
124
+
125
+ # Get mood description
126
+ mood_descriptions = {
127
+ 1: "I'm feeling quite down and sad today. My responses might be more subdued.",
128
+ 2: "I'm feeling a bit low today. I might be less enthusiastic than usual.",
129
+ 3: "I'm feeling okay today - neither particularly happy nor sad.",
130
+ 4: "I'm feeling pretty good today. I'm in a positive mood.",
131
+ 5: "I'm feeling really happy and upbeat today! I'm in a great mood.",
132
+ }
133
+ mood_description = mood_descriptions.get(mood, mood_descriptions[3])
134
+
135
+ # Get current date and time
136
+ current_datetime = datetime.datetime.now()
137
+ current_time = current_datetime.strftime("%I:%M %p")
138
+ current_day = current_datetime.strftime("%A")
139
+ current_date = current_datetime.strftime("%B %d, %Y")
140
+
141
+ # Build enhanced prompt
142
+ prompt = f"""I am Will, a 38-year-old with MND (Motor Neuron Disease) from Manchester.
143
+ I am talking to {name}, who is my {role}.
144
+ About {name}: {context}
145
+ We typically talk about: {', '.join(topics)}
146
+ We communicate {frequency}.
147
+
148
+ Current time: {current_time}
149
+ Current day: {current_day}
150
+ Current date: {current_date}
151
+
152
+ My current mood: {mood_description}
153
+ """
154
+
155
+ # Add communication style based on relationship
156
+ if role in ["wife", "son", "daughter", "mother", "father"]:
157
+ prompt += "I communicate with my family in a warm, loving way, sometimes using inside jokes.\n"
158
+ elif role in ["doctor", "therapist", "nurse"]:
159
+ prompt += "I communicate with healthcare providers in a direct, informative way.\n"
160
+ elif role in ["best mate", "friend"]:
161
+ prompt += "I communicate with friends casually, often with humor and sometimes swearing.\n"
162
+ elif role in ["work colleague", "boss"]:
163
+ prompt += "I communicate with colleagues professionally but still friendly.\n"
164
+
165
+ # Add topic information if provided
166
+ if selected_topic:
167
+ prompt += f"\nWe are currently discussing {selected_topic}.\n"
168
+
169
+ # Add conversation history if available
170
+ conversation_history = person_context.get("conversation_history", [])
171
+ if conversation_history:
172
+ # Get the two most recent conversations
173
+ recent_conversations = sorted(
174
+ conversation_history,
175
+ key=lambda x: x.get("timestamp", ""),
176
+ reverse=True
177
+ )[:2]
178
+
179
+ if recent_conversations:
180
+ prompt += "\nOur recent conversations:\n"
181
+
182
+ for conversation in recent_conversations:
183
+ # Format the timestamp
184
+ timestamp = conversation.get("timestamp", "")
185
+ try:
186
+ dt = datetime.datetime.fromisoformat(timestamp)
187
+ formatted_date = dt.strftime("%B %d at %I:%M %p")
188
+ except (ValueError, TypeError):
189
+ formatted_date = timestamp
190
+
191
+ prompt += f"\nConversation on {formatted_date}:\n"
192
+
193
+ # Add the messages
194
+ messages = conversation.get("messages", [])
195
+ for message in messages:
196
+ speaker = message.get("speaker", "Unknown")
197
+ text = message.get("text", "")
198
+ prompt += f'{speaker}: "{text}"\n'
199
+
200
+ # Print the prompt
201
+ print("\n=== PROMPT WITH CONVERSATION HISTORY ===")
202
+ print(prompt)
203
+ print("=======================================\n")
204
+
205
+ # Return a mock response
206
+ return "This is a mock response to test conversation history inclusion in the prompt."
207
+
208
+ # Replace the original method with our mock method
209
+ llm_interface.generate_suggestion = mock_generate_suggestion
210
+
211
+ # Test with a user input
212
+ user_input = "Do you think you'll be up for dinner with the kids tonight?"
213
+ llm_interface.generate_suggestion(updated_person_context, user_input=user_input)
214
+
215
+ # Restore the original method
216
+ llm_interface.generate_suggestion = original_method
217
+ else:
218
+ print("LLM model not loaded, skipping prompt generation test.")
219
+
220
+ print("\nTest completed.")
221
+
222
+ if __name__ == "__main__":
223
+ test_conversation_history()
test_llm_with_history.py ADDED
@@ -0,0 +1,146 @@
1
+ """
2
+ Test script for LLM interface with conversation history.
3
+ """
4
+
5
+ from utils import SocialGraphManager
6
+ from llm_interface import LLMInterface
7
+
8
+ # Initialize the social graph manager
9
+ graph_manager = SocialGraphManager("social_graph.json")
10
+
11
+ # Get a person with conversation history
12
+ person_id = "emma" # Emma has conversation history
13
+ person_context = graph_manager.get_person_context(person_id)
14
+
15
+ # Initialize the LLM interface
16
+ llm_interface = LLMInterface()
17
+
18
+ # Store the original generate_suggestion method
19
+ original_method = llm_interface.generate_suggestion
20
+
21
+ # Create a mock method to print the prompt
22
+ def mock_generate_suggestion(*args, **kwargs):
23
+ """Mock method to print the prompt instead of sending it to the LLM."""
24
+ # Extract the prompt that would be sent to the LLM
25
+ person_context = args[0]
26
+ user_input = args[1] if len(args) > 1 else kwargs.get("user_input")
27
+
28
+ # Build the prompt as the original method would
29
+ # Extract context information
30
+ name = person_context.get("name", "")
31
+ role = person_context.get("role", "")
32
+ topics = person_context.get("topics", [])
33
+ context = person_context.get("context", "")
34
+ selected_topic = person_context.get("selected_topic", "")
35
+ frequency = person_context.get("frequency", "")
36
+ mood = person_context.get("mood", 3)
37
+
38
+ # Get mood description
39
+ mood_descriptions = {
40
+ 1: "I'm feeling quite down and sad today. My responses might be more subdued.",
41
+ 2: "I'm feeling a bit low today. I might be less enthusiastic than usual.",
42
+ 3: "I'm feeling okay today - neither particularly happy nor sad.",
43
+ 4: "I'm feeling pretty good today. I'm in a positive mood.",
44
+ 5: "I'm feeling really happy and upbeat today! I'm in a great mood.",
45
+ }
46
+ mood_description = mood_descriptions.get(mood, mood_descriptions[3])
47
+
48
+ # Get current date and time
49
+ import datetime
50
+ current_datetime = datetime.datetime.now()
51
+ current_time = current_datetime.strftime("%I:%M %p")
52
+ current_day = current_datetime.strftime("%A")
53
+ current_date = current_datetime.strftime("%B %d, %Y")
54
+
55
+ # Build enhanced prompt
56
+ prompt = f"""I am Will, a 38-year-old with MND (Motor Neuron Disease) from Manchester.
57
+ I am talking to {name}, who is my {role}.
58
+ About {name}: {context}
59
+ We typically talk about: {', '.join(topics)}
60
+ We communicate {frequency}.
61
+
62
+ Current time: {current_time}
63
+ Current day: {current_day}
64
+ Current date: {current_date}
65
+
66
+ My current mood: {mood_description}
67
+ """
68
+
69
+ # Add communication style based on relationship
70
+ if role in ["wife", "son", "daughter", "mother", "father"]:
71
+ prompt += "I communicate with my family in a warm, loving way, sometimes using inside jokes.\n"
72
+ elif role in ["doctor", "therapist", "nurse"]:
73
+ prompt += "I communicate with healthcare providers in a direct, informative way.\n"
74
+ elif role in ["best mate", "friend"]:
75
+ prompt += "I communicate with friends casually, often with humor and sometimes swearing.\n"
76
+ elif role in ["work colleague", "boss"]:
77
+ prompt += "I communicate with colleagues professionally but still friendly.\n"
78
+
79
+ # Add topic information if provided
80
+ if selected_topic:
81
+ prompt += f"\nWe are currently discussing {selected_topic}.\n"
82
+
83
+ # Add conversation history if available
84
+ conversation_history = person_context.get("conversation_history", [])
85
+ if conversation_history:
86
+ # Get the two most recent conversations
87
+ recent_conversations = sorted(
88
+ conversation_history,
89
+ key=lambda x: x.get("timestamp", ""),
90
+ reverse=True
91
+ )[:2]
92
+
93
+ if recent_conversations:
94
+ prompt += "\nOur recent conversations:\n"
95
+
96
+ for conversation in recent_conversations:
97
+ # Format the timestamp
98
+ timestamp = conversation.get("timestamp", "")
99
+ try:
100
+ dt = datetime.datetime.fromisoformat(timestamp)
101
+ formatted_date = dt.strftime("%B %d at %I:%M %p")
102
+ except (ValueError, TypeError):
103
+ formatted_date = timestamp
104
+
105
+ prompt += f"\nConversation on {formatted_date}:\n"
106
+
107
+ # Add the messages
108
+ messages = conversation.get("messages", [])
109
+ for message in messages:
110
+ speaker = message.get("speaker", "Unknown")
111
+ text = message.get("text", "")
112
+ prompt += f'{speaker}: "{text}"\n'
113
+
114
+ # Add the user's message if provided, or set up for conversation initiation
115
+ if user_input:
116
+ # If user input is provided, we're responding to something
117
+ prompt += f'\n{name} just said to me: "{user_input}"\n'
118
+ prompt += f"I want to respond directly to what {name} just said.\n"
119
+ else:
120
+ # No user input means we're initiating a conversation
121
+ if selected_topic:
122
+ # If a topic is selected, initiate conversation about that topic
123
+ prompt += f"\nI'm about to start a conversation with {name} about {selected_topic}.\n"
124
+ prompt += f"I want to initiate a conversation about {selected_topic} in a natural way.\n"
125
+ else:
126
+ # Generic conversation starter
127
+ prompt += f"\nI'm about to start a conversation with {name}.\n"
128
+ prompt += "I want to initiate a conversation in a natural way based on our relationship.\n"
129
+
130
+ # Print the prompt
131
+ print("\n=== PROMPT WITH CONVERSATION HISTORY ===")
132
+ print(prompt)
133
+ print("=======================================\n")
134
+
135
+ # Return a mock response
136
+ return "This is a mock response to test conversation history inclusion in the prompt."
137
+
138
+ # Replace the original method with our mock method
139
+ llm_interface.generate_suggestion = mock_generate_suggestion
140
+
141
+ # Test with a user input
142
+ user_input = "Do you think you'll be up for dinner with the kids tonight?"
143
+ llm_interface.generate_suggestion(person_context, user_input=user_input)
144
+
145
+ # Restore the original method
146
+ llm_interface.generate_suggestion = original_method
utils.py CHANGED
@@ -2,6 +2,8 @@ import json
2
  import random
3
  import threading
4
  import time
5
  from typing import Dict, List, Any, Optional
6
  from sentence_transformers import SentenceTransformer
7
  import numpy as np
@@ -148,6 +150,155 @@ class SocialGraphManager:
148
 
149
  return utterances
150
 
151
 
152
  class SuggestionGenerator:
153
  """Generates contextual suggestions for the AAC system."""
 
2
  import random
3
  import threading
4
  import time
5
+ import datetime
6
+ import os
7
  from typing import Dict, List, Any, Optional
8
  from sentence_transformers import SentenceTransformer
9
  import numpy as np
 
150
 
151
  return utterances
152
 
153
+ def get_conversation_history(
154
+ self, person_id: str, max_conversations: int = 2
155
+ ) -> List[Dict[str, Any]]:
156
+ """Get recent conversation history for a specific person.
157
+
158
+ Args:
159
+ person_id: ID of the person to get conversation history for
160
+ max_conversations: Maximum number of recent conversations to return
161
+
162
+ Returns:
163
+ List of conversation history entries, most recent first
164
+ """
165
+ if person_id not in self.graph.get("people", {}):
166
+ return []
167
+
168
+ person_data = self.graph["people"][person_id]
169
+ conversation_history = person_data.get("conversation_history", [])
170
+
171
+ # Sort by timestamp (most recent first)
172
+ sorted_history = sorted(
173
+ conversation_history, key=lambda x: x.get("timestamp", ""), reverse=True
174
+ )
175
+
176
+ # Return the most recent conversations
177
+ return sorted_history[:max_conversations]
178
+
179
+ def add_conversation(self, person_id: str, messages: List[Dict[str, str]]) -> bool:
180
+ """Add a new conversation to a person's history.
181
+
182
+ Args:
183
+ person_id: ID of the person to add conversation for
184
+ messages: List of message objects with "speaker" and "text" fields
185
+
186
+ Returns:
187
+ True if successful, False otherwise
188
+ """
189
+ if person_id not in self.graph.get("people", {}):
190
+ return False
191
+
192
+ # Create a new conversation entry
193
+ import datetime
194
+
195
+ new_conversation = {
196
+ "timestamp": datetime.datetime.now().isoformat(),
197
+ "messages": messages,
198
+ }
199
+
200
+ # Add to the person's conversation history
201
+ if "conversation_history" not in self.graph["people"][person_id]:
202
+ self.graph["people"][person_id]["conversation_history"] = []
203
+
204
+ self.graph["people"][person_id]["conversation_history"].append(new_conversation)
205
+
206
+ # Save the updated graph
207
+ return self._save_graph()
208
+
209
+ def _save_graph(self) -> bool:
210
+ """Save the social graph to the JSON file.
211
+
212
+ Returns:
213
+ True if successful, False otherwise
214
+ """
215
+ try:
216
+ print(f"Saving social graph to {self.graph_path}")
217
+ # Check if the file is writable
218
+ if os.path.exists(self.graph_path):
219
+ if not os.access(self.graph_path, os.W_OK):
220
+ print(f"Error: No write permission for {self.graph_path}")
221
+ return False
222
+
223
+ # Save the graph
224
+ with open(self.graph_path, "w") as f:
225
+ json.dump(self.graph, f, indent=2)
226
+
227
+ print("Social graph saved successfully")
228
+ return True
229
+ except Exception as e:
230
+ print(f"Error saving social graph: {e}")
231
+ import traceback
232
+
233
+ traceback.print_exc()
234
+ return False
235
+
236
+ def summarize_conversation(self, conversation: Dict[str, Any]) -> str:
237
+ """Generate a summary of a conversation.
238
+
239
+ Args:
240
+ conversation: Conversation entry with timestamp and messages
241
+
242
+ Returns:
243
+ A summary string of the conversation
244
+ """
245
+ if not conversation or "messages" not in conversation:
246
+ return "No conversation data available"
247
+
248
+ messages = conversation.get("messages", [])
249
+ if not messages:
250
+ return "No messages in conversation"
251
+
252
+ # Extract timestamp and format it
253
+ timestamp = conversation.get("timestamp", "")
254
+ try:
255
+ dt = datetime.datetime.fromisoformat(timestamp)
256
+ formatted_date = dt.strftime("%B %d, %Y at %I:%M %p")
257
+ except (ValueError, TypeError):
258
+ formatted_date = timestamp
259
+
260
+ # Create a brief summary
261
+ topic_keywords = set()
262
+ for message in messages:
263
+ # Extract potential keywords from messages
264
+ text = message.get("text", "").lower()
265
+ # Simple keyword extraction - could be improved with NLP
266
+ words = [
267
+ w
268
+ for w in text.split()
269
+ if len(w) > 4
270
+ and w
271
+ not in [
272
+ "about",
273
+ "would",
274
+ "could",
275
+ "should",
276
+ "their",
277
+ "there",
278
+ "these",
279
+ "those",
280
+ "where",
281
+ "which",
282
+ "today",
283
+ "tomorrow",
284
+ ]
285
+ ]
286
+ topic_keywords.update(words[:3]) # Add up to 3 keywords per message
287
+
288
+ # Limit to 5 most representative keywords
289
+ topic_keywords = list(topic_keywords)[:5]
290
+
291
+ # Create summary
292
+ first_speaker = messages[0].get("speaker", "Unknown") if messages else "Unknown"
293
+ message_count = len(messages)
294
+
295
+ summary = f"Conversation on {formatted_date}: {first_speaker} initiated a {message_count}-message conversation"
296
+
297
+ if topic_keywords:
298
+ summary += f" about {', '.join(topic_keywords)}"
299
+
300
+ return summary
301
+
302
 
303
  class SuggestionGenerator:
304
  """Generates contextual suggestions for the AAC system."""