diff --git a/backend/app/routes/llms_router.py b/backend/app/routes/llms_router.py
index 35ed27a..6c160d5 100644
--- a/backend/app/routes/llms_router.py
+++ b/backend/app/routes/llms_router.py
@@ -18,10 +18,19 @@ async def query_llm(conversation_history: dict):
     Returns:
         dict: The final response after processing the prompt with the tools.
     """
+    # Timezone-aware UTC so the timestamp is consistent across deployments.
+    from datetime import datetime, timezone
+
     result, bmkeys = await get_llm_response(
         conversation_history.get("conversation_history", [])
     )
-    return {"response": result, "bmkeys": bmkeys}
+    return {
+        "response": result,
+        "bmkeys": bmkeys,
+        # Timestamp added so the frontend (or logs) can record
+        # exactly when each AI response was produced.
+        "timestamp": datetime.now(timezone.utc).isoformat(),
+    }
 
 
 @router.post("/analyse/{biomodel_id}")
diff --git a/frontend/.prettierrc b/frontend/.prettierrc
similarity index 100%
rename from frontend/.prettierrc
rename to frontend/.prettierrc
diff --git a/frontend/components/ChatBox.tsx b/frontend/components/ChatBox.tsx
index 15c89fd..1ebeaa8 100644
--- a/frontend/components/ChatBox.tsx
+++ b/frontend/components/ChatBox.tsx
@@ -50,7 +50,9 @@ export const ChatBox: React.FC = ({
   isLoading: isInitialLoading = false,
   parameters,
 }) => {
-  // Helper function to create initial messages from startMessage
+  // Helper function to create initial messages from startMessage.
+  // Accepts either a single string or an array of strings (for multi-message
+  // greetings), and wraps each in the Message shape with a fresh timestamp.
const createInitialMessages = (startMsg: string | string[]): Message[] => { if (Array.isArray(startMsg)) { return startMsg.map((content, index) => ({ @@ -84,24 +86,28 @@ export const ChatBox: React.FC = ({ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" }); }; + // Scroll to the latest message whenever the messages array changes. useEffect(() => { scrollToBottom(); }, [messages]); - // Update messages when startMessage changes (when analysis completes) + // Re-initialise messages when startMessage changes (e.g. after an async + // analysis finishes and the parent passes in a new greeting/summary). + // We skip this while the initial load spinner is still showing so we don't + // overwrite an in-progress loading state. useEffect(() => { if (startMessage && !isInitialLoading) { setMessages(createInitialMessages(startMessage)); } }, [startMessage, isInitialLoading]); - // Helper function to format biomodel IDs as hyperlinks + // Replaces raw biomodel ID strings in the AI response with a markdown link + // to the database details page, so users can click through directly. const formatBiomodelIds = (content: string, bmkeys: string[]): string => { if (!bmkeys || bmkeys.length === 0) return content; let formattedContent = content; - // Replace biomodel IDs with hyperlinks bmkeys.forEach((bmId) => { const searchString = `${bmId}`; const encodedPrompt = encodeURIComponent(`Describe model`); @@ -119,6 +125,8 @@ export const ChatBox: React.FC = ({ return formattedContent; }; + // Clears the controlled input then immediately fires the chosen quick-action + // message so there is no flash of the action text in the input box. const handleQuickAction = (message: string) => { setInputMessage(""); handleSendMessage(message); @@ -127,7 +135,11 @@ export const ChatBox: React.FC = ({ const handleSendMessage = async (overrideMessage?: string) => { const msg = overrideMessage ?? 
inputMessage; if (!msg.trim()) return; - // Build parameter context string + + // Build an optional parameter context string that is appended to the + // user's prompt so the LLM knows about any active search filters + // (biomodel ID, owner, date range, etc.) without the user having to + // repeat them in every message. let parameterContext = ""; if (parameters) { const contextParts = []; @@ -171,10 +183,14 @@ export const ChatBox: React.FC = ({ setMessages((prev) => [...prev, userMessage]); setInputMessage(""); setIsLoading(true); + try { + // Prefix the prompt if the parent supplied one (e.g. "Analyse biomodel:") + // and always append the parameter context so the LLM has full context. const finalPrompt = promptPrefix ? `${promptPrefix} ${msg}${parameterContext}` : `${msg}${parameterContext}`; + const res = await fetch( `${process.env.NEXT_PUBLIC_API_URL}/query`, { @@ -183,6 +199,8 @@ export const ChatBox: React.FC = ({ "Content-Type": "application/json", accept: "application/json", }, + // Send only role + content to the backend — strip client-side + // fields like id and timestamp that the API does not expect. body: JSON.stringify({ conversation_history: [ ...messages, @@ -194,22 +212,29 @@ export const ChatBox: React.FC = ({ }), }, ); + const data = await res.json(); const aiResponse = data.response || "Sorry, I didn't get a response from the server."; const bmkeys = data.bmkeys || []; - // Format the response to include hyperlinks for biomodel IDs + // Post-process the AI response to turn plain biomodel ID strings into + // clickable markdown links before storing in state. const formattedResponse = formatBiomodelIds(aiResponse, bmkeys); const assistantMessage: Message = { id: (Date.now() + 1).toString(), role: "assistant", content: formattedResponse, - timestamp: new Date(), + // CHANGED: We now use the server-provided timestamp when available + // (data.timestamp is an ISO string returned by the /query endpoint). 
+ // Falling back to new Date() keeps things working even if the backend + // hasn't been updated yet or returns an unexpected shape. + timestamp: data.timestamp ? new Date(data.timestamp) : new Date(), }; setMessages((prev) => [...prev, assistantMessage]); } catch (error) { + // Surface a user-friendly error bubble instead of a silent failure. setMessages((prev) => [ ...prev, { @@ -225,6 +250,7 @@ export const ChatBox: React.FC = ({ } }; + // Send on Enter, but allow Shift+Enter for multi-line input in the future. const handleKeyPress = (e: React.KeyboardEvent) => { if (e.key === "Enter" && !e.shiftKey) { e.preventDefault(); @@ -232,6 +258,13 @@ export const ChatBox: React.FC = ({ } }; + // CHANGED: Helper that formats a Date object into a short "HH:MM AM/PM" + // string shown beneath each message bubble. + // Kept as a separate function so it's easy to swap the format later + // (e.g. add seconds, switch to 24-hour) without touching the JSX. + const formatTime = (date: Date): string => + date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" }); + return ( @@ -240,6 +273,7 @@ export const ChatBox: React.FC = ({ {cardTitle} +
@@ -253,6 +287,7 @@ export const ChatBox: React.FC = ({ message.role === "user" ? "flex-row-reverse" : "flex-row" }`} > + {/* Avatar circle — blue for user, grey for assistant */}
= ({ )}
+ + {/* Message bubble */}
= ({ {message.content}
) : ( + // Assistant messages are rendered as Markdown so the LLM + // can use headings, bullet lists, code blocks, etc. )} + + {/* + CHANGED: Timestamp label below every message bubble. + - For user messages it sits on the right (matching the + bubble alignment) using `text-right`. + - For assistant messages it aligns left. + - The muted colour (text-blue-200 / text-slate-400) keeps + it subtle so it doesn't compete with the message text. + - formatTime() produces a short locale-aware string like + "02:34 PM" — readable without taking up much space. + */} +

+ {formatTime(message.timestamp)} +

))} + + {/* Spinner shown while the parent is fetching the initial analysis */} {isInitialLoading && (
@@ -297,6 +358,8 @@ export const ChatBox: React.FC = ({
)} + + {/* Spinner shown while waiting for the LLM response */} {isLoading && (
@@ -311,9 +374,13 @@ export const ChatBox: React.FC = ({
)}
+ + {/* Invisible anchor that we scroll into view after every new message */}
+ + {/* Input area — always pinned to the bottom of the card */}
= ({
- {/* Quick Actions - positioned directly under search bar */} + {/* Quick Actions — one-tap prompts shown under the input bar. + Hidden while the initial loading spinner is active so the user + can't fire a query before the first analysis has finished. */} {!isInitialLoading && (
@@ -354,11 +423,19 @@ export const ChatBox: React.FC = ({
)} + {/* Supplemental Actions — an optional second row of actions (e.g. admin + shortcuts) separated by a slightly heavier border. */} {supplementalActions && (
{supplementalActions.map((action, idx) => ( -