diff --git a/core/llm/openaiTypeConverters.ts b/core/llm/openaiTypeConverters.ts
index 0a500bc126..e58970ca82 100644
--- a/core/llm/openaiTypeConverters.ts
+++ b/core/llm/openaiTypeConverters.ts
@@ -19,38 +19,80 @@ export function toChatMessage(
       tool_call_id: message.toolCallId,
     };
   }
-
-  if (typeof message.content === "string") {
-    return {
-      role: message.role,
-      content: message.content === "" ? " " : message.content, // LM Studio API doesn't accept empty strings
-    };
-  } else if (!message.content.some((item) => item.type !== "text")) {
-    // If no multi-media is in the message, just send as text
-    // for compatibility with OpenAI-"compatible" servers
-    // that don't support multi-media format
+  if (message.role === "system") {
     return {
-      ...message,
-      content: message.content.map((item) => item.text).join(""),
+      role: "system",
+      content: message.content,
     };
   }
 
-  const parts = message.content.map((part) => {
-    const msg: any = {
-      type: part.type,
-      text: part.text,
-    };
-    if (part.type === "imageUrl") {
-      msg.image_url = { ...part.imageUrl, detail: "auto" };
-      msg.type = "image_url";
-    }
-    return msg;
-  });
-
-  return {
-    ...message,
-    content: parts,
+  let msg: ChatCompletionMessageParam = {
+    role: message.role,
+    content:
+      typeof message.content === "string"
+        ? message.content === ""
+          ? " "
+          : message.content
+        : !message.content.some((item) => item.type !== "text")
+          ? message.content.map((item) => item.text).join("")
+          : message.content.map((part) => {
+              const msg: any = {
+                type: part.type,
+                text: part.text,
+              };
+              if (part.type === "imageUrl") {
+                msg.image_url = { ...part.imageUrl, detail: "auto" };
+                msg.type = "image_url";
+              }
+              return msg;
+            }),
   };
+  if (
+    message.role === "assistant" &&
+    message.toolCalls &&
+    msg.role === "assistant"
+  ) {
+    msg.tool_calls = message.toolCalls.map((toolCall) => ({
+      id: toolCall.id!,
+      type: toolCall.type!,
+      function: {
+        name: toolCall.function?.name!,
+        arguments: toolCall.function?.arguments!,
+      },
+    }));
+  }
+  return msg;
+  // if (typeof message.content === "string") {
+  //   return {
+  //     role: message.role,
+  //     content: message.content === "" ? " " : message.content, // LM Studio API doesn't accept empty strings
+  //   };
+  // } else if (!message.content.some((item) => item.type !== "text")) {
+  //   // If no multi-media is in the message, just send as text
+  //   // for compatibility with OpenAI-"compatible" servers
+  //   // that don't support multi-media format
+  //   return {
+  //     ...message,
+  //     content: message.content.map((item) => item.text).join(""),
+  //   };
+  // }
+
+  // const parts = message.content.map((part) => {
+  //   const msg: any = {
+  //     type: part.type,
+  //     text: part.text,
+  //   };
+  //   if (part.type === "imageUrl") {
+  //     msg.image_url = { ...part.imageUrl, detail: "auto" };
+  //     msg.type = "image_url";
+  //   }
+  //   return msg;
+  // });
+
+  // return {
+  //   ...message,
+  //   content: parts,
+  // };
 }
 
 export function toChatBody(
diff --git a/core/llm/toolSupport.ts b/core/llm/toolSupport.ts
index 017ef7a883..93d72751a4 100644
--- a/core/llm/toolSupport.ts
+++ b/core/llm/toolSupport.ts
@@ -11,15 +11,15 @@ export const PROVIDER_TOOL_SUPPORT: Record<
       return true;
     }
   },
-  // openai: (model) => {
-  //   if (
-  //     ["gpt-4", "o1", "chatgpt-4o-latest"].some((part) =>
-  //       model.toLowerCase().startsWith(part),
-  //     )
-  //   ) {
-  //     return true;
-  //   }
-  // },
+  openai: (model) => {
+    if (
+      ["gpt-4", "o1", "chatgpt-4o-latest"].some((part) =>
+        model.toLowerCase().startsWith(part),
+      )
+    ) {
+      return true;
+    }
+  },
   // https://ollama.com/search?c=tools
   ollama: (model) => {
     if (
diff --git a/gui/src/components/StepContainer/ResponseActions.tsx b/gui/src/components/StepContainer/ResponseActions.tsx
index bda0d4a6cd..f016b34749 100644
--- a/gui/src/components/StepContainer/ResponseActions.tsx
+++ b/gui/src/components/StepContainer/ResponseActions.tsx
@@ -14,7 +14,6 @@ export interface ResponseActionsProps {
   index: number;
   onDelete: () => void;
   item: ChatHistoryItem;
-  shouldHideActions: boolean;
 }
 
 export default function ResponseActions({
@@ -23,7 +22,6 @@ export default function ResponseActions({
   item,
   isTruncated,
   onDelete,
-  shouldHideActions,
 }: ResponseActionsProps) {
   const isInEditMode = useAppSelector(selectIsInEditMode);
 
@@ -33,37 +31,33 @@ export default function ResponseActions({
 
   return (
     <div className="mx-2 flex cursor-default items-center justify-end space-x-1 bg-inherit pb-0 text-xs text-gray-400">
-      {shouldHideActions || (
-        <>
-          {isTruncated && (
-            <HeaderButtonWithToolTip
-              tabIndex={-1}
-              text="Continue generation"
-              onClick={onContinueGeneration}
-            >
-              <BarsArrowDownIcon className="h-3.5 w-3.5 text-gray-500" />
-            </HeaderButtonWithToolTip>
-          )}
+      {isTruncated && (
+        <HeaderButtonWithToolTip
+          tabIndex={-1}
+          text="Continue generation"
+          onClick={onContinueGeneration}
+        >
+          <BarsArrowDownIcon className="h-3.5 w-3.5 text-gray-500" />
+        </HeaderButtonWithToolTip>
+      )}
 
-          <HeaderButtonWithToolTip
-            testId={`delete-button-${index}`}
-            text="Delete"
-            tabIndex={-1}
-            onClick={onDelete}
-          >
-            <TrashIcon className="h-3.5 w-3.5 text-gray-500" />
-          </HeaderButtonWithToolTip>
+      <HeaderButtonWithToolTip
+        testId={`delete-button-${index}`}
+        text="Delete"
+        tabIndex={-1}
+        onClick={onDelete}
+      >
+        <TrashIcon className="h-3.5 w-3.5 text-gray-500" />
+      </HeaderButtonWithToolTip>
 
-          <CopyIconButton
-            tabIndex={-1}
-            text={renderChatMessage(item.message)}
-            clipboardIconClassName="h-3.5 w-3.5 text-gray-500"
-            checkIconClassName="h-3.5 w-3.5 text-green-400"
-          />
+      <CopyIconButton
+        tabIndex={-1}
+        text={renderChatMessage(item.message)}
+        clipboardIconClassName="h-3.5 w-3.5 text-gray-500"
+        checkIconClassName="h-3.5 w-3.5 text-green-400"
+      />
 
-          <FeedbackButtons item={item} />
-        </>
-      )}
+      <FeedbackButtons item={item} />
     </div>
   );
 }
diff --git a/gui/src/components/StepContainer/StepContainer.tsx b/gui/src/components/StepContainer/StepContainer.tsx
index f6a1dd1eed..5709f7f606 100644
--- a/gui/src/components/StepContainer/StepContainer.tsx
+++ b/gui/src/components/StepContainer/StepContainer.tsx
@@ -35,9 +35,8 @@ export default function StepContainer(props: StepContainerProps) {
   );
   const uiConfig = useAppSelector(selectUIConfig);
 
-  const shouldHideActions =
-    (isStreaming && props.isLast) ||
-    historyItemAfterThis?.message.role === "assistant";
+  const hideActionSpace = historyItemAfterThis?.message.role === "assistant";
+  const hideActions = hideActionSpace || (isStreaming && props.isLast);
 
   // const isStepAheadOfCurCheckpoint =
   //   isInEditMode && Math.floor(props.index / 2) > curCheckpointIndex;
@@ -100,18 +99,19 @@ export default function StepContainer(props: StepContainerProps) {
       {props.isLast && }
       {/* We want to occupy space in the DOM regardless of whether the actions
        are visible to avoid jank on stream complete */}
-      <div className={`mt-2 h-7 transition-opacity duration-300 ease-in-out`}>
-        {!shouldHideActions && (
-          <ResponseActions
-            isTruncated={isTruncated}
-            onDelete={onDelete}
-            onContinueGeneration={onContinueGeneration}
-            index={props.index}
-            item={props.item}
-            shouldHideActions={shouldHideActions}
-          />
-        )}
-      </div>
+      {!hideActionSpace && (
+        <div className={`mt-2 h-7 transition-opacity duration-300 ease-in-out`}>
+          {!hideActions && (
+            <ResponseActions
+              isTruncated={isTruncated}
+              onDelete={onDelete}
+              onContinueGeneration={onContinueGeneration}
+              index={props.index}
+              item={props.item}
+            />
+          )}
+        </div>
+      )}
     </div>
   );
 }
diff --git a/gui/src/redux/slices/sessionSlice.ts b/gui/src/redux/slices/sessionSlice.ts
index ff3f90a9b1..e50d597c7d 100644
--- a/gui/src/redux/slices/sessionSlice.ts
+++ b/gui/src/redux/slices/sessionSlice.ts
@@ -18,7 +18,6 @@ import {
   PromptLog,
   Session,
   SessionMetadata,
-  ToolCall,
   ToolCallDelta,
   ToolCallState,
 } from "core";
@@ -284,9 +283,6 @@ export const sessionSlice = createSlice({
     },
     streamUpdate: (state, action: PayloadAction<ChatMessage[]>) => {
       if (state.history.length) {
-        const lastItem = state.history[state.history.length - 1];
-        const lastMessage = lastItem.message;
-
         function toolCallDeltaToState(
           toolCallDelta: ToolCallDelta,
         ): ToolCallState {
@@ -309,13 +305,18 @@
         }
 
         for (const message of action.payload) {
+          const lastItem = state.history[state.history.length - 1];
+          const lastMessage = lastItem.message;
           if (
-            message.role &&
-            (lastMessage.role !== message.role ||
-              // This is when a tool call comes after assistant text
-              (lastMessage.content !== "" &&
-                message.role === "assistant" &&
-                message.toolCalls?.length))
+            lastMessage.role !== message.role ||
+            // This is for when a tool call comes immediately before/after assistant text
+            (lastMessage.role === "assistant" &&
+              message.role === "assistant" &&
+              // Last message isn't completely new
+              !(!lastMessage.toolCalls?.length && !lastMessage.content) &&
+              // And there's a difference in tool call presence
+              (lastMessage.toolCalls?.length ?? 0) !==
+                (message.toolCalls?.length ?? 0))
           ) {
             // Create a new message
             const historyItem: ChatHistoryItemWithMessageId = {
diff --git a/gui/src/redux/thunks/streamThunkWrapper.tsx b/gui/src/redux/thunks/streamThunkWrapper.tsx
index cd9f10f6d1..11ef6a07eb 100644
--- a/gui/src/redux/thunks/streamThunkWrapper.tsx
+++ b/gui/src/redux/thunks/streamThunkWrapper.tsx
@@ -17,6 +17,7 @@ export const streamThunkWrapper = createAsyncThunk<
       dispatch(setDialogMessage());
       dispatch(setShowDialog(true));
     } finally {
+      console.log(getState().session.history);
       dispatch(setInactive());
       const state = getState();
       if (state.session.mode === "chat") {
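
A few notes on the patch follow. First, the consolidated `content` expression in core/llm/openaiTypeConverters.ts performs the same three-way branch that the old early returns (now left behind as the commented-out block) expressed. A standalone sketch of that branch, using simplified stand-in types rather than the repo's real `ChatMessage`/`ChatCompletionMessageParam`:

```ts
// Stand-in types for illustration only; the real ones come from "core"
// and the openai package.
type MessagePart = {
  type: "text" | "imageUrl";
  text?: string;
  imageUrl?: { url: string };
};

function convertContent(content: string | MessagePart[]) {
  if (typeof content === "string") {
    // LM Studio's API rejects empty strings, so send a single space instead
    return content === "" ? " " : content;
  }
  if (!content.some((item) => item.type !== "text")) {
    // Text-only parts collapse to one string, for "OpenAI-compatible"
    // servers that don't understand the multi-part format
    return content.map((item) => item.text).join("");
  }
  // Mixed parts keep the structured form; imageUrl is renamed to image_url
  return content.map((part) =>
    part.type === "imageUrl"
      ? { type: "image_url", image_url: { ...part.imageUrl, detail: "auto" } }
      : { type: "text", text: part.text },
  );
}

console.log(convertContent("")); // " "
console.log(
  convertContent([
    { type: "text", text: "Hello " },
    { type: "text", text: "world" },
  ]),
); // "Hello world"
```

The new trailing `if` then copies `toolCalls` onto assistant messages as OpenAI-style `tool_calls`, which is what lets assistant tool calls survive the conversion at all.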
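
Second, re-enabling the `openai` entry in core/llm/toolSupport.ts restores tool-support detection via case-insensitive prefix matching. A standalone copy of the predicate (only the wrapper name `supportsTools` is invented here):

```ts
const supportsTools = (model: string): boolean | undefined => {
  if (
    ["gpt-4", "o1", "chatgpt-4o-latest"].some((part) =>
      model.toLowerCase().startsWith(part),
    )
  ) {
    return true;
  }
  // Explicit "unknown": not the same as returning false
  return undefined;
};

console.log(supportsTools("gpt-4o-mini")); // true ("gpt-4" prefix)
console.log(supportsTools("O1-preview")); // true (lowercased, "o1" prefix)
console.log(supportsTools("gpt-3.5-turbo")); // undefined
```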
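
Third, the StepContainer change splits the old `shouldHideActions` flag in two: `hideActionSpace` collapses the reserved action row entirely when the next history item is another assistant message, while `hideActions` additionally blanks the buttons mid-stream yet keeps the row's height, the anti-jank behavior the JSX comment describes. A small sketch of the resulting states (the helper is illustrative, not from the codebase):

```ts
function actionVisibility(
  isStreaming: boolean,
  isLast: boolean,
  nextRole?: "user" | "assistant",
) {
  const hideActionSpace = nextRole === "assistant";
  const hideActions = hideActionSpace || (isStreaming && isLast);
  return { hideActionSpace, hideActions };
}

// Still streaming the last item: keep the row's space, hide the buttons.
console.log(actionVisibility(true, true));
// -> { hideActionSpace: false, hideActions: true }

// Followed by another assistant item: drop the row entirely.
console.log(actionVisibility(false, false, "assistant"));
// -> { hideActionSpace: true, hideActions: true }

// Ordinary finished response: show everything.
console.log(actionVisibility(false, true));
// -> { hideActionSpace: false, hideActions: false }
```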
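
Finally, the reworked `streamUpdate` condition in gui/src/redux/slices/sessionSlice.ts decides when a streamed chunk opens a new history item instead of merging into the last one: always on a role change, and between two assistant chunks only when the last item already has text or tool calls and the two sides differ in tool-call presence. A standalone sketch of the predicate (`Msg` is a simplified stand-in for `ChatMessage`):

```ts
type Msg = { role: string; content: string; toolCalls?: object[] };

function startsNewItem(last: Msg, incoming: Msg): boolean {
  return (
    last.role !== incoming.role ||
    (last.role === "assistant" &&
      incoming.role === "assistant" &&
      // The last item isn't completely empty...
      !(!last.toolCalls?.length && !last.content) &&
      // ...and tool-call presence differs between the two chunks
      (last.toolCalls?.length ?? 0) !== (incoming.toolCalls?.length ?? 0))
  );
}

// A tool call arriving after streamed assistant text starts a new item:
console.log(
  startsNewItem(
    { role: "assistant", content: "Let me check." },
    { role: "assistant", content: "", toolCalls: [{}] },
  ),
); // true

// Plain text continuing plain text merges into the same item:
console.log(
  startsNewItem(
    { role: "assistant", content: "Let me" },
    { role: "assistant", content: " check." },
  ),
); // false
```

Reading `lastItem`/`lastMessage` inside the loop, rather than once before it as the old code did, matters because the loop itself pushes new history items; each chunk has to be compared against the item just created, not a stale reference.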