136 changes: 130 additions & 6 deletions apps/mobile/app/(tabs)/agent.tsx
@@ -9,6 +9,7 @@ import { useApp } from "@/lib/app/app-provider";
import { requireOwnerAuth } from "@/lib/security/owner-auth";
import { loadWallet, type WalletSnapshot } from "@/lib/wallet/wallet";
import { useTransfer } from "@/lib/agent/use-transfer";
import { useAgentChatDemo, useAgentChatLive, type ToolCall } from "@/lib/agent/use-agent-chat";
import { GhostButton, IconButton, PrimaryButton } from "@/ui/buttons";
import { AppIcon } from "@/ui/app-icon";
import { Badge } from "@/ui/badge";
@@ -35,6 +36,61 @@ function shortenHex(input: string): string {
return `${s.slice(0, 10)}…${s.slice(-6)}`;
}

/** Keys that may contain sensitive data */
const SENSITIVE_KEYS = [
"privateKey", "private_key", "secret", "apiKey", "api_key", "key",
"address", "addresses", "balance", "balances", "txHash", "tx_hash",
"signature", "secretKey", "mnemonic", "seed", "password", "token",
];

/** Sanitize tool call params/result for display */
function sanitizeForDisplay(data: unknown, maxLen = 100): string {
if (data === null || data === undefined) return "";

// Strings are only truncated for length; key-based redaction applies to objects below
if (typeof data === "string") {
// Truncate long strings
if (data.length > maxLen) {
return data.slice(0, maxLen) + "…";
}
return data;
}

// If it's an object, filter sensitive keys
if (typeof data === "object") {
const obj = data as Record<string, unknown>;
const sanitized: Record<string, unknown> = {};

for (const [key, value] of Object.entries(obj)) {
const lowerKey = key.toLowerCase();
const isSensitive = SENSITIVE_KEYS.some((sk) => lowerKey.includes(sk));

if (isSensitive) {
sanitized[key] = "[redacted]";
} else if (typeof value === "object" && value !== null) {
sanitized[key] = "[object]";
} else {
sanitized[key] = value;
}
}

let str = JSON.stringify(sanitized);
if (str.length > maxLen) {
str = str.slice(0, maxLen) + "…";
}
return str;
}

// Fallback
const str = String(data);
return str.length > maxLen ? str.slice(0, maxLen) + "…" : str;
}
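
For reference, a minimal sketch of how the sanitizer behaves on a typical tool-call payload. The payload below is hypothetical, not taken from this PR:

// Hypothetical payload; key names chosen to exercise SENSITIVE_KEYS.
const examplePayload = {
  address: "0x1234abcd",
  amount: 12.5,
  memo: { note: "lunch" },
};
// "address" matches a sensitive key and is redacted, the nested object
// collapses to "[object]", and the result is capped at maxLen characters.
sanitizeForDisplay(examplePayload);
// => '{"address":"[redacted]","amount":12.5,"memo":"[object]"}'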

/** Returns true when a message carries a boolean isStreaming flag */
function hasIsStreaming(m: { isStreaming?: boolean }): boolean {
return typeof m.isStreaming === "boolean";
}

export default function AgentScreen() {
const t = useAppTheme();
const insets = useSafeAreaInsets();
@@ -52,6 +108,14 @@ export default function AgentScreen() {
}
}, [isLive]);

// Always call both hooks (React rule: hooks cannot be conditional)
const [demoChatState, demoChatActions] = useAgentChatDemo();
const [liveChatState, liveChatActions] = useAgentChatLive();

// Use appropriate chat based on mode
const chatState = isLive ? liveChatState : demoChatState;
const chatActions = isLive ? liveChatActions : demoChatActions;

const [draft, setDraft] = React.useState("");

const pending = state.agent.proposals.filter((p) => p.status === "pending");
@@ -302,13 +366,30 @@ export default function AgentScreen() {
<View style={{ gap: 10 }}>
<Row>
<H2>Conversation</H2>
<Muted>{state.agent.messages.length} messages</Muted>
<Muted>{isLive ? chatState.messages.length : state.agent.messages.length} messages</Muted>
</Row>
<View style={{ gap: 10 }}>
{state.agent.messages.map((m) => (
<MessageBubble key={m.id} role={m.role} text={m.text} />
{(isLive ? chatState.messages : state.agent.messages).map((m) => (
<MessageBubble key={m.id} role={m.role} text={m.text} isStreaming={hasIsStreaming(m) ? m.isStreaming : false} />
))}
</View>

{/* Show error if any */}
{chatState.error && (
<View style={{ padding: 10, borderRadius: t.radius.md, backgroundColor: "rgba(255,69,58,0.10)" }}>
<Muted style={{ color: t.colors.bad }}>{chatState.error}</Muted>
</View>
)}

{/* Show tool calls in live mode */}
{isLive && chatState.toolCalls.length > 0 && (
<View style={{ gap: 8 }}>
<Muted>Tool Calls</Muted>
{chatState.toolCalls.map((tc) => (
<ToolCallCard key={tc.id} toolCall={tc} />
))}
</View>
)}
</View>
</GlassCard>
</Animated.View>
@@ -353,8 +434,8 @@
}}
/>
<IconButton
disabled={!draft.trim() || transfer.phase === "preparing" || transfer.phase === "executing"}
tone={draft.trim() && transfer.phase !== "preparing" && transfer.phase !== "executing" ? "accent" : "neutral"}
disabled={!draft.trim() || transfer.phase === "preparing" || transfer.phase === "executing" || chatState.isResponding}
tone={draft.trim() && transfer.phase !== "preparing" && transfer.phase !== "executing" && !chatState.isResponding ? "accent" : "neutral"}
onPress={async () => {
await haptic("tap");
const text = draft.trim();
Expand All @@ -363,6 +444,14 @@ export default function AgentScreen() {
// Live mode: check if it's a transfer request
if (isLive && isTransferRequest(text)) {
await transfer.prepare(text);
} else if (isLive) {
// Live mode: send to LLM chat with error handling
try {
await chatActions.sendMessage(text);
} catch (err) {
await haptic("error");
console.error("Chat send error:", err);
}
} else {
// Demo mode or non-transfer message
actions.sendAgentMessage(text);
Expand All @@ -378,6 +467,37 @@ export default function AgentScreen() {
);
}

/** Tool call card for live mode - shows tool execution status */
function ToolCallCard(props: { toolCall: ToolCall }) {
const t = useAppTheme();
const { toolCall } = props;

const statusLabel = toolCall.status === "success" ? "✓" : toolCall.status === "error" ? "✗" : "…";

return (
<View style={{
padding: 10,
borderRadius: t.radius.md,
backgroundColor: t.scheme === "dark" ? "rgba(255,255,255,0.05)" : "rgba(255,255,255,0.6)",
borderWidth: 1,
borderColor: t.colors.glassBorder,
}}>
<Row>
<Body style={{ fontFamily: t.font.bodySemibold }}>{toolCall.toolName}</Body>
<Badge label={statusLabel} tone={toolCall.status === "success" ? "good" : toolCall.status === "error" ? "danger" : "warn"} />
</Row>
<Muted style={{ fontSize: 11, marginTop: 4 }}>
{sanitizeForDisplay(toolCall.params)}
</Muted>
{toolCall.result && (
<Muted style={{ fontSize: 11, marginTop: 4, color: t.colors.muted }}>
{sanitizeForDisplay(toolCall.result)}
</Muted>
)}
</View>
);
}

function PromptChip(props: { label: string; onPress: () => void }) {
return (
<Chip
@@ -414,7 +534,7 @@ function ToggleRow(props: { title: string; body: string; value: boolean; onChang
);
}

function MessageBubble(props: { role: "user" | "assistant"; text: string }) {
function MessageBubble(props: { role: "user" | "assistant"; text: string; isStreaming?: boolean }) {
const t = useAppTheme();
const isUser = props.role === "user";
const borderA = isUser
@@ -432,8 +552,12 @@ function MessageBubble(props: { role: "user" | "assistant"; text: string }) {
: t.scheme === "dark"
? "rgba(255,255,255,0.05)"
: "rgba(255,255,255,0.60)";

return (
<View style={{ alignSelf: isUser ? "flex-end" : "flex-start", maxWidth: "92%" }}>
{props.isStreaming && (
<Muted style={{ fontSize: 10, marginBottom: 2 }}>typing...</Muted>
)}
<LinearGradient
colors={[borderA, borderB]}
start={{ x: 0.1, y: 0.0 }}
73 changes: 62 additions & 11 deletions apps/mobile/lib/agent-runtime/openai-adapter.ts
@@ -21,29 +21,75 @@ const DEFAULT_MODEL = "gpt-4o-mini";
// SSE line parser
// ---------------------------------------------------------------------------

function parseSseLine(line: string): StreamChunk | null {
if (!line.startsWith("data: ")) return null;
function parseSseLine(line: string): StreamChunk[] {
if (!line.startsWith("data: ")) return [];
const data = line.slice(6).trim();
if (data === "[DONE]") return { type: "done", finishReason: "stop" };
if (data === "[DONE]") return [{ type: "done", finishReason: "stop" }];

try {
const json = JSON.parse(data);
const delta = json?.choices?.[0]?.delta;
const finishReason = json?.choices?.[0]?.finish_reason;

if (finishReason === "stop" || finishReason === "length") {
return { type: "done", finishReason };
return [{ type: "done", finishReason }];
}

const text = delta?.content;
if (typeof text === "string" && text.length > 0) {
return { type: "delta", text };
return [{ type: "delta", text }];
}

// Check for tool calls
const toolCalls = delta?.tool_calls;
if (Array.isArray(toolCalls) && toolCalls.length > 0) {
const results: StreamChunk[] = [];

for (const tc of toolCalls) {
if (tc?.id && tc?.function?.name) {
// Accumulate arguments - they may come in fragments
const tcId = tc.id;
const funcName = tc.function.name;
const newArgsFragment = tc.function.arguments || "";

// Streaming deltas deliver function arguments as JSON text fragments.
// Without per-call buffering we can only parse a fragment that already
// forms a complete JSON object; partial fragments fall back to {}.
// (A per-call buffering sketch follows parseSseLine below.)
let args = {};
if (newArgsFragment) {
try {
if (newArgsFragment.startsWith("{") && newArgsFragment.endsWith("}")) {
args = JSON.parse(newArgsFragment);
}
} catch {
// Incomplete or malformed fragment; keep args empty.
}
}

results.push({
type: "tool_call",
toolCall: {
id: tcId,
name: funcName,
arguments: args,
},
});
}
}

// Return all tool calls instead of just the first one
if (results.length > 0) {
return results;
}
}
} catch {
// Malformed JSON — skip.
}

return null;
return [];
}
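
The shortcut above drops arguments that arrive split across several chunks. Below is a minimal sketch of the per-call buffering the comment refers to; the helper name and the Map are hypothetical and not part of this PR, and ParsedToolCall is the type from types.ts. OpenAI typically sends the call id and function name only on the first fragment and repeats the stream index on later ones, so production code would likely key the buffer by index.

type ToolCallBuffer = { name: string; argText: string };
const toolCallBuffers = new Map<string, ToolCallBuffer>();

function appendToolCallFragment(
  key: string,
  name: string | undefined,
  fragment: string,
): ParsedToolCall | null {
  const buf = toolCallBuffers.get(key) ?? { name: name ?? "", argText: "" };
  if (name) buf.name = name;
  buf.argText += fragment;
  toolCallBuffers.set(key, buf);
  try {
    // Parses only once the accumulated text forms a complete JSON object.
    const args = JSON.parse(buf.argText) as Record<string, unknown>;
    toolCallBuffers.delete(key);
    return { id: key, name: buf.name, arguments: args };
  } catch {
    return null; // still partial; keep buffering
  }
}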

// ---------------------------------------------------------------------------
@@ -103,16 +149,20 @@ async function* streamSse(
for (const line of lines) {
const trimmed = line.trim();
if (!trimmed) continue;
const chunk = parseSseLine(trimmed);
if (chunk) yield chunk;
if (chunk?.type === "done") return;
const chunks = parseSseLine(trimmed);
for (const chunk of chunks) {
yield chunk;
if (chunk.type === "done") return;
}
}
}

// Process any remaining buffer.
if (buffer.trim()) {
const chunk = parseSseLine(buffer.trim());
if (chunk) yield chunk;
const chunks = parseSseLine(buffer.trim());
for (const chunk of chunks) {
yield chunk;
}
}

// If we never received [DONE], emit one.
@@ -173,6 +223,7 @@ export function createOpenAiProvider(apiKey: string): LlmProvider {
};
if (opts.maxTokens != null) body.max_tokens = opts.maxTokens;
if (opts.temperature != null) body.temperature = opts.temperature;
if (opts.tools != null) body.tools = opts.tools;

const generator = streamSse(
`${OPENAI_BASE}/chat/completions`,
23 changes: 22 additions & 1 deletion apps/mobile/lib/agent-runtime/types.ts
@@ -9,11 +9,12 @@
// Messages
// ---------------------------------------------------------------------------

export type ChatRole = "system" | "user" | "assistant";
export type ChatRole = "system" | "user" | "assistant" | "tool";

export type ChatMessage = {
role: ChatRole;
content: string;
toolCallId?: string;
};

// ---------------------------------------------------------------------------
@@ -23,15 +24,25 @@ export type ChatMessage = {
/** A single chunk emitted during streaming. */
export type StreamChunk =
| { type: "delta"; text: string }
| { type: "tool_call"; toolCall: ParsedToolCall }
| { type: "done"; finishReason: "stop" | "length" | "error" };

/** Parsed tool call from LLM response */
export type ParsedToolCall = {
id: string;
name: string;
arguments: Record<string, unknown>;
};
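
For context, a consumer switches on the chunk type while iterating the stream; a minimal sketch, assuming hypothetical callbacks (appendText, runTool, finish) that are not part of this diff:

// `stream` is an AsyncIterable<StreamChunk> obtained from the provider.
for await (const chunk of stream) {
  switch (chunk.type) {
    case "delta":
      appendText(chunk.text); // hypothetical: append streamed text to the UI
      break;
    case "tool_call":
      runTool(chunk.toolCall); // hypothetical: dispatch the parsed tool call
      break;
    case "done":
      finish(chunk.finishReason); // hypothetical: close out the assistant message
      break;
  }
}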

export type StreamOptions = {
/** Model ID to use (e.g. "gpt-4o-mini"). */
model: string;
/** System prompt prepended to the conversation. */
systemPrompt?: string;
/** Conversation messages. */
messages: ChatMessage[];
/** Tools available to the model. */
tools?: OpenAITool[];
/** Max tokens to generate. Default: provider-specific. */
maxTokens?: number;
/** Sampling temperature. Default: provider-specific. */
@@ -40,6 +51,16 @@
timeoutMs?: number;
};

/** OpenAI function calling tool schema */
export type OpenAITool = {
type: "function";
function: {
name: string;
description: string;
parameters: Record<string, unknown>;
};
};
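
As an illustration, a tool conforming to this schema, suitable for StreamOptions.tools, might look like the following; the tool name and parameters are hypothetical and not defined anywhere in this PR:

const transferTool: OpenAITool = {
  type: "function",
  function: {
    name: "prepare_transfer", // hypothetical tool name
    description: "Prepare a token transfer for the user to confirm.",
    parameters: {
      type: "object",
      properties: {
        to: { type: "string", description: "Recipient address" },
        amount: { type: "number", description: "Amount to send" },
      },
      required: ["to", "amount"],
    },
  },
};
// Passed to the provider via StreamOptions, e.g. { ..., tools: [transferTool] }.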

/** Async iterable of stream chunks. Call `cancel()` to abort early. */
export type ChatStream = AsyncIterable<StreamChunk> & {
cancel: () => void;