Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 22 additions & 7 deletions playwright/app.spec.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { expect, test } from '@playwright/test'
import type { Page } from '@playwright/test'
import { defaultGitHubChatModel } from '../src/modules/github-api.js'

const webServerMode = process.env.PLAYWRIGHT_WEB_SERVER_MODE ?? 'dev'
const appEntryPath = webServerMode === 'preview' ? '/index.html' : '/src/index.html'
Expand All @@ -12,6 +13,8 @@ type ChatRequestMessage = {
type ChatRequestBody = {
metadata?: unknown
messages?: ChatRequestMessage[]
model?: string
stream?: boolean
}

const waitForAppReady = async (page: Page, path = appEntryPath) => {
Expand Down Expand Up @@ -256,6 +259,7 @@ test('AI chat prefers streaming responses when available', async ({ page }) => {
)

expect(streamRequestBody?.metadata).toBeUndefined()
expect(streamRequestBody?.model).toBe(defaultGitHubChatModel)
const systemMessage = streamRequestBody?.messages?.find(
(message: ChatRequestMessage) => message.role === 'system',
)
Expand Down Expand Up @@ -335,9 +339,14 @@ test('AI chat falls back to non-streaming response when streaming fails', async
}) => {
let streamAttemptCount = 0
let fallbackAttemptCount = 0
const attemptedModels: string[] = []

await page.route('https://models.github.ai/inference/chat/completions', async route => {
const body = route.request().postDataJSON() as { stream?: boolean } | null
const body = route.request().postDataJSON() as ChatRequestBody | null
if (typeof body?.model === 'string') {
attemptedModels.push(body.model)
}

if (body?.stream) {
streamAttemptCount += 1
await route.fulfill({
Expand Down Expand Up @@ -373,6 +382,10 @@ test('AI chat falls back to non-streaming response when streaming fails', async
await connectByotWithSingleRepo(page)
await ensureAiChatDrawerOpen(page)

const selectedModel = 'openai/gpt-5-mini'
await page.locator('#ai-chat-model').selectOption(selectedModel)
await expect(page.locator('#ai-chat-model')).toHaveValue(selectedModel)

await page.locator('#ai-chat-prompt').fill('Use fallback path.')
await page.locator('#ai-chat-send').click()

Expand All @@ -383,6 +396,8 @@ test('AI chat falls back to non-streaming response when streaming fails', async
)
expect(streamAttemptCount).toBeGreaterThan(0)
expect(fallbackAttemptCount).toBeGreaterThan(0)
expect(attemptedModels.length).toBeGreaterThan(0)
expect(attemptedModels.every(model => model === selectedModel)).toBe(true)
})

test('BYOT remembers selected repository across reloads', async ({ page }) => {
Expand Down Expand Up @@ -433,7 +448,7 @@ test('BYOT remembers selected repository across reloads', async ({ page }) => {
test('renders default playground preview', async ({ page }) => {
await waitForInitialRender(page)

await page.getByLabel('ShadowRoot (open)').uncheck()
await page.getByLabel('ShadowRoot').uncheck()
await expect(page.locator('#status')).toHaveText('Rendered')
await expectPreviewHasRenderedContent(page)
})
Expand Down Expand Up @@ -668,7 +683,7 @@ test('renders in react mode with css modules', async ({ page }) => {
await ensurePanelToolsVisible(page, 'component')
await ensurePanelToolsVisible(page, 'styles')

await page.getByLabel('ShadowRoot (open)').uncheck()
await page.getByLabel('ShadowRoot').uncheck()
await page.locator('#render-mode').selectOption('react')
await page.locator('#style-mode').selectOption('module')
await expect(page.locator('#status')).toHaveText('Rendered')
Expand All @@ -678,7 +693,7 @@ test('renders in react mode with css modules', async ({ page }) => {
test('transpiles TypeScript annotations in component source', async ({ page }) => {
await waitForInitialRender(page)

await page.getByLabel('ShadowRoot (open)').uncheck()
await page.getByLabel('ShadowRoot').uncheck()
await setComponentEditorSource(
page,
[
Expand Down Expand Up @@ -762,7 +777,7 @@ test('react mode executes default React import without TDZ runtime failure', asy

await ensurePanelToolsVisible(page, 'component')

await page.getByLabel('ShadowRoot (open)').uncheck()
await page.getByLabel('ShadowRoot').uncheck()
await page.locator('#render-mode').selectOption('react')
await setComponentEditorSource(
page,
Expand Down Expand Up @@ -854,7 +869,7 @@ test('renders with less style mode', async ({ page }) => {

await ensurePanelToolsVisible(page, 'styles')

await page.getByLabel('ShadowRoot (open)').uncheck()
await page.getByLabel('ShadowRoot').uncheck()
await page.locator('#style-mode').selectOption('less')
await expect(page.locator('#status')).toHaveText('Rendered')
await expectPreviewHasRenderedContent(page)
Expand All @@ -865,7 +880,7 @@ test('renders with sass style mode', async ({ page }) => {

await ensurePanelToolsVisible(page, 'styles')

await page.getByLabel('ShadowRoot (open)').uncheck()
await page.getByLabel('ShadowRoot').uncheck()
await page.locator('#style-mode').selectOption('sass')
await expect(page.locator('#status')).toHaveText('Rendered')
await expectPreviewHasRenderedContent(page)
Expand Down
4 changes: 4 additions & 0 deletions src/app.js
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ const aiChatDrawer = document.getElementById('ai-chat-drawer')
const aiChatClose = document.getElementById('ai-chat-close')
const aiChatClear = document.getElementById('ai-chat-clear')
const aiChatPrompt = document.getElementById('ai-chat-prompt')
const aiChatModel = document.getElementById('ai-chat-model')
const aiChatIncludeEditors = document.getElementById('ai-chat-include-editors')
const aiChatSend = document.getElementById('ai-chat-send')
const aiChatStatus = document.getElementById('ai-chat-status')
Expand Down Expand Up @@ -477,6 +478,7 @@ const githubAiContextState = {
let chatDrawerController = {
setOpen: () => {},
setSelectedRepository: () => {},
setToken: () => {},
dispose: () => {},
}

Expand Down Expand Up @@ -509,6 +511,7 @@ const byotControls = createGitHubByotControls({
onTokenChange: token => {
githubAiContextState.token = token
syncAiChatTokenVisibility(token)
chatDrawerController.setToken(token)
},
setStatus,
})
Expand All @@ -527,6 +530,7 @@ chatDrawerController = createGitHubChatDrawer({
drawer: aiChatDrawer,
closeButton: aiChatClose,
promptInput: aiChatPrompt,
modelSelect: aiChatModel,
includeEditorsContextToggle: aiChatIncludeEditors,
sendButton: aiChatSend,
clearButton: aiChatClear,
Expand Down
20 changes: 12 additions & 8 deletions src/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ <h1>
class="app-grid-ai-controls"
id="github-ai-controls"
role="group"
aria-label="GitHub AI controls"
aria-label="GitHub controls"
hidden
>
<div class="github-token-control-wrap">
Expand Down Expand Up @@ -161,7 +161,7 @@ <h1>
aria-controls="github-ai-controls"
hidden
>
Agent
GitHub
</button>
</div>

Expand Down Expand Up @@ -455,12 +455,12 @@ <h2>Preview</h2>
</label>
<label class="toggle">
<input id="shadow-toggle" type="checkbox" checked />
ShadowRoot (open)
ShadowRoot
</label>
<button
class="hint-icon shadow-hint"
type="button"
aria-label="About ShadowRoot mode"
aria-label="About preview isolation mode"
data-tooltip="Turning ShadowRoot off renders the preview in light DOM, so @knighted/develop styles can affect preview output."
>
i
Expand Down Expand Up @@ -544,9 +544,6 @@ <h2>AI Chat</h2>
</div>

<div class="ai-chat-drawer__meta">
<p class="ai-chat-drawer__repo" id="ai-chat-repository">
No repository selected
</p>
<p class="ai-chat-drawer__status" id="ai-chat-status" data-level="neutral">
Idle
</p>
Expand All @@ -560,9 +557,16 @@ <h2>AI Chat</h2>
class="ai-chat-prompt"
id="ai-chat-prompt"
rows="4"
placeholder="Ask about your selected repository context"
placeholder="Ask for help developing your component and styles"
></textarea>

<label class="ai-chat-model-picker" for="ai-chat-model">
<span class="sr-only">Model</span>
<select id="ai-chat-model" aria-label="Chat model" disabled>
<option value="openai/gpt-4.1-mini" selected>openai/gpt-4.1-mini</option>
</select>
</label>

<label class="ai-chat-context-toggle" for="ai-chat-include-editors">
<input type="checkbox" id="ai-chat-include-editors" checked />
Send JSX + CSS editor context
Expand Down
61 changes: 59 additions & 2 deletions src/modules/github-api.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,53 @@
// Base endpoint for GitHub REST API requests.
const githubApiBaseUrl = 'https://api.github.com'
// Chat-completions endpoint for GitHub Models inference requests.
const githubModelsApiUrl = 'https://models.github.ai/inference/chat/completions'

// Model identifier used when a chat request does not specify one; also the
// pre-selected option in the chat model picker.
export const defaultGitHubChatModel = 'openai/gpt-4.1-mini'

/* Local model options avoid browser CORS failures when calling catalog endpoints directly. */
// NOTE(review): hand-maintained snapshot of the GitHub Models catalog —
// presumably needs periodic manual sync with the upstream catalog; verify.
export const githubChatModelOptions = [
// OpenAI
'openai/gpt-4.1-mini',
'openai/gpt-4.1',
'openai/gpt-4.1-nano',
'openai/gpt-4o',
'openai/gpt-4o-mini',
'openai/gpt-5',
'openai/gpt-5-chat',
'openai/gpt-5-mini',
'openai/gpt-5-nano',
'openai/o1',
'openai/o1-mini',
'openai/o1-preview',
'openai/o3',
'openai/o3-mini',
'openai/o4-mini',
// AI21 Labs
'ai21-labs/ai21-jamba-1.5-large',
// Cohere
'cohere/cohere-command-a',
'cohere/cohere-command-r-08-2024',
'cohere/cohere-command-r-plus-08-2024',
// xAI
'xai/grok-3',
'xai/grok-3-mini',
// DeepSeek
'deepseek/deepseek-r1',
'deepseek/deepseek-r1-0528',
'deepseek/deepseek-v3-0324',
// Meta
'meta/llama-3.2-11b-vision-instruct',
'meta/llama-3.2-90b-vision-instruct',
'meta/llama-3.3-70b-instruct',
'meta/llama-4-maverick-17b-128e-instruct-fp8',
'meta/llama-4-scout-17b-16e-instruct',
'meta/meta-llama-3.1-405b-instruct',
'meta/meta-llama-3.1-8b-instruct',
// Mistral AI
'mistral-ai/codestral-2501',
'mistral-ai/ministral-3b',
'mistral-ai/mistral-medium-2505',
'mistral-ai/mistral-small-2503',
// Microsoft
'microsoft/mai-ds-r1',
'microsoft/phi-4',
'microsoft/phi-4-mini-instruct',
'microsoft/phi-4-mini-reasoning',
'microsoft/phi-4-multimodal-instruct',
'microsoft/phi-4-reasoning',
]

const parseNextPageUrlFromLinkHeader = linkHeader => {
if (typeof linkHeader !== 'string' || !linkHeader.trim()) {
return null
Expand Down Expand Up @@ -352,7 +399,7 @@ export const streamGitHubChatCompletion = async ({
messages,
signal,
onToken,
model = 'openai/gpt-4.1-mini',
model = defaultGitHubChatModel,
}) => {
if (typeof token !== 'string' || token.trim().length === 0) {
throw new Error('A GitHub token is required to start a chat request.')
Expand Down Expand Up @@ -385,6 +432,7 @@ export const streamGitHubChatCompletion = async ({
const reader = response.body.getReader()
let buffered = ''
let combined = ''
let responseModel = ''

while (true) {
// eslint-disable-next-line no-await-in-loop
Expand All @@ -403,6 +451,10 @@ export const streamGitHubChatCompletion = async ({
continue
}

if (!responseModel && typeof body.model === 'string') {
responseModel = body.model
}

const chunk = extractStreamingDeltaText(body)
if (!chunk) {
continue
Expand All @@ -415,6 +467,9 @@ export const streamGitHubChatCompletion = async ({

if (buffered.trim()) {
const body = parseSseDataLine(buffered)
if (body && !responseModel && typeof body.model === 'string') {
responseModel = body.model
}
const chunk = body ? extractStreamingDeltaText(body) : ''
if (chunk) {
combined += chunk
Expand All @@ -428,6 +483,7 @@ export const streamGitHubChatCompletion = async ({

return {
content: combined,
model: responseModel || model,
rateLimit: parseRateMetadata({ headers: response.headers, body: null }),
}
}
Expand All @@ -436,7 +492,7 @@ export const requestGitHubChatCompletion = async ({
token,
messages,
signal,
model = 'openai/gpt-4.1-mini',
model = defaultGitHubChatModel,
}) => {
if (typeof token !== 'string' || token.trim().length === 0) {
throw new Error('A GitHub token is required to start a chat request.')
Expand Down Expand Up @@ -470,6 +526,7 @@ export const requestGitHubChatCompletion = async ({

return {
content,
model: typeof body?.model === 'string' && body.model ? body.model : model,
rateLimit: parseRateMetadata({ headers: response.headers, body }),
}
}
Loading