mishig HF staff committed on
Commit
60216ec
1 Parent(s): f6138b3

same prettier as hub

Browse files
Files changed (30) hide show
  1. .prettierrc +0 -8
  2. .prettierrc.mjs +11 -0
  3. postcss.config.js +2 -2
  4. src/app.css +1 -1
  5. src/lib/components/Icons/IconCaret.svelte +2 -5
  6. src/lib/components/Icons/IconCode.svelte +1 -1
  7. src/lib/components/Icons/IconCopyCode.svelte +1 -1
  8. src/lib/components/Icons/IconCross.svelte +1 -1
  9. src/lib/components/Icons/IconDelete.svelte +2 -5
  10. src/lib/components/Icons/IconPlus.svelte +2 -7
  11. src/lib/components/Icons/IconSearch.svelte +2 -5
  12. src/lib/components/Icons/IconShare.svelte +2 -9
  13. src/lib/components/Icons/IconStar.svelte +1 -1
  14. src/lib/components/InferencePlayground/InferencePlayground.svelte +34 -39
  15. src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte +52 -52
  16. src/lib/components/InferencePlayground/InferencePlaygroundConversation.svelte +7 -7
  17. src/lib/components/InferencePlayground/InferencePlaygroundGenerationConfig.svelte +11 -14
  18. src/lib/components/InferencePlayground/InferencePlaygroundHFTokenModal.svelte +11 -15
  19. src/lib/components/InferencePlayground/InferencePlaygroundMessage.svelte +3 -3
  20. src/lib/components/InferencePlayground/InferencePlaygroundModelSelector.svelte +9 -15
  21. src/lib/components/InferencePlayground/InferencePlaygroundModelSelectorModal.svelte +20 -20
  22. src/lib/components/InferencePlayground/generationConfigSettings.ts +8 -12
  23. src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts +12 -16
  24. src/lib/types/index.d.ts +3 -3
  25. src/routes/+layout.svelte +1 -1
  26. src/routes/+page.server.ts +9 -12
  27. src/routes/+page.svelte +1 -1
  28. svelte.config.js +4 -4
  29. tailwind.config.ts +5 -5
  30. vite.config.ts +3 -3
.prettierrc DELETED
@@ -1,8 +0,0 @@
1
- {
2
- "useTabs": true,
3
- "singleQuote": true,
4
- "trailingComma": "none",
5
- "printWidth": 100,
6
- "plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"],
7
- "overrides": [{ "files": "*.svelte", "options": { "parser": "svelte" } }]
8
- }
 
 
 
 
 
 
 
 
 
.prettierrc.mjs ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ export default {
2
+ arrowParens: "avoid",
3
+ quoteProps: "consistent",
4
+ trailingComma: "es5",
5
+ useTabs: true,
6
+ tabWidth: 2,
7
+ printWidth: 120,
8
+ overrides: [{ files: "*.svelte", options: { parser: "svelte" } }],
9
+ tailwindConfig: "./tailwind.config.ts",
10
+ plugins: [import("prettier-plugin-svelte"), import("prettier-plugin-tailwindcss")],
11
+ };
postcss.config.js CHANGED
@@ -1,6 +1,6 @@
1
  export default {
2
  plugins: {
3
  tailwindcss: {},
4
- autoprefixer: {}
5
- }
6
  };
 
1
  export default {
2
  plugins: {
3
  tailwindcss: {},
4
+ autoprefixer: {},
5
+ },
6
  };
src/app.css CHANGED
@@ -1,4 +1,4 @@
1
- @import 'highlight.js/styles/atom-one-light';
2
  @tailwind base;
3
  @tailwind components;
4
  @tailwind utilities;
 
1
+ @import "highlight.js/styles/atom-one-light";
2
  @tailwind base;
3
  @tailwind components;
4
  @tailwind utilities;
src/lib/components/Icons/IconCaret.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
  <svg
@@ -12,8 +12,5 @@
12
  height="1em"
13
  preserveAspectRatio="xMidYMid meet"
14
  viewBox="0 0 24 24"
15
- ><path
16
- d="M16.293 9.293L12 13.586L7.707 9.293l-1.414 1.414L12 16.414l5.707-5.707z"
17
- fill="currentColor"
18
- />
19
  </svg>
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
  <svg
 
12
  height="1em"
13
  preserveAspectRatio="xMidYMid meet"
14
  viewBox="0 0 24 24"
15
+ ><path d="M16.293 9.293L12 13.586L7.707 9.293l-1.414 1.414L12 16.414l5.707-5.707z" fill="currentColor" />
 
 
 
16
  </svg>
src/lib/components/Icons/IconCode.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
  <svg
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
  <svg
src/lib/components/Icons/IconCopyCode.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
  <svg
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
  <svg
src/lib/components/Icons/IconCross.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
  <svg
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
  <svg
src/lib/components/Icons/IconDelete.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
  <svg
@@ -14,10 +14,7 @@
14
  preserveAspectRatio="xMidYMid meet"
15
  viewBox="0 0 32 32"
16
  >
17
- <path d="M12 12h2v12h-2z" fill="currentColor" /><path
18
- d="M18 12h2v12h-2z"
19
- fill="currentColor"
20
- /><path
21
  d="M4 6v2h2v20a2 2 0 0 0 2 2h16a2 2 0 0 0 2-2V8h2V6zm4 22V8h16v20z"
22
  fill="currentColor"
23
  /><path d="M12 2h8v2h-8z" fill="currentColor" />
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
  <svg
 
14
  preserveAspectRatio="xMidYMid meet"
15
  viewBox="0 0 32 32"
16
  >
17
+ <path d="M12 12h2v12h-2z" fill="currentColor" /><path d="M18 12h2v12h-2z" fill="currentColor" /><path
 
 
 
18
  d="M4 6v2h2v20a2 2 0 0 0 2 2h16a2 2 0 0 0 2-2V8h2V6zm4 22V8h16v20z"
19
  fill="currentColor"
20
  /><path d="M12 2h8v2h-8z" fill="currentColor" />
src/lib/components/Icons/IconPlus.svelte CHANGED
@@ -1,13 +1,8 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
- <svg
6
- class={classNames}
7
- xmlns="http://www.w3.org/2000/svg"
8
- width="1em"
9
- height="1em"
10
- viewBox="0 0 32 32"
11
  ><path
12
  fill="currentColor"
13
  d="M16 2A14.172 14.172 0 0 0 2 16a14.172 14.172 0 0 0 14 14a14.172 14.172 0 0 0 14-14A14.172 14.172 0 0 0 16 2Zm8 15h-7v7h-2v-7H8v-2h7V8h2v7h7Z"
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
+ <svg class={classNames} xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 32 32"
 
 
 
 
 
6
  ><path
7
  fill="currentColor"
8
  d="M16 2A14.172 14.172 0 0 0 2 16a14.172 14.172 0 0 0 14 14a14.172 14.172 0 0 0 14-14A14.172 14.172 0 0 0 16 2Zm8 15h-7v7h-2v-7H8v-2h7V8h2v7h7Z"
src/lib/components/Icons/IconSearch.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
  <svg
@@ -14,8 +14,5 @@
14
  preserveAspectRatio="xMidYMid meet"
15
  viewBox="0 0 32 32"
16
  >
17
- <path
18
- d="M30 28.59L22.45 21A11 11 0 1 0 21 22.45L28.59 30zM5 14a9 9 0 1 1 9 9a9 9 0 0 1-9-9z"
19
- fill="currentColor"
20
- />
21
  </svg>
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
  <svg
 
14
  preserveAspectRatio="xMidYMid meet"
15
  viewBox="0 0 32 32"
16
  >
17
+ <path d="M30 28.59L22.45 21A11 11 0 1 0 21 22.45L28.59 30zM5 14a9 9 0 1 1 9 9a9 9 0 0 1-9-9z" fill="currentColor" />
 
 
 
18
  </svg>
src/lib/components/Icons/IconShare.svelte CHANGED
@@ -1,15 +1,8 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
- <svg
6
- class={classNames}
7
- width="1em"
8
- height="1em"
9
- viewBox="0 0 24 25"
10
- fill="none"
11
- xmlns="http://www.w3.org/2000/svg"
12
- >
13
  <path
14
  fill-rule="evenodd"
15
  clip-rule="evenodd"
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
+ <svg class={classNames} width="1em" height="1em" viewBox="0 0 24 25" fill="none" xmlns="http://www.w3.org/2000/svg">
 
 
 
 
 
 
 
6
  <path
7
  fill-rule="evenodd"
8
  clip-rule="evenodd"
src/lib/components/Icons/IconStar.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- export let classNames = '';
3
  </script>
4
 
5
  <svg
 
1
  <script lang="ts">
2
+ export let classNames = "";
3
  </script>
4
 
5
  <svg
src/lib/components/InferencePlayground/InferencePlayground.svelte CHANGED
@@ -3,32 +3,32 @@
3
  createHfInference,
4
  handleStreamingResponse,
5
  handleNonStreamingResponse,
6
- isSystemPromptSupported
7
- } from './inferencePlaygroundUtils';
8
- import GenerationConfig from './InferencePlaygroundGenerationConfig.svelte';
9
- import HFTokenModal from './InferencePlaygroundHFTokenModal.svelte';
10
- import ModelSelector from './InferencePlaygroundModelSelector.svelte';
11
- import Conversation from './InferencePlaygroundConversation.svelte';
12
- import { onDestroy } from 'svelte';
13
- import { type ChatCompletionInputMessage } from '@huggingface/tasks';
14
- import type { ModelEntryWithTokenizer } from '$lib/types';
15
- import { defaultGenerationConfig } from './generationConfigSettings';
16
- import IconShare from '../Icons/IconShare.svelte';
17
- import IconDelete from '../Icons/IconDelete.svelte';
18
- import IconCode from '../Icons/IconCode.svelte';
19
 
20
  export let models: ModelEntryWithTokenizer[];
21
 
22
- const startMessages: ChatCompletionInputMessage[] = [{ role: 'user', content: '' }];
23
 
24
  let conversation: Conversation = {
25
  model: models[0],
26
  config: defaultGenerationConfig,
27
  messages: startMessages,
28
- streaming: true
29
  };
30
 
31
- let systemMessage: ChatCompletionInputMessage = { role: 'system', content: '' };
32
  let hfToken: string | undefined = import.meta.env.VITE_HF_TOKEN;
33
  let viewCode = false;
34
  let showTokenModal = false;
@@ -47,9 +47,9 @@
47
  conversation.messages = [
48
  ...conversation.messages,
49
  {
50
- role: conversation.messages.at(-1)?.role === 'user' ? 'assistant' : 'user',
51
- content: ''
52
- }
53
  ];
54
  }
55
 
@@ -59,7 +59,7 @@
59
  }
60
 
61
  function reset() {
62
- systemMessage.content = '';
63
  conversation.messages = [...startMessages];
64
  }
65
 
@@ -88,14 +88,14 @@
88
  const hf = createHfInference(hfToken);
89
 
90
  if (conversation.streaming) {
91
- const streamingMessage = { role: 'assistant', content: '' };
92
  conversation.messages = [...conversation.messages, streamingMessage];
93
  abortController = new AbortController();
94
 
95
  await handleStreamingResponse(
96
  hf,
97
  conversation,
98
- (content) => {
99
  if (streamingMessage) {
100
  streamingMessage.content = content;
101
  conversation.messages = [...conversation.messages];
@@ -118,8 +118,8 @@
118
 
119
  addMessage();
120
  } catch (error) {
121
- if (error.name !== 'AbortError') {
122
- alert('error: ' + (error as Error).message);
123
  }
124
  } finally {
125
  loading = false;
@@ -128,7 +128,7 @@
128
  }
129
 
130
  function onKeydown(event: KeyboardEvent) {
131
- if (!event.shiftKey && event.key === 'Enter') {
132
  submit();
133
  }
134
  }
@@ -137,9 +137,9 @@
137
  {#if showTokenModal}
138
  <HFTokenModal
139
  on:close={() => (showTokenModal = false)}
140
- on:submit={(e) => {
141
  const formData = new FormData(e.target);
142
- hfToken = formData.get('hf-token');
143
  submit();
144
  showTokenModal = false;
145
  }}
@@ -161,8 +161,8 @@
161
  name=""
162
  id=""
163
  placeholder={systemPromptSupported
164
- ? 'Enter a custom prompt'
165
- : 'System prompt is not supported with the chosen model.'}
166
  bind:value={systemMessage.content}
167
  class="absolute inset-x-0 bottom-0 h-full resize-none bg-transparent px-3 pt-10 text-sm outline-none"
168
  ></textarea>
@@ -176,7 +176,7 @@
176
  index={0}
177
  {viewCode}
178
  on:addMessage={addMessage}
179
- on:deleteMessage={(e) => deleteMessage(e.detail)}
180
  />
181
  </div>
182
  <div
@@ -186,9 +186,7 @@
186
  type="button"
187
  class="flex h-[39px] flex-none gap-2 rounded-lg border border-gray-200 bg-white px-3 py-2.5 text-sm font-medium text-gray-900 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:outline-none focus:ring-4 focus:ring-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
188
  >
189
- <div
190
- class="flex size-5 items-center justify-center rounded border border-black/5 bg-black/5 text-xs"
191
- >
192
  <IconShare />
193
  </div>
194
 
@@ -211,7 +209,7 @@
211
  class="flex h-[39px] items-center gap-2 rounded-lg border border-gray-200 bg-white px-3 py-2.5 text-sm font-medium text-gray-900 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:outline-none focus:ring-4 focus:ring-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
212
  >
213
  <IconCode />
214
- {!viewCode ? 'View Code' : 'Hide Code'}</button
215
  >
216
  <button
217
  on:click={() => {
@@ -240,8 +238,7 @@
240
  />
241
  </div>
242
  {:else}
243
- Run <span
244
- class="inline-flex gap-0.5 rounded border border-white/20 bg-white/10 px-0.5 text-xs text-white/70"
245
  >↵</span
246
  >
247
  {/if}
@@ -257,9 +254,7 @@
257
  <GenerationConfig bind:conversation />
258
  <div class="mt-auto">
259
  <div class="mb-3 flex items-center justify-between gap-2">
260
- <label for="default-range" class="block text-sm font-medium text-gray-900 dark:text-white"
261
- >API Quota</label
262
- >
263
  <span
264
  class="rounded bg-gray-100 px-1.5 py-0.5 text-xs font-medium text-gray-800 dark:bg-gray-700 dark:text-gray-300"
265
  >Free</span
 
3
  createHfInference,
4
  handleStreamingResponse,
5
  handleNonStreamingResponse,
6
+ isSystemPromptSupported,
7
+ } from "./inferencePlaygroundUtils";
8
+ import GenerationConfig from "./InferencePlaygroundGenerationConfig.svelte";
9
+ import HFTokenModal from "./InferencePlaygroundHFTokenModal.svelte";
10
+ import ModelSelector from "./InferencePlaygroundModelSelector.svelte";
11
+ import Conversation from "./InferencePlaygroundConversation.svelte";
12
+ import { onDestroy } from "svelte";
13
+ import { type ChatCompletionInputMessage } from "@huggingface/tasks";
14
+ import type { ModelEntryWithTokenizer } from "$lib/types";
15
+ import { defaultGenerationConfig } from "./generationConfigSettings";
16
+ import IconShare from "../Icons/IconShare.svelte";
17
+ import IconDelete from "../Icons/IconDelete.svelte";
18
+ import IconCode from "../Icons/IconCode.svelte";
19
 
20
  export let models: ModelEntryWithTokenizer[];
21
 
22
+ const startMessages: ChatCompletionInputMessage[] = [{ role: "user", content: "" }];
23
 
24
  let conversation: Conversation = {
25
  model: models[0],
26
  config: defaultGenerationConfig,
27
  messages: startMessages,
28
+ streaming: true,
29
  };
30
 
31
+ let systemMessage: ChatCompletionInputMessage = { role: "system", content: "" };
32
  let hfToken: string | undefined = import.meta.env.VITE_HF_TOKEN;
33
  let viewCode = false;
34
  let showTokenModal = false;
 
47
  conversation.messages = [
48
  ...conversation.messages,
49
  {
50
+ role: conversation.messages.at(-1)?.role === "user" ? "assistant" : "user",
51
+ content: "",
52
+ },
53
  ];
54
  }
55
 
 
59
  }
60
 
61
  function reset() {
62
+ systemMessage.content = "";
63
  conversation.messages = [...startMessages];
64
  }
65
 
 
88
  const hf = createHfInference(hfToken);
89
 
90
  if (conversation.streaming) {
91
+ const streamingMessage = { role: "assistant", content: "" };
92
  conversation.messages = [...conversation.messages, streamingMessage];
93
  abortController = new AbortController();
94
 
95
  await handleStreamingResponse(
96
  hf,
97
  conversation,
98
+ content => {
99
  if (streamingMessage) {
100
  streamingMessage.content = content;
101
  conversation.messages = [...conversation.messages];
 
118
 
119
  addMessage();
120
  } catch (error) {
121
+ if (error.name !== "AbortError") {
122
+ alert("error: " + (error as Error).message);
123
  }
124
  } finally {
125
  loading = false;
 
128
  }
129
 
130
  function onKeydown(event: KeyboardEvent) {
131
+ if (!event.shiftKey && event.key === "Enter") {
132
  submit();
133
  }
134
  }
 
137
  {#if showTokenModal}
138
  <HFTokenModal
139
  on:close={() => (showTokenModal = false)}
140
+ on:submit={e => {
141
  const formData = new FormData(e.target);
142
+ hfToken = formData.get("hf-token");
143
  submit();
144
  showTokenModal = false;
145
  }}
 
161
  name=""
162
  id=""
163
  placeholder={systemPromptSupported
164
+ ? "Enter a custom prompt"
165
+ : "System prompt is not supported with the chosen model."}
166
  bind:value={systemMessage.content}
167
  class="absolute inset-x-0 bottom-0 h-full resize-none bg-transparent px-3 pt-10 text-sm outline-none"
168
  ></textarea>
 
176
  index={0}
177
  {viewCode}
178
  on:addMessage={addMessage}
179
+ on:deleteMessage={e => deleteMessage(e.detail)}
180
  />
181
  </div>
182
  <div
 
186
  type="button"
187
  class="flex h-[39px] flex-none gap-2 rounded-lg border border-gray-200 bg-white px-3 py-2.5 text-sm font-medium text-gray-900 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:outline-none focus:ring-4 focus:ring-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
188
  >
189
+ <div class="flex size-5 items-center justify-center rounded border border-black/5 bg-black/5 text-xs">
 
 
190
  <IconShare />
191
  </div>
192
 
 
209
  class="flex h-[39px] items-center gap-2 rounded-lg border border-gray-200 bg-white px-3 py-2.5 text-sm font-medium text-gray-900 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:outline-none focus:ring-4 focus:ring-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
210
  >
211
  <IconCode />
212
+ {!viewCode ? "View Code" : "Hide Code"}</button
213
  >
214
  <button
215
  on:click={() => {
 
238
  />
239
  </div>
240
  {:else}
241
+ Run <span class="inline-flex gap-0.5 rounded border border-white/20 bg-white/10 px-0.5 text-xs text-white/70"
 
242
  >↵</span
243
  >
244
  {/if}
 
254
  <GenerationConfig bind:conversation />
255
  <div class="mt-auto">
256
  <div class="mb-3 flex items-center justify-between gap-2">
257
+ <label for="default-range" class="block text-sm font-medium text-gray-900 dark:text-white">API Quota</label>
 
 
258
  <span
259
  class="rounded bg-gray-100 px-1.5 py-0.5 text-xs font-medium text-gray-800 dark:bg-gray-700 dark:text-gray-300"
260
  >Free</span
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte CHANGED
@@ -1,24 +1,24 @@
1
  <script lang="ts">
2
- import hljs from 'highlight.js/lib/core';
3
- import javascript from 'highlight.js/lib/languages/javascript';
4
- import python from 'highlight.js/lib/languages/python';
5
- import http from 'highlight.js/lib/languages/http';
6
- import type { Conversation } from '$lib/types';
7
- import IconCopyCode from '../Icons/IconCopyCode.svelte';
8
- import { onDestroy } from 'svelte';
9
-
10
- hljs.registerLanguage('javascript', javascript);
11
- hljs.registerLanguage('python', python);
12
- hljs.registerLanguage('http', http);
13
 
14
  export let conversation: Conversation;
15
 
16
- const lanuages = ['javascript', 'python', 'http'];
17
  type Language = (typeof lanuages)[number];
18
  const labelsByLanguage: Record<Language, string> = {
19
- javascript: 'JavaScript',
20
- python: 'Python',
21
- http: 'Curl'
22
  };
23
 
24
  interface Snippet {
@@ -30,16 +30,16 @@
30
  $: snippetsByLanguage = {
31
  javascript: getJavascriptSnippets(conversation),
32
  python: getPythonSnippets(conversation),
33
- http: getHttpSnippets(conversation)
34
  };
35
 
36
- let selectedLanguage: Language = 'javascript';
37
  let timeout: ReturnType<typeof setTimeout>;
38
 
39
  function getMessages() {
40
- const placeholder = [{ role: 'user', content: 'Tell me a story' }];
41
  let messages = conversation.messages;
42
- if (messages.length === 1 && messages[0].role === 'user' && !messages[0].content) {
43
  messages = placeholder;
44
  }
45
  return messages;
@@ -66,13 +66,13 @@
66
 
67
  const snippets: Snippet[] = [];
68
  snippets.push({
69
- label: 'Install @huggingface/inference',
70
- language: 'http',
71
- code: `npm install --save @huggingface/inference`
72
  });
73
  if (conversation.streaming) {
74
  snippets.push({
75
- label: 'Streaming API',
76
  code: `import { HfInference } from "@huggingface/inference"
77
 
78
  const inference = new HfInference("your HF token")
@@ -81,8 +81,8 @@ let out = "";
81
 
82
  for await (const chunk of inference.chatCompletionStream({
83
  model: "${conversation.model.id}",
84
- messages: ${formattedMessages({ sep: ',\n ', start: '[\n ', end: '\n ]' })},
85
- ${formattedConfig({ sep: ',\n ', start: '', end: '' })},
86
  seed: 0,
87
  })) {
88
  if (chunk.choices && chunk.choices.length > 0) {
@@ -91,24 +91,24 @@ for await (const chunk of inference.chatCompletionStream({
91
  console.clear();
92
  console.log(out);
93
  }
94
- }`
95
  });
96
  } else {
97
  // non-streaming
98
  snippets.push({
99
- label: 'Non-Streaming API',
100
  code: `import { HfInference } from '@huggingface/inference'
101
 
102
  const inference = new HfInference("your access token")
103
 
104
  const out = await inference.chatCompletion({
105
  model: "${conversation.model.id}",
106
- messages: ${formattedMessages({ sep: ',\n ', start: '[\n ', end: '\n ]' })},
107
- ${formattedConfig({ sep: ',\n ', start: '', end: '' })},
108
  seed: 0,
109
  });
110
 
111
- console.log(out.choices[0].message);`
112
  });
113
  }
114
 
@@ -132,13 +132,13 @@ console.log(out.choices[0].message);`
132
 
133
  const snippets: Snippet[] = [];
134
  snippets.push({
135
- label: 'Install huggingface_hub',
136
- language: 'http',
137
- code: `pip install huggingface_hub`
138
  });
139
  if (conversation.streaming) {
140
  snippets.push({
141
- label: 'Streaming API',
142
  code: `from huggingface_hub import InferenceClient
143
 
144
  model_id="${conversation.model.id}"
@@ -147,28 +147,28 @@ inference_client = InferenceClient(model_id, token=hf_token)
147
 
148
  output = ""
149
 
150
- messages = ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })}
151
 
152
- for token in client.chat_completion(messages, stream=True, ${formattedConfig({ sep: ', ', start: '', end: '' })}):
153
  new_content = token.choices[0].delta.content
154
  print(new_content, end="")
155
- output += new_content`
156
  });
157
  } else {
158
  // non-streaming
159
  snippets.push({
160
- label: 'Non-Streaming API',
161
  code: `from huggingface_hub import InferenceClient
162
 
163
  model_id="${conversation.model.id}"
164
  hf_token = "your HF token"
165
  inference_client = InferenceClient(model_id, token=hf_token)
166
 
167
- messages = ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })}
168
 
169
- output = inference_client.chat_completion(messages, ${formattedConfig({ sep: ', ', start: '', end: '' })})
170
 
171
- print(output.choices[0].message)`
172
  });
173
  }
174
 
@@ -194,29 +194,29 @@ print(output.choices[0].message)`
194
 
195
  if (conversation.streaming) {
196
  snippets.push({
197
- label: 'Streaming API',
198
  code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
199
  --header "Authorization: Bearer {YOUR_HF_TOKEN}" \\
200
  --header 'Content-Type: application/json' \\
201
  --data '{
202
  "model": "${conversation.model.id}",
203
- "messages": ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })},
204
- ${formattedConfig({ sep: ',\n ', start: '', end: '' })},
205
  "stream": true
206
- }'`
207
  });
208
  } else {
209
  // non-streaming
210
  snippets.push({
211
- label: 'Non-Streaming API',
212
  code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
213
  --header "Authorization: Bearer {YOUR_HF_TOKEN}" \\
214
  --header 'Content-Type: application/json' \\
215
  --data '{
216
  "model": "${conversation.model.id}",
217
- "messages": ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })},
218
- ${formattedConfig({ sep: ',\n ', start: '', end: '' })}
219
- }'`
220
  });
221
  }
222
 
@@ -254,15 +254,15 @@ print(output.choices[0].message)`
254
  <h2 class="font-semibold">{label}</h2>
255
  <button
256
  class="flex items-center gap-x-1.5 rounded-md bg-gray-200 px-1.5 py-0.5 text-sm transition dark:bg-gray-950"
257
- on:click={(e) => {
258
  const el = e.currentTarget;
259
- el.classList.add('text-green-500');
260
  navigator.clipboard.writeText(code);
261
  if (timeout) {
262
  clearTimeout(timeout);
263
  }
264
  timeout = setTimeout(() => {
265
- el.classList.remove('text-green-500');
266
  }, 1000);
267
  }}
268
  >
 
1
  <script lang="ts">
2
+ import hljs from "highlight.js/lib/core";
3
+ import javascript from "highlight.js/lib/languages/javascript";
4
+ import python from "highlight.js/lib/languages/python";
5
+ import http from "highlight.js/lib/languages/http";
6
+ import type { Conversation } from "$lib/types";
7
+ import IconCopyCode from "../Icons/IconCopyCode.svelte";
8
+ import { onDestroy } from "svelte";
9
+
10
+ hljs.registerLanguage("javascript", javascript);
11
+ hljs.registerLanguage("python", python);
12
+ hljs.registerLanguage("http", http);
13
 
14
  export let conversation: Conversation;
15
 
16
+ const lanuages = ["javascript", "python", "http"];
17
  type Language = (typeof lanuages)[number];
18
  const labelsByLanguage: Record<Language, string> = {
19
+ javascript: "JavaScript",
20
+ python: "Python",
21
+ http: "Curl",
22
  };
23
 
24
  interface Snippet {
 
30
  $: snippetsByLanguage = {
31
  javascript: getJavascriptSnippets(conversation),
32
  python: getPythonSnippets(conversation),
33
+ http: getHttpSnippets(conversation),
34
  };
35
 
36
+ let selectedLanguage: Language = "javascript";
37
  let timeout: ReturnType<typeof setTimeout>;
38
 
39
  function getMessages() {
40
+ const placeholder = [{ role: "user", content: "Tell me a story" }];
41
  let messages = conversation.messages;
42
+ if (messages.length === 1 && messages[0].role === "user" && !messages[0].content) {
43
  messages = placeholder;
44
  }
45
  return messages;
 
66
 
67
  const snippets: Snippet[] = [];
68
  snippets.push({
69
+ label: "Install @huggingface/inference",
70
+ language: "http",
71
+ code: `npm install --save @huggingface/inference`,
72
  });
73
  if (conversation.streaming) {
74
  snippets.push({
75
+ label: "Streaming API",
76
  code: `import { HfInference } from "@huggingface/inference"
77
 
78
  const inference = new HfInference("your HF token")
 
81
 
82
  for await (const chunk of inference.chatCompletionStream({
83
  model: "${conversation.model.id}",
84
+ messages: ${formattedMessages({ sep: ",\n ", start: "[\n ", end: "\n ]" })},
85
+ ${formattedConfig({ sep: ",\n ", start: "", end: "" })},
86
  seed: 0,
87
  })) {
88
  if (chunk.choices && chunk.choices.length > 0) {
 
91
  console.clear();
92
  console.log(out);
93
  }
94
+ }`,
95
  });
96
  } else {
97
  // non-streaming
98
  snippets.push({
99
+ label: "Non-Streaming API",
100
  code: `import { HfInference } from '@huggingface/inference'
101
 
102
  const inference = new HfInference("your access token")
103
 
104
  const out = await inference.chatCompletion({
105
  model: "${conversation.model.id}",
106
+ messages: ${formattedMessages({ sep: ",\n ", start: "[\n ", end: "\n ]" })},
107
+ ${formattedConfig({ sep: ",\n ", start: "", end: "" })},
108
  seed: 0,
109
  });
110
 
111
+ console.log(out.choices[0].message);`,
112
  });
113
  }
114
 
 
132
 
133
  const snippets: Snippet[] = [];
134
  snippets.push({
135
+ label: "Install huggingface_hub",
136
+ language: "http",
137
+ code: `pip install huggingface_hub`,
138
  });
139
  if (conversation.streaming) {
140
  snippets.push({
141
+ label: "Streaming API",
142
  code: `from huggingface_hub import InferenceClient
143
 
144
  model_id="${conversation.model.id}"
 
147
 
148
  output = ""
149
 
150
+ messages = ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })}
151
 
152
+ for token in client.chat_completion(messages, stream=True, ${formattedConfig({ sep: ", ", start: "", end: "" })}):
153
  new_content = token.choices[0].delta.content
154
  print(new_content, end="")
155
+ output += new_content`,
156
  });
157
  } else {
158
  // non-streaming
159
  snippets.push({
160
+ label: "Non-Streaming API",
161
  code: `from huggingface_hub import InferenceClient
162
 
163
  model_id="${conversation.model.id}"
164
  hf_token = "your HF token"
165
  inference_client = InferenceClient(model_id, token=hf_token)
166
 
167
+ messages = ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })}
168
 
169
+ output = inference_client.chat_completion(messages, ${formattedConfig({ sep: ", ", start: "", end: "" })})
170
 
171
+ print(output.choices[0].message)`,
172
  });
173
  }
174
 
 
194
 
195
  if (conversation.streaming) {
196
  snippets.push({
197
+ label: "Streaming API",
198
  code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
199
  --header "Authorization: Bearer {YOUR_HF_TOKEN}" \\
200
  --header 'Content-Type: application/json' \\
201
  --data '{
202
  "model": "${conversation.model.id}",
203
+ "messages": ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })},
204
+ ${formattedConfig({ sep: ",\n ", start: "", end: "" })},
205
  "stream": true
206
+ }'`,
207
  });
208
  } else {
209
  // non-streaming
210
  snippets.push({
211
+ label: "Non-Streaming API",
212
  code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
213
  --header "Authorization: Bearer {YOUR_HF_TOKEN}" \\
214
  --header 'Content-Type: application/json' \\
215
  --data '{
216
  "model": "${conversation.model.id}",
217
+ "messages": ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })},
218
+ ${formattedConfig({ sep: ",\n ", start: "", end: "" })}
219
+ }'`,
220
  });
221
  }
222
 
 
254
  <h2 class="font-semibold">{label}</h2>
255
  <button
256
  class="flex items-center gap-x-1.5 rounded-md bg-gray-200 px-1.5 py-0.5 text-sm transition dark:bg-gray-950"
257
+ on:click={e => {
258
  const el = e.currentTarget;
259
+ el.classList.add("text-green-500");
260
  navigator.clipboard.writeText(code);
261
  if (timeout) {
262
  clearTimeout(timeout);
263
  }
264
  timeout = setTimeout(() => {
265
+ el.classList.remove("text-green-500");
266
  }, 1000);
267
  }}
268
  >
src/lib/components/InferencePlayground/InferencePlaygroundConversation.svelte CHANGED
@@ -1,9 +1,9 @@
1
  <script lang="ts">
2
- import { createEventDispatcher } from 'svelte';
3
- import CodeSnippets from './InferencePlaygroundCodeSnippets.svelte';
4
- import Message from './InferencePlaygroundMessage.svelte';
5
- import IconPlus from '../Icons/IconPlus.svelte';
6
- import type { Conversation } from '$lib/types';
7
 
8
  export let loading;
9
  export let conversation: Conversation;
@@ -40,14 +40,14 @@
40
  <Message
41
  class="border-b"
42
  {message}
43
- on:delete={() => dispatch('deleteMessage', messageIdx)}
44
  autofocus={!loading && messageIdx === conversation.messages.length - 1}
45
  />
46
  {/each}
47
 
48
  <button
49
  class="flex px-6 py-6 hover:bg-gray-50 dark:hover:bg-gray-800/50"
50
- on:click={() => dispatch('addMessage')}
51
  disabled={loading}
52
  >
53
  <div class="flex items-center gap-2 !p-0 text-sm font-semibold">
 
1
  <script lang="ts">
2
+ import { createEventDispatcher } from "svelte";
3
+ import CodeSnippets from "./InferencePlaygroundCodeSnippets.svelte";
4
+ import Message from "./InferencePlaygroundMessage.svelte";
5
+ import IconPlus from "../Icons/IconPlus.svelte";
6
+ import type { Conversation } from "$lib/types";
7
 
8
  export let loading;
9
  export let conversation: Conversation;
 
40
  <Message
41
  class="border-b"
42
  {message}
43
+ on:delete={() => dispatch("deleteMessage", messageIdx)}
44
  autofocus={!loading && messageIdx === conversation.messages.length - 1}
45
  />
46
  {/each}
47
 
48
  <button
49
  class="flex px-6 py-6 hover:bg-gray-50 dark:hover:bg-gray-800/50"
50
+ on:click={() => dispatch("addMessage")}
51
  disabled={loading}
52
  >
53
  <div class="flex items-center gap-2 !p-0 text-sm font-semibold">
src/lib/components/InferencePlayground/InferencePlaygroundGenerationConfig.svelte CHANGED
@@ -1,27 +1,26 @@
1
  <script lang="ts">
2
- import type { Conversation } from '$lib/types';
3
  import {
4
  GENERATION_CONFIG_KEYS,
5
  GENERATION_CONFIG_KEYS_ADVANCED,
6
- GENERATION_CONFIG_SETTINGS
7
- } from './generationConfigSettings';
8
 
9
  export let conversation: Conversation;
10
- export let classNames = '';
11
 
12
  $: modelMaxLength = conversation.model.tokenizerConfig.model_max_length;
13
- $: maxTokens = Math.min(modelMaxLength ?? GENERATION_CONFIG_SETTINGS['max_tokens'].max, 64_000);
14
  </script>
15
 
16
  <div class="flex flex-col gap-y-7 {classNames}">
17
  {#each GENERATION_CONFIG_KEYS as key}
18
  {@const { label, min, step } = GENERATION_CONFIG_SETTINGS[key]}
19
- {@const max = key === 'max_tokens' ? maxTokens : GENERATION_CONFIG_SETTINGS[key].max}
20
  <div>
21
  <div class="flex items-center justify-between">
22
- <label
23
- for="temperature-range"
24
- class="mb-2 block text-sm font-medium text-gray-900 dark:text-white">{label}</label
25
  >
26
  <input
27
  type="number"
@@ -51,9 +50,7 @@
51
  {@const settings = GENERATION_CONFIG_SETTINGS[key]}
52
  <div>
53
  <div class="flex items-center justify-between">
54
- <label
55
- for="temperature-range"
56
- class="mb-2 block text-sm font-medium text-gray-900 dark:text-white"
57
  >{settings.label}</label
58
  >
59
  <input
@@ -63,7 +60,7 @@
63
  max={settings.max}
64
  step={settings.step}
65
  value={conversation.config[key] ?? settings.default}
66
- on:input={(e) => (conversation.config[key] = Number(e.currentTarget.value))}
67
  />
68
  </div>
69
  <input
@@ -73,7 +70,7 @@
73
  max={settings.max}
74
  step={settings.step}
75
  value={conversation.config[key] ?? settings.default}
76
- on:input={(e) => (conversation.config[key] = Number(e.currentTarget.value))}
77
  class="h-2 w-full cursor-pointer appearance-none rounded-lg bg-gray-200 accent-black dark:bg-gray-700 dark:accent-blue-500"
78
  />
79
  </div>
 
1
  <script lang="ts">
2
+ import type { Conversation } from "$lib/types";
3
  import {
4
  GENERATION_CONFIG_KEYS,
5
  GENERATION_CONFIG_KEYS_ADVANCED,
6
+ GENERATION_CONFIG_SETTINGS,
7
+ } from "./generationConfigSettings";
8
 
9
  export let conversation: Conversation;
10
+ export let classNames = "";
11
 
12
  $: modelMaxLength = conversation.model.tokenizerConfig.model_max_length;
13
+ $: maxTokens = Math.min(modelMaxLength ?? GENERATION_CONFIG_SETTINGS["max_tokens"].max, 64_000);
14
  </script>
15
 
16
  <div class="flex flex-col gap-y-7 {classNames}">
17
  {#each GENERATION_CONFIG_KEYS as key}
18
  {@const { label, min, step } = GENERATION_CONFIG_SETTINGS[key]}
19
+ {@const max = key === "max_tokens" ? maxTokens : GENERATION_CONFIG_SETTINGS[key].max}
20
  <div>
21
  <div class="flex items-center justify-between">
22
+ <label for="temperature-range" class="mb-2 block text-sm font-medium text-gray-900 dark:text-white"
23
+ >{label}</label
 
24
  >
25
  <input
26
  type="number"
 
50
  {@const settings = GENERATION_CONFIG_SETTINGS[key]}
51
  <div>
52
  <div class="flex items-center justify-between">
53
+ <label for="temperature-range" class="mb-2 block text-sm font-medium text-gray-900 dark:text-white"
 
 
54
  >{settings.label}</label
55
  >
56
  <input
 
60
  max={settings.max}
61
  step={settings.step}
62
  value={conversation.config[key] ?? settings.default}
63
+ on:input={e => (conversation.config[key] = Number(e.currentTarget.value))}
64
  />
65
  </div>
66
  <input
 
70
  max={settings.max}
71
  step={settings.step}
72
  value={conversation.config[key] ?? settings.default}
73
+ on:input={e => (conversation.config[key] = Number(e.currentTarget.value))}
74
  class="h-2 w-full cursor-pointer appearance-none rounded-lg bg-gray-200 accent-black dark:bg-gray-700 dark:accent-blue-500"
75
  />
76
  </div>
src/lib/components/InferencePlayground/InferencePlaygroundHFTokenModal.svelte CHANGED
@@ -1,8 +1,8 @@
1
  <!-- Main modal -->
2
  <script lang="ts">
3
- import { createEventDispatcher, onDestroy, onMount } from 'svelte';
4
- import { browser } from '$app/environment';
5
- import IconCross from '../Icons/IconCross.svelte';
6
 
7
  let backdropEl: HTMLDivElement;
8
  let modalEl: HTMLDivElement;
@@ -11,9 +11,9 @@
11
 
12
  function handleKeydown(event: KeyboardEvent) {
13
  // close on ESC
14
- if (event.key === 'Escape') {
15
  event.preventDefault();
16
- dispatch('close');
17
  }
18
  }
19
 
@@ -22,12 +22,12 @@
22
  return;
23
  }
24
  if (event.target === backdropEl) {
25
- dispatch('close');
26
  }
27
  }
28
 
29
  onMount(() => {
30
- document.getElementById('app')?.setAttribute('inert', 'true');
31
  modalEl.focus();
32
  });
33
 
@@ -35,7 +35,7 @@
35
  if (!browser) return;
36
  // remove inert attribute if this is the last modal
37
  if (document.querySelectorAll('[role="dialog"]:not(#app *)').length === 1) {
38
- document.getElementById('app')?.removeAttribute('inert');
39
  }
40
  });
41
  </script>
@@ -56,9 +56,7 @@
56
  on:keydown={handleKeydown}
57
  >
58
  <form on:submit|preventDefault class="relative rounded-lg bg-white shadow dark:bg-gray-900">
59
- <div
60
- class="flex items-center justify-between rounded-t border-b p-4 md:px-5 md:py-4 dark:border-gray-600"
61
- >
62
  <h3 class="flex items-center gap-2.5 text-lg font-semibold text-gray-900 dark:text-white">
63
  <img
64
  alt="Hugging Face's logo"
@@ -68,7 +66,7 @@
68
  </h3>
69
  <button
70
  type="button"
71
- on:click={() => dispatch('close')}
72
  class="ms-auto inline-flex h-8 w-8 items-center justify-center rounded-lg bg-transparent text-sm text-gray-400 hover:bg-gray-200 hover:text-gray-900 dark:hover:bg-gray-600 dark:hover:text-white"
73
  >
74
  <IconCross classNames="text-xl" />
@@ -98,9 +96,7 @@
98
  </div>
99
 
100
  <!-- Modal footer -->
101
- <div
102
- class="flex items-center justify-between rounded-b border-t border-gray-200 p-4 md:p-5 dark:border-gray-600"
103
- >
104
  <a
105
  href="https://huggingface.co/settings/tokens?new_token=true"
106
  tabindex="-1"
 
1
  <!-- Main modal -->
2
  <script lang="ts">
3
+ import { createEventDispatcher, onDestroy, onMount } from "svelte";
4
+ import { browser } from "$app/environment";
5
+ import IconCross from "../Icons/IconCross.svelte";
6
 
7
  let backdropEl: HTMLDivElement;
8
  let modalEl: HTMLDivElement;
 
11
 
12
  function handleKeydown(event: KeyboardEvent) {
13
  // close on ESC
14
+ if (event.key === "Escape") {
15
  event.preventDefault();
16
+ dispatch("close");
17
  }
18
  }
19
 
 
22
  return;
23
  }
24
  if (event.target === backdropEl) {
25
+ dispatch("close");
26
  }
27
  }
28
 
29
  onMount(() => {
30
+ document.getElementById("app")?.setAttribute("inert", "true");
31
  modalEl.focus();
32
  });
33
 
 
35
  if (!browser) return;
36
  // remove inert attribute if this is the last modal
37
  if (document.querySelectorAll('[role="dialog"]:not(#app *)').length === 1) {
38
+ document.getElementById("app")?.removeAttribute("inert");
39
  }
40
  });
41
  </script>
 
56
  on:keydown={handleKeydown}
57
  >
58
  <form on:submit|preventDefault class="relative rounded-lg bg-white shadow dark:bg-gray-900">
59
+ <div class="flex items-center justify-between rounded-t border-b p-4 md:px-5 md:py-4 dark:border-gray-600">
 
 
60
  <h3 class="flex items-center gap-2.5 text-lg font-semibold text-gray-900 dark:text-white">
61
  <img
62
  alt="Hugging Face's logo"
 
66
  </h3>
67
  <button
68
  type="button"
69
+ on:click={() => dispatch("close")}
70
  class="ms-auto inline-flex h-8 w-8 items-center justify-center rounded-lg bg-transparent text-sm text-gray-400 hover:bg-gray-200 hover:text-gray-900 dark:hover:bg-gray-600 dark:hover:text-white"
71
  >
72
  <IconCross classNames="text-xl" />
 
96
  </div>
97
 
98
  <!-- Modal footer -->
99
+ <div class="flex items-center justify-between rounded-b border-t border-gray-200 p-4 md:p-5 dark:border-gray-600">
 
 
100
  <a
101
  href="https://huggingface.co/settings/tokens?new_token=true"
102
  tabindex="-1"
src/lib/components/InferencePlayground/InferencePlaygroundMessage.svelte CHANGED
@@ -1,6 +1,6 @@
1
  <script lang="ts">
2
- import { createEventDispatcher } from 'svelte';
3
- import { type ChatCompletionInputMessage } from '@huggingface/tasks';
4
 
5
  export let message: ChatCompletionInputMessage;
6
  export let autofocus: boolean = false;
@@ -24,7 +24,7 @@
24
  <button
25
  tabindex="1"
26
  on:click={() => {
27
- dispatch('delete');
28
  }}
29
  type="button"
30
  class="mt-1.5 hidden size-8 rounded-lg border border-gray-200 bg-white text-xs font-medium text-gray-900 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:outline-none focus:ring-4 focus:ring-gray-100 group-hover/message:block dark:border-gray-600 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
 
1
  <script lang="ts">
2
+ import { createEventDispatcher } from "svelte";
3
+ import { type ChatCompletionInputMessage } from "@huggingface/tasks";
4
 
5
  export let message: ChatCompletionInputMessage;
6
  export let autofocus: boolean = false;
 
24
  <button
25
  tabindex="1"
26
  on:click={() => {
27
+ dispatch("delete");
28
  }}
29
  type="button"
30
  class="mt-1.5 hidden size-8 rounded-lg border border-gray-200 bg-white text-xs font-medium text-gray-900 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:outline-none focus:ring-4 focus:ring-gray-100 group-hover/message:block dark:border-gray-600 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white dark:focus:ring-gray-700"
src/lib/components/InferencePlayground/InferencePlaygroundModelSelector.svelte CHANGED
@@ -1,7 +1,7 @@
1
  <script lang="ts">
2
- import type { Conversation, ModelEntryWithTokenizer } from '$lib/types';
3
- import IconCaret from '../Icons/IconCaret.svelte';
4
- import ModelSelectorModal from './InferencePlaygroundModelSelectorModal.svelte';
5
 
6
  export let models: ModelEntryWithTokenizer[] = [];
7
  export let conversation: Conversation;
@@ -21,28 +21,26 @@
21
  }
22
 
23
  function changeModel(modelId: string) {
24
- const model = models.find((m) => m.id === modelId);
25
  if (!model) {
26
  return;
27
  }
28
  conversation.model = model;
29
  }
30
 
31
- $: [nameSpace, modelName] = conversation.model.id.split('/');
32
  </script>
33
 
34
  {#if showModelPickerModal}
35
  <ModelSelectorModal
36
  {models}
37
- on:modelSelected={(e) => changeModel(e.detail)}
38
- on:close={(e) => (showModelPickerModal = false)}
39
  />
40
  {/if}
41
 
42
  <div class="flex flex-col gap-2">
43
- <label
44
- for="countries"
45
- class="flex items-baseline text-sm font-medium text-gray-900 dark:text-white"
46
  >Models<span class="ml-4 font-normal text-gray-400">{models.length}</span>
47
  </label>
48
 
@@ -53,11 +51,7 @@
53
  <div class="flex flex-col items-start">
54
  <div class="flex items-center gap-1 text-sm text-gray-500 dark:text-gray-300">
55
  {#await getAvatarUrl(nameSpace) then avatarUrl}
56
- <img
57
- class="size-3 flex-none rounded bg-gray-200 object-cover"
58
- src={avatarUrl}
59
- alt="{nameSpace} avatar"
60
- />
61
  {/await}
62
  {nameSpace}
63
  </div>
 
1
  <script lang="ts">
2
+ import type { Conversation, ModelEntryWithTokenizer } from "$lib/types";
3
+ import IconCaret from "../Icons/IconCaret.svelte";
4
+ import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
5
 
6
  export let models: ModelEntryWithTokenizer[] = [];
7
  export let conversation: Conversation;
 
21
  }
22
 
23
  function changeModel(modelId: string) {
24
+ const model = models.find(m => m.id === modelId);
25
  if (!model) {
26
  return;
27
  }
28
  conversation.model = model;
29
  }
30
 
31
+ $: [nameSpace, modelName] = conversation.model.id.split("/");
32
  </script>
33
 
34
  {#if showModelPickerModal}
35
  <ModelSelectorModal
36
  {models}
37
+ on:modelSelected={e => changeModel(e.detail)}
38
+ on:close={e => (showModelPickerModal = false)}
39
  />
40
  {/if}
41
 
42
  <div class="flex flex-col gap-2">
43
+ <label for="countries" class="flex items-baseline text-sm font-medium text-gray-900 dark:text-white"
 
 
44
  >Models<span class="ml-4 font-normal text-gray-400">{models.length}</span>
45
  </label>
46
 
 
51
  <div class="flex flex-col items-start">
52
  <div class="flex items-center gap-1 text-sm text-gray-500 dark:text-gray-300">
53
  {#await getAvatarUrl(nameSpace) then avatarUrl}
54
+ <img class="size-3 flex-none rounded bg-gray-200 object-cover" src={avatarUrl} alt="{nameSpace} avatar" />
 
 
 
 
55
  {/await}
56
  {nameSpace}
57
  </div>
src/lib/components/InferencePlayground/InferencePlaygroundModelSelectorModal.svelte CHANGED
@@ -1,8 +1,8 @@
1
  <script lang="ts">
2
- import type { ModelEntryWithTokenizer } from '$lib/types';
3
- import { createEventDispatcher } from 'svelte';
4
- import IconSearch from '../Icons/IconSearch.svelte';
5
- import IconStar from '../Icons/IconStar.svelte';
6
 
7
  export let models: ModelEntryWithTokenizer[];
8
 
@@ -12,9 +12,9 @@
12
 
13
  function handleKeydown(event: KeyboardEvent) {
14
  // close on ESC
15
- if (event.key === 'Escape') {
16
  event.preventDefault();
17
- dispatch('close');
18
  }
19
  }
20
 
@@ -23,7 +23,7 @@
23
  return;
24
  }
25
  if (event.target === backdropEl) {
26
- dispatch('close');
27
  }
28
  }
29
  </script>
@@ -36,9 +36,7 @@
36
  on:click|stopPropagation={handleBackdropClick}
37
  >
38
  <div class="flex w-full max-w-[600px] items-start justify-center p-10">
39
- <div
40
- class="flex h-full w-full flex-col overflow-hidden rounded-lg border bg-white text-gray-900 shadow-md"
41
- >
42
  <div class="flex items-center border-b px-3">
43
  <IconSearch classNames="mr-2 text-sm" />
44
  <input
@@ -55,15 +53,17 @@
55
  <div class="flex cursor-pointer items-center px-2 py-1.5 text-sm hover:bg-gray-100">
56
  <IconStar classNames="lucide lucide-star mr-2 h-4 w-4 text-yellow-400" />
57
  <span class="inline-flex items-center"
58
- ><span class="text-gray-500">meta-llama</span><span class="mx-1 text-black">/</span
59
- ><span class="text-black">Meta-Llama-3-70B-Instruct</span></span
 
60
  >
61
  </div>
62
  <div class="flex cursor-pointer items-center px-2 py-1.5 text-sm hover:bg-gray-100">
63
  <IconStar classNames="lucide lucide-star mr-2 h-4 w-4 text-yellow-400" />
64
  <span class="inline-flex items-center"
65
- ><span class="text-gray-500">mistralai</span><span class="mx-1 text-black">/</span
66
- ><span class="text-black">Mixtral-8x7B-Instruct-v0.1</span></span
 
67
  >
68
  </div>
69
  </div>
@@ -73,18 +73,18 @@
73
  <div class="px-2 py-1.5 text-xs font-medium text-gray-500">Other Models</div>
74
  <div>
75
  {#each models as model}
76
- {@const [nameSpace, modelName] = model.id.split('/')}
77
  <button
78
  class="flex cursor-pointer items-center px-2 py-1.5 text-sm hover:bg-gray-100"
79
  on:click={() => {
80
- dispatch('modelSelected', model.id);
81
- dispatch('close');
82
  }}
83
  >
84
  <span class="inline-flex items-center"
85
- ><span class="text-gray-500">{nameSpace}</span><span class="mx-1 text-black"
86
- >/</span
87
- ><span class="text-black">{modelName}</span></span
88
  >
89
  </button>
90
  {/each}
 
1
  <script lang="ts">
2
+ import type { ModelEntryWithTokenizer } from "$lib/types";
3
+ import { createEventDispatcher } from "svelte";
4
+ import IconSearch from "../Icons/IconSearch.svelte";
5
+ import IconStar from "../Icons/IconStar.svelte";
6
 
7
  export let models: ModelEntryWithTokenizer[];
8
 
 
12
 
13
  function handleKeydown(event: KeyboardEvent) {
14
  // close on ESC
15
+ if (event.key === "Escape") {
16
  event.preventDefault();
17
+ dispatch("close");
18
  }
19
  }
20
 
 
23
  return;
24
  }
25
  if (event.target === backdropEl) {
26
+ dispatch("close");
27
  }
28
  }
29
  </script>
 
36
  on:click|stopPropagation={handleBackdropClick}
37
  >
38
  <div class="flex w-full max-w-[600px] items-start justify-center p-10">
39
+ <div class="flex h-full w-full flex-col overflow-hidden rounded-lg border bg-white text-gray-900 shadow-md">
 
 
40
  <div class="flex items-center border-b px-3">
41
  <IconSearch classNames="mr-2 text-sm" />
42
  <input
 
53
  <div class="flex cursor-pointer items-center px-2 py-1.5 text-sm hover:bg-gray-100">
54
  <IconStar classNames="lucide lucide-star mr-2 h-4 w-4 text-yellow-400" />
55
  <span class="inline-flex items-center"
56
+ ><span class="text-gray-500">meta-llama</span><span class="mx-1 text-black">/</span><span
57
+ class="text-black">Meta-Llama-3-70B-Instruct</span
58
+ ></span
59
  >
60
  </div>
61
  <div class="flex cursor-pointer items-center px-2 py-1.5 text-sm hover:bg-gray-100">
62
  <IconStar classNames="lucide lucide-star mr-2 h-4 w-4 text-yellow-400" />
63
  <span class="inline-flex items-center"
64
+ ><span class="text-gray-500">mistralai</span><span class="mx-1 text-black">/</span><span
65
+ class="text-black">Mixtral-8x7B-Instruct-v0.1</span
66
+ ></span
67
  >
68
  </div>
69
  </div>
 
73
  <div class="px-2 py-1.5 text-xs font-medium text-gray-500">Other Models</div>
74
  <div>
75
  {#each models as model}
76
+ {@const [nameSpace, modelName] = model.id.split("/")}
77
  <button
78
  class="flex cursor-pointer items-center px-2 py-1.5 text-sm hover:bg-gray-100"
79
  on:click={() => {
80
+ dispatch("modelSelected", model.id);
81
+ dispatch("close");
82
  }}
83
  >
84
  <span class="inline-flex items-center"
85
+ ><span class="text-gray-500">{nameSpace}</span><span class="mx-1 text-black">/</span><span
86
+ class="text-black">{modelName}</span
87
+ ></span
88
  >
89
  </button>
90
  {/each}
src/lib/components/InferencePlayground/generationConfigSettings.ts CHANGED
@@ -12,47 +12,43 @@ export const GENERATION_CONFIG_SETTINGS: Record<string, GenerationKeySettings> =
12
  step: 0.01,
13
  min: 0,
14
  max: 2,
15
- label: 'Temperature'
16
  },
17
  max_tokens: {
18
  default: 512,
19
  step: 1,
20
  min: 1,
21
  max: 8192, // changed dynamically based on model
22
- label: 'Max Tokens'
23
  },
24
  top_p: {
25
  default: 0.7,
26
  step: 0.01,
27
  min: 0,
28
  max: 1,
29
- label: 'Top-P'
30
  },
31
  top_k: {
32
  default: 50,
33
  step: 1,
34
  min: 1,
35
  max: 100,
36
- label: 'Top-K'
37
  },
38
  repetition_penalty: {
39
  default: 1,
40
  step: 0.01,
41
  min: 1,
42
  max: 2,
43
- label: 'Repetition Penalty'
44
- }
45
  };
46
 
47
  export type GenerationConfigKey = keyof typeof GENERATION_CONFIG_SETTINGS;
48
 
49
- export const GENERATION_CONFIG_KEYS: GenerationConfigKey[] = ['temperature', 'max_tokens'];
50
 
51
- export const GENERATION_CONFIG_KEYS_ADVANCED: GenerationConfigKey[] = [
52
- 'top_p',
53
- 'top_k',
54
- 'repetition_penalty'
55
- ];
56
 
57
  export type GenerationConfig = Record<GenerationConfigKey, number>;
58
 
 
12
  step: 0.01,
13
  min: 0,
14
  max: 2,
15
+ label: "Temperature",
16
  },
17
  max_tokens: {
18
  default: 512,
19
  step: 1,
20
  min: 1,
21
  max: 8192, // changed dynamically based on model
22
+ label: "Max Tokens",
23
  },
24
  top_p: {
25
  default: 0.7,
26
  step: 0.01,
27
  min: 0,
28
  max: 1,
29
+ label: "Top-P",
30
  },
31
  top_k: {
32
  default: 50,
33
  step: 1,
34
  min: 1,
35
  max: 100,
36
+ label: "Top-K",
37
  },
38
  repetition_penalty: {
39
  default: 1,
40
  step: 0.01,
41
  min: 1,
42
  max: 2,
43
+ label: "Repetition Penalty",
44
+ },
45
  };
46
 
47
  export type GenerationConfigKey = keyof typeof GENERATION_CONFIG_SETTINGS;
48
 
49
+ export const GENERATION_CONFIG_KEYS: GenerationConfigKey[] = ["temperature", "max_tokens"];
50
 
51
+ export const GENERATION_CONFIG_KEYS_ADVANCED: GenerationConfigKey[] = ["top_p", "top_k", "repetition_penalty"];
 
 
 
 
52
 
53
  export type GenerationConfig = Record<GenerationConfigKey, number>;
54
 
src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts CHANGED
@@ -1,6 +1,6 @@
1
- import { type ChatCompletionInputMessage } from '@huggingface/tasks';
2
- import { HfInference } from '@huggingface/inference';
3
- import type { Conversation, ModelEntryWithTokenizer } from '$lib/types';
4
 
5
  export function createHfInference(token: string): HfInference {
6
  return new HfInference(token);
@@ -14,18 +14,16 @@ export async function handleStreamingResponse(
14
  systemMessage?: ChatCompletionInputMessage
15
  ): Promise<void> {
16
  const messages = [
17
- ...(isSystemPromptSupported(conversation.model) && systemMessage?.content?.length
18
- ? [systemMessage]
19
- : []),
20
- ...conversation.messages
21
  ];
22
- let out = '';
23
  for await (const chunk of hf.chatCompletionStream(
24
  {
25
  model: conversation.model.id,
26
  messages,
27
  temperature: conversation.config.temperature,
28
- max_tokens: conversation.config.maxTokens
29
  },
30
  { signal: abortController.signal }
31
  )) {
@@ -42,25 +40,23 @@ export async function handleNonStreamingResponse(
42
  systemMessage?: ChatCompletionInputMessage
43
  ): Promise<ChatCompletionInputMessage> {
44
  const messages = [
45
- ...(isSystemPromptSupported(conversation.model) && systemMessage?.content?.length
46
- ? [systemMessage]
47
- : []),
48
- ...conversation.messages
49
  ];
50
 
51
  const response = await hf.chatCompletion({
52
  model: conversation.model,
53
  messages,
54
  temperature: conversation.config.temperature,
55
- max_tokens: conversation.config.maxTokens
56
  });
57
 
58
  if (response.choices && response.choices.length > 0) {
59
  return response.choices[0].message;
60
  }
61
- throw new Error('No response from the model');
62
  }
63
 
64
  export function isSystemPromptSupported(model: ModelEntryWithTokenizer) {
65
- return model.tokenizerConfig?.chat_template?.includes('system');
66
  }
 
1
+ import { type ChatCompletionInputMessage } from "@huggingface/tasks";
2
+ import { HfInference } from "@huggingface/inference";
3
+ import type { Conversation, ModelEntryWithTokenizer } from "$lib/types";
4
 
5
  export function createHfInference(token: string): HfInference {
6
  return new HfInference(token);
 
14
  systemMessage?: ChatCompletionInputMessage
15
  ): Promise<void> {
16
  const messages = [
17
+ ...(isSystemPromptSupported(conversation.model) && systemMessage?.content?.length ? [systemMessage] : []),
18
+ ...conversation.messages,
 
 
19
  ];
20
+ let out = "";
21
  for await (const chunk of hf.chatCompletionStream(
22
  {
23
  model: conversation.model.id,
24
  messages,
25
  temperature: conversation.config.temperature,
26
+ max_tokens: conversation.config.maxTokens,
27
  },
28
  { signal: abortController.signal }
29
  )) {
 
40
  systemMessage?: ChatCompletionInputMessage
41
  ): Promise<ChatCompletionInputMessage> {
42
  const messages = [
43
+ ...(isSystemPromptSupported(conversation.model) && systemMessage?.content?.length ? [systemMessage] : []),
44
+ ...conversation.messages,
 
 
45
  ];
46
 
47
  const response = await hf.chatCompletion({
48
  model: conversation.model,
49
  messages,
50
  temperature: conversation.config.temperature,
51
+ max_tokens: conversation.config.maxTokens,
52
  });
53
 
54
  if (response.choices && response.choices.length > 0) {
55
  return response.choices[0].message;
56
  }
57
+ throw new Error("No response from the model");
58
  }
59
 
60
  export function isSystemPromptSupported(model: ModelEntryWithTokenizer) {
61
+ return model.tokenizerConfig?.chat_template?.includes("system");
62
  }
src/lib/types/index.d.ts CHANGED
@@ -1,6 +1,6 @@
1
- import type { GenerationConfig } from '$lib/components/InferencePlayground/generationConfigSettings';
2
- import type { ModelEntry } from '@huggingface/hub';
3
- import type { ChatCompletionInputMessage } from '@huggingface/tasks';
4
 
5
  type Conversation = {
6
  model: ModelEntryWithTokenizer;
 
1
+ import type { GenerationConfig } from "$lib/components/InferencePlayground/generationConfigSettings";
2
+ import type { ModelEntry } from "@huggingface/hub";
3
+ import type { ChatCompletionInputMessage } from "@huggingface/tasks";
4
 
5
  type Conversation = {
6
  model: ModelEntryWithTokenizer;
src/routes/+layout.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script>
2
- import '../app.css';
3
  </script>
4
 
5
  <slot></slot>
 
1
  <script>
2
+ import "../app.css";
3
  </script>
4
 
5
  <slot></slot>
src/routes/+page.server.ts CHANGED
@@ -1,22 +1,21 @@
1
- import type { ModelEntryWithTokenizer } from '$lib/types';
2
- import type { ModelEntry } from '@huggingface/hub';
3
- import type { PageServerLoad } from './$types';
4
- import { env } from '$env/dynamic/private';
5
 
6
  export const load: PageServerLoad = async ({ fetch }) => {
7
- const apiUrl =
8
- 'https://huggingface.co/api/models?pipeline_tag=text-generation&inference=Warm&filter=conversational';
9
  const HF_TOKEN = env.HF_TOKEN;
10
 
11
  const res = await fetch(apiUrl, {
12
  headers: {
13
- Authorization: `Bearer ${HF_TOKEN}`
14
- }
15
  });
16
  const compatibleModels: ModelEntry[] = await res.json();
17
  compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));
18
 
19
- const promises = compatibleModels.map(async (model) => {
20
  const configUrl = `https://huggingface.co/${model.modelId}/raw/main/tokenizer_config.json`;
21
  const res = await fetch(configUrl);
22
  if (!res.ok) {
@@ -26,9 +25,7 @@ export const load: PageServerLoad = async ({ fetch }) => {
26
  return { ...model, tokenizerConfig } satisfies ModelEntryWithTokenizer;
27
  });
28
 
29
- const models: ModelEntryWithTokenizer[] = (await Promise.all(promises)).filter(
30
- (model) => model !== null
31
- );
32
 
33
  return { models };
34
  };
 
1
+ import type { ModelEntryWithTokenizer } from "$lib/types";
2
+ import type { ModelEntry } from "@huggingface/hub";
3
+ import type { PageServerLoad } from "./$types";
4
+ import { env } from "$env/dynamic/private";
5
 
6
  export const load: PageServerLoad = async ({ fetch }) => {
7
+ const apiUrl = "https://huggingface.co/api/models?pipeline_tag=text-generation&inference=Warm&filter=conversational";
 
8
  const HF_TOKEN = env.HF_TOKEN;
9
 
10
  const res = await fetch(apiUrl, {
11
  headers: {
12
+ Authorization: `Bearer ${HF_TOKEN}`,
13
+ },
14
  });
15
  const compatibleModels: ModelEntry[] = await res.json();
16
  compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));
17
 
18
+ const promises = compatibleModels.map(async model => {
19
  const configUrl = `https://huggingface.co/${model.modelId}/raw/main/tokenizer_config.json`;
20
  const res = await fetch(configUrl);
21
  if (!res.ok) {
 
25
  return { ...model, tokenizerConfig } satisfies ModelEntryWithTokenizer;
26
  });
27
 
28
+ const models: ModelEntryWithTokenizer[] = (await Promise.all(promises)).filter(model => model !== null);
 
 
29
 
30
  return { models };
31
  };
src/routes/+page.svelte CHANGED
@@ -1,6 +1,6 @@
1
  <script lang="ts">
2
  export let data;
3
- import InferencePlayground from '$lib/components/InferencePlayground/InferencePlayground.svelte';
4
  </script>
5
 
6
  <InferencePlayground models={data.models} />
 
1
  <script lang="ts">
2
  export let data;
3
+ import InferencePlayground from "$lib/components/InferencePlayground/InferencePlayground.svelte";
4
  </script>
5
 
6
  <InferencePlayground models={data.models} />
svelte.config.js CHANGED
@@ -1,5 +1,5 @@
1
- import adapter from '@sveltejs/adapter-node';
2
- import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
3
 
4
  /** @type {import('@sveltejs/kit').Config} */
5
  const config = {
@@ -11,8 +11,8 @@ const config = {
11
  // adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list.
12
  // If your environment is not supported, or you settled on a specific environment, switch out the adapter.
13
  // See https://kit.svelte.dev/docs/adapters for more information about adapters.
14
- adapter: adapter()
15
- }
16
  };
17
 
18
  export default config;
 
1
+ import adapter from "@sveltejs/adapter-node";
2
+ import { vitePreprocess } from "@sveltejs/vite-plugin-svelte";
3
 
4
  /** @type {import('@sveltejs/kit').Config} */
5
  const config = {
 
11
  // adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list.
12
  // If your environment is not supported, or you settled on a specific environment, switch out the adapter.
13
  // See https://kit.svelte.dev/docs/adapters for more information about adapters.
14
+ adapter: adapter(),
15
+ },
16
  };
17
 
18
  export default config;
tailwind.config.ts CHANGED
@@ -1,12 +1,12 @@
1
- import type { Config } from 'tailwindcss';
2
- import containerQueries from '@tailwindcss/container-queries';
3
 
4
  export default {
5
- content: ['./src/**/*.{html,js,svelte,ts}'],
6
 
7
  theme: {
8
- extend: {}
9
  },
10
 
11
- plugins: [containerQueries]
12
  } as Config;
 
1
+ import type { Config } from "tailwindcss";
2
+ import containerQueries from "@tailwindcss/container-queries";
3
 
4
  export default {
5
+ content: ["./src/**/*.{html,js,svelte,ts}"],
6
 
7
  theme: {
8
+ extend: {},
9
  },
10
 
11
+ plugins: [containerQueries],
12
  } as Config;
vite.config.ts CHANGED
@@ -1,6 +1,6 @@
1
- import { sveltekit } from '@sveltejs/kit/vite';
2
- import { defineConfig } from 'vite';
3
 
4
  export default defineConfig({
5
- plugins: [sveltekit()]
6
  });
 
1
+ import { sveltekit } from "@sveltejs/kit/vite";
2
+ import { defineConfig } from "vite";
3
 
4
  export default defineConfig({
5
+ plugins: [sveltekit()],
6
  });