diff --git a/src/app.html b/src/app.html index 5f79324942486..cdfdad8b3f2dc 100644 --- a/src/app.html +++ b/src/app.html @@ -36,6 +36,11 @@ }, propertyConfiguration: { // Properties Plugin configuration + gpcDataSharingOptIn: false, + callback: { + userConsentDetails: _getWcpUserConsentDetails + }, + env: 'PROD' // Environment can be set to PPE or PROD as needed. }, webAnalyticsConfiguration: { @@ -77,6 +82,7 @@ } }; + var siteConsent = null; WcpConsent.init( 'en-US', 'cookie-banner', @@ -91,6 +97,24 @@ WcpConsent.themes.light ); + function _getWcpUserConsentDetails() { + if (siteConsent) { + return siteConsent.getConsent(); + } + + // The exact value that you return here is dependent on your site, team and how + // you use any data that is stored (work with your privacy team to determine what the + // correct "defaults" (true or false) should be for each item when the code is + // unable to determine (via WCP) if or what the user has (or has not) consented + // to. + return { + Required: [true], // Most likely `true` + Analytics: [true], + SocialMedia: [true], + Advertising: [false] + }; + } + function onConsentChanged(categoryPreferences) { if (categoryPreferences.Analytics) { // Google Analytics diff --git a/src/routes/components/generative-ai-hero.svelte b/src/routes/components/generative-ai-hero.svelte index 0c5a6054d3a03..bff3953cc7ebd 100644 --- a/src/routes/components/generative-ai-hero.svelte +++ b/src/routes/components/generative-ai-hero.svelte @@ -15,7 +15,7 @@ and more.


- Learn more about ONNX Runtime & Generative AI → + Learn more about ONNX Runtime & Generative AI →
diff --git a/src/routes/generative-ai/+page.svelte b/src/routes/generative-ai/+page.svelte index beb6588e8b9a2..0cddd34fe9851 100644 --- a/src/routes/generative-ai/+page.svelte +++ b/src/routes/generative-ai/+page.svelte @@ -1,86 +1,157 @@ + - + - + + - - - + + + - - + + +
-

Stable Diffusion + ONNX Runtime

-

Use ONNX Runtime to accelerate this popular image generation model.

-

Benefits

-
-
-
-

Run Stable Diffusion outside of a Python environment

-
- Inference Stable Diffusion → +
+
+ Representing generative AI +
+
+

What is Generative AI?

+

+ Generative AI refers to artificial intelligence that creates new content—such as text, + images, audio, or code—based on patterns learned from existing data. Generative AI leverages + transformer models for text and diffusion models for images. These innovations are + transforming industries, enabling personalized experiences, automating creative processes, + and opening new possibilities for content generation! +

+
+
+ +
+ +
+

Generative AI Models

+
+ + {#each [{ title: 'Text Generation Models', description: 'Generate human-like text for chatbots, content creation, summarization, and more.', demos: [{ name: 'Llama', url: 'https://huggingface.co/meta-llama/Meta-Llama-3.1-8B' }, { name: 'Mistral', url: 'https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3' }, { name: 'Phi', url: 'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct' }] }, { title: 'Image Generation Models', description: 'Create artwork or realistic images from descriptions using AI models like Stable Diffusion.', demos: [{ name: 'Stable Diffusion', url: 'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0' }] }, { title: 'Audio Models', description: 'Generate audio, music, or speech from data inputs with AI models like Whisper.', demos: [{ name: 'Whisper', url: 'https://huggingface.co/spaces/Xenova/whisper-web' }] }, { title: 'Other Models', description: 'Generate diverse outputs like code, video, or 3D designs.', demos: [{ name: 'Request a Model', url: 'https://github.com/microsoft/onnxruntime-genai/discussions/categories/model-support' }] }] as model} +
+
+

{model.title}

+

{model.description}

+
+
+ {#each model.demos as demo} + {demo.name} + {/each} +
-
+ {/each}
-
-
-

- Speed up inference of Stable Diffusion on NVIDIA and AMD GPUs +

+ +
+ +
+

ONNX Runtime ❤️ Generative AI

+

+ Use ONNX Runtime for high performance, scalability, and flexibility when deploying generative + AI models. With support for diverse frameworks and hardware acceleration, ONNX Runtime ensures + efficient, cost-effective model inference across platforms. +

+ +
+
+

Run ONNX Runtime on:

+

+ {#key cycleIndex} + {cycleWords[cycleIndex]} + Platform for desktop, mobile, and cloud usage + {/key}

- +
+ + +
+ {#each [{ title: 'Multiplatform', description: 'Run ONNX Runtime on Desktop 🖥️, Mobile 📱, Browser 💻, or Cloud ☁️.' }, { title: 'On Device', description: 'Inference privately 🔐 and save costs ⚙️ with on-device models.' }, { title: 'Multimodal Compatibility', description: 'Use ONNX Runtime with vision or omni models. We work to quickly enable all new model scenarios 🚀.' }, { title: 'Easy to Use', description: 'Get started quickly ⏩ with our examples and tutorials.' }] as feature} +
+
+

{feature.title}

+

{feature.description}

+
+
+ {/each}
-

Performance

-

The average latency in seconds on Stable Diffusion v1.5 and v2.1 models:

-
-
- Stable Diffusion v1.5 latency graphs -
-
- Stable Diffusion v2.1 latency graphs + +
+

Tutorials & Demos

+

Get started with any of these tutorials and demos:

+
+ + {#each [{ title: 'Phi-3 Vision', img: coffee, description: 'A Desktop app demo to interact with text and images simultaneously.', url: 'https://onnxruntime.ai/docs/genai/tutorials/phi3-v.html' }, { title: 'LLM Chat App', img: vision_ui, description: 'Pick your favorite model and start chatting!', url: 'https://github.com/microsoft/onnxruntime-genai/tree/main/examples/chat_app' }, { title: 'Whisper in Browser', img: whisper, description: 'Run whisper to transcribe user audio in your browser!', url: 'https://github.com/microsoft/onnxruntime-inference-examples/tree/main/js/ort-whisper' }] as tutorial} +
+
{tutorial.title}
+
+

{tutorial.title}

+

{tutorial.description}

+ Try it out! +
+
+ {/each} +
-
-

Large Language Models + ONNX Runtime

-

- ONNX Runtime supports many popular large language model (LLM) families in the Hugging Face Model - Hub. These, along with thousands of other models, are easily convertible to ONNX using the - Optimum API. -

-
diff --git a/src/routes/generative-ai/aibrain.webp b/src/routes/generative-ai/aibrain.webp new file mode 100644 index 0000000000000..03107b21fa52b Binary files /dev/null and b/src/routes/generative-ai/aibrain.webp differ diff --git a/src/routes/generative-ai/browser.png b/src/routes/generative-ai/browser.png new file mode 100644 index 0000000000000..6328326455549 Binary files /dev/null and b/src/routes/generative-ai/browser.png differ diff --git a/src/routes/generative-ai/coffee.png b/src/routes/generative-ai/coffee.png new file mode 100644 index 0000000000000..be588435b9c1e Binary files /dev/null and b/src/routes/generative-ai/coffee.png differ diff --git a/src/routes/generative-ai/desktop.png b/src/routes/generative-ai/desktop.png new file mode 100644 index 0000000000000..0faa3d85cb633 Binary files /dev/null and b/src/routes/generative-ai/desktop.png differ diff --git a/src/routes/generative-ai/mobile.png b/src/routes/generative-ai/mobile.png new file mode 100644 index 0000000000000..a1518b5415fdd Binary files /dev/null and b/src/routes/generative-ai/mobile.png differ diff --git a/src/routes/generative-ai/vision_UI.png b/src/routes/generative-ai/vision_UI.png new file mode 100644 index 0000000000000..93ed7cdbde6fb Binary files /dev/null and b/src/routes/generative-ai/vision_UI.png differ diff --git a/src/routes/generative-ai/whisper.png b/src/routes/generative-ai/whisper.png new file mode 100644 index 0000000000000..51a02d7dcd6db Binary files /dev/null and b/src/routes/generative-ai/whisper.png differ diff --git a/src/routes/testimonials/testimonial-card.svelte b/src/routes/testimonials/testimonial-card.svelte index 8092ccc03485f..3f0be61aa0d5d 100644 --- a/src/routes/testimonials/testimonial-card.svelte +++ b/src/routes/testimonials/testimonial-card.svelte @@ -24,7 +24,7 @@