diff --git a/src/app.html b/src/app.html
index 5f79324942486..cdfdad8b3f2dc 100644
--- a/src/app.html
+++ b/src/app.html
@@ -36,6 +36,11 @@
 				},
 				propertyConfiguration: {
 					// Properties Plugin configuration
+					gpcDataSharingOptIn: false,
+					callback: {
+						userConsentDetails: _getWcpUserConsentDetails
+					},
+					env: 'PROD' // Environment can be set to PPE or PROD as needed.
 				},
 				webAnalyticsConfiguration: {
@@ -77,6 +82,7 @@
 			}
 		};
 
+		var siteConsent = null;
 		WcpConsent.init(
 			'en-US',
 			'cookie-banner',
@@ -91,6 +97,24 @@
 			WcpConsent.themes.light
 		);
 
+		function _getWcpUserConsentDetails() {
+			if (siteConsent) {
+				return siteConsent.getConsent();
+			}
+
+			// The exact value that you return here depends on your site, your team, and
+			// how you use any data that is stored. Work with your privacy team to determine
+			// what the correct "defaults" (true or false) should be for each item when the
+			// code is unable to determine (via WCP) whether, and to what, the user has
+			// (or has not) consented.
+			return {
+				Required: [true], // Most likely `true`
+				Analytics: [true],
+				SocialMedia: [true],
+				Advertising: [false]
+			};
+		}
+
 		function onConsentChanged(categoryPreferences) {
 			if (categoryPreferences.Analytics) {
 				// Google Analytics
diff --git a/src/routes/components/generative-ai-hero.svelte b/src/routes/components/generative-ai-hero.svelte
index 0c5a6054d3a03..bff3953cc7ebd 100644
--- a/src/routes/components/generative-ai-hero.svelte
+++ b/src/routes/components/generative-ai-hero.svelte
@@ -15,7 +15,7 @@
 		and more.
 		Use ONNX Runtime to accelerate this popular image generation model.
+		Generative AI refers to artificial intelligence that creates new content, such as text,
+		images, audio, or code, based on patterns learned from existing data. Generative AI leverages
+		transformer models for text and diffusion models for images. These innovations are
+		transforming industries, enabling personalized experiences, automating creative processes,
+		and opening new possibilities for content generation!
+		{model.description}
+		Use ONNX Runtime for high performance, scalability, and flexibility when deploying generative
+		AI models. With support for diverse frameworks and hardware acceleration, ONNX Runtime ensures
+		efficient, cost-effective model inference across platforms.
+		{feature.description}
+		The average latency in seconds on Stable Diffusion v1.5 and v2.1 models:
-		Get started with any of these tutorials and demos:
-		ONNX Runtime supports many popular large language model (LLM) families in the Hugging Face Model
-		Hub. These, along with thousands of other models, are easily convertible to ONNX using the
-		Optimum API.
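
To make the consent wiring in the app.html change easier to follow, here is a minimal, self-contained sketch of how the pieces fit together: the 1DS Properties Plugin calls the function registered as `callback.userConsentDetails`, which defers to the WCP banner's `siteConsent.getConsent()` once the banner is ready and otherwise returns the agreed-upon defaults, while `onConsentChanged` reacts to later changes. This is an illustration only, not the site's actual code: the middle arguments of `WcpConsent.init` (and therefore how `siteConsent` gets populated) are elided by the hunk context above, so the error-first init callback and the `enableGoogleAnalytics` helper below are assumptions.

```js
// Illustrative sketch only. WcpConsent is assumed to be provided globally by the WCP
// consent <script> already loaded in app.html; the init-callback shape (error first,
// then the site-consent object) and enableGoogleAnalytics are assumptions.
var siteConsent = null;

function _getWcpUserConsentDetails() {
	// Called by the 1DS Properties Plugin via propertyConfiguration.callback.userConsentDetails.
	// Report real choices once the banner is ready; otherwise use the agreed defaults.
	if (siteConsent) {
		return siteConsent.getConsent();
	}
	return { Required: [true], Analytics: [true], SocialMedia: [true], Advertising: [false] };
}

function enableGoogleAnalytics() {
	// Hypothetical stand-in for whatever loads or enables Google Analytics on the page.
	console.log('Analytics consent granted: enabling Google Analytics');
}

function onConsentChanged(categoryPreferences) {
	// Fired by WCP whenever the user changes their choices in the banner.
	if (categoryPreferences.Analytics) {
		enableGoogleAnalytics();
	}
}

WcpConsent.init(
	'en-US',          // culture
	'cookie-banner',  // id of the placeholder element that hosts the banner
	function (err, consent) {
		// Assumed callback: capture the site-consent object so that
		// _getWcpUserConsentDetails can report real consent from now on.
		if (!err) {
			siteConsent = consent;
		}
	},
	onConsentChanged,
	WcpConsent.themes.light
);
```

The fallback defaults mirror the comment in the diff: they only apply before WCP has initialized (or if it fails to load), and the right true/false values for each category are a privacy-team decision rather than a technical one.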