diff --git a/ProductivitySuite/README.md b/ProductivitySuite/README.md
new file mode 100644
index 000000000..036b2711e
--- /dev/null
+++ b/ProductivitySuite/README.md
@@ -0,0 +1,23 @@
+# OPEA Productivity Suite Application
+
+OPEA Productivity Suite is a powerful tool designed to streamline your workflow and boost productivity. This application leverages the cutting-edge OPEA microservices to provide a comprehensive suite of features that cater to the diverse needs of modern enterprises.
+
+### Key Features
+
+- Chat with Documents: Engage in intelligent conversations with your documents using our advanced RAG Capabilities. Our Retrieval-Augmented Generation (RAG) model allows you to ask questions, receive relevant information, and gain insights from your documents in real-time.
+
+- Content Summarization: Save time and effort by automatically summarizing lengthy documents or articles, enabling you to quickly grasp the key takeaways.
+
+- FAQ Generation: Effortlessly create comprehensive FAQs based on your documents, ensuring that your users have access to the information they need.
+
+- Code Generation: Boost your coding productivity with our code generation feature. Simply provide a description of the functionality you require, and the application will generate the corresponding code snippets, saving you valuable time and effort.
+
+- User Context Management: Maintain a seamless workflow by managing your user's context within the application. Our context management system keeps track of your documents and chat history, allowing for personalized experiences.
+
+- Identity and access management: uses the open-source platform Keycloak for single sign-on identity and access management.
+
+Refer to the [Keycloak Configuration Guide](./docker/xeon/keycloak_setup_guide.md) for more instructions on setting up Keycloak.
+
+Refer to the [Xeon Guide](./docker/xeon/README.md) for more instructions on building docker images from source and running the application via docker compose.
+
+Refer to the [Xeon Kubernetes Guide](./kubernetes/manifests/README.md) for more instructions on deploying the application via Kubernetes.
diff --git a/ProductivitySuite/assets/img/Login_page.png b/ProductivitySuite/assets/img/Login_page.png
new file mode 100644
index 000000000..0c325e5f2
Binary files /dev/null and b/ProductivitySuite/assets/img/Login_page.png differ
diff --git a/ProductivitySuite/assets/img/chat_qna_init.png b/ProductivitySuite/assets/img/chat_qna_init.png
new file mode 100644
index 000000000..c13630594
Binary files /dev/null and b/ProductivitySuite/assets/img/chat_qna_init.png differ
diff --git a/ProductivitySuite/assets/img/chatqna_with_conversation.png b/ProductivitySuite/assets/img/chatqna_with_conversation.png
new file mode 100644
index 000000000..1dad3a099
Binary files /dev/null and b/ProductivitySuite/assets/img/chatqna_with_conversation.png differ
diff --git a/ProductivitySuite/assets/img/codegen.png b/ProductivitySuite/assets/img/codegen.png
new file mode 100644
index 000000000..a4d38f6f4
Binary files /dev/null and b/ProductivitySuite/assets/img/codegen.png differ
diff --git a/ProductivitySuite/assets/img/create_client.png b/ProductivitySuite/assets/img/create_client.png
new file mode 100644
index 000000000..ab7d22808
Binary files /dev/null and b/ProductivitySuite/assets/img/create_client.png differ
diff --git a/ProductivitySuite/assets/img/create_productivitysuite_realm.png b/ProductivitySuite/assets/img/create_productivitysuite_realm.png
new file mode 100644
index 000000000..47b364991
Binary files /dev/null and b/ProductivitySuite/assets/img/create_productivitysuite_realm.png differ
diff --git a/ProductivitySuite/assets/img/create_realm.png b/ProductivitySuite/assets/img/create_realm.png
new file mode 100644
index 000000000..d05e5ec26
Binary files /dev/null and b/ProductivitySuite/assets/img/create_realm.png differ
diff --git a/ProductivitySuite/assets/img/create_roles.png b/ProductivitySuite/assets/img/create_roles.png
new file mode 100644
index 000000000..75417ebd6
Binary files /dev/null and b/ProductivitySuite/assets/img/create_roles.png differ
diff --git a/ProductivitySuite/assets/img/create_users.png b/ProductivitySuite/assets/img/create_users.png
new file mode 100644
index 000000000..5e3205e3a
Binary files /dev/null and b/ProductivitySuite/assets/img/create_users.png differ
diff --git a/ProductivitySuite/assets/img/data_source.png b/ProductivitySuite/assets/img/data_source.png
new file mode 100644
index 000000000..ae45e1223
Binary files /dev/null and b/ProductivitySuite/assets/img/data_source.png differ
diff --git a/ProductivitySuite/assets/img/doc_summary_file.png b/ProductivitySuite/assets/img/doc_summary_file.png
new file mode 100644
index 000000000..d4c18ebd4
Binary files /dev/null and b/ProductivitySuite/assets/img/doc_summary_file.png differ
diff --git a/ProductivitySuite/assets/img/doc_summary_paste.png b/ProductivitySuite/assets/img/doc_summary_paste.png
new file mode 100644
index 000000000..ddaa2f3b3
Binary files /dev/null and b/ProductivitySuite/assets/img/doc_summary_paste.png differ
diff --git a/ProductivitySuite/assets/img/faq_generator.png b/ProductivitySuite/assets/img/faq_generator.png
new file mode 100644
index 000000000..7c25eea1a
Binary files /dev/null and b/ProductivitySuite/assets/img/faq_generator.png differ
diff --git a/ProductivitySuite/assets/img/keycloak_login.png b/ProductivitySuite/assets/img/keycloak_login.png
new file mode 100644
index 000000000..abbdbebee
Binary files /dev/null and b/ProductivitySuite/assets/img/keycloak_login.png differ
diff --git a/ProductivitySuite/assets/img/productivitysuite_client_settings.png b/ProductivitySuite/assets/img/productivitysuite_client_settings.png
new file mode 100644
index 000000000..8c64451c2
Binary files /dev/null and b/ProductivitySuite/assets/img/productivitysuite_client_settings.png differ
diff --git a/ProductivitySuite/assets/img/set_user_password.png b/ProductivitySuite/assets/img/set_user_password.png
new file mode 100644
index 000000000..24af0caf3
Binary files /dev/null and b/ProductivitySuite/assets/img/set_user_password.png differ
diff --git a/ProductivitySuite/assets/img/user_role_mapping.png b/ProductivitySuite/assets/img/user_role_mapping.png
new file mode 100644
index 000000000..2dc0feab2
Binary files /dev/null and b/ProductivitySuite/assets/img/user_role_mapping.png differ
diff --git a/ProductivitySuite/docker/docker_build_compose.yaml b/ProductivitySuite/docker/docker_build_compose.yaml
new file mode 100644
index 000000000..4b205a361
--- /dev/null
+++ b/ProductivitySuite/docker/docker_build_compose.yaml
@@ -0,0 +1,100 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+ chatqna:
+ build:
+ args:
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ no_proxy: ${no_proxy}
+ context: ../../ChatQnA/docker/
+ dockerfile: ./Dockerfile
+ image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
+ embedding-tei:
+ build:
+ context: GenAIComps
+ dockerfile: comps/embeddings/langchain/docker/Dockerfile
+ extends: chatqna
+ image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+ retriever-redis:
+ build:
+ context: GenAIComps
+ dockerfile: comps/retrievers/langchain/redis/docker/Dockerfile
+ extends: chatqna
+ image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
+ reranking-tei:
+ build:
+ context: GenAIComps
+ dockerfile: comps/reranks/tei/docker/Dockerfile
+ extends: chatqna
+ image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
+ llm-tgi:
+ build:
+ context: GenAIComps
+ dockerfile: comps/llms/text-generation/tgi/Dockerfile
+ extends: chatqna
+ image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+ dataprep-redis:
+ build:
+ context: GenAIComps
+ dockerfile: comps/dataprep/redis/langchain/docker/Dockerfile
+ extends: chatqna
+ image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
+ promptregistry-mongo:
+ build:
+ context: GenAIComps
+ dockerfile: comps/prompt_registry/mongo/docker/Dockerfile
+ extends: chatqna
+ image: ${REGISTRY:-opea}/promptregistry-mongo-server:${TAG:-latest}
+ chathistory-mongo:
+ build:
+ context: GenAIComps
+ dockerfile: comps/chathistory/mongo/docker/Dockerfile
+ extends: chatqna
+ image: ${REGISTRY:-opea}/chathistory-mongo-server:${TAG:-latest}
+ productivity-suite-react-ui:
+ build:
+ context: ui
+ dockerfile: ./docker/Dockerfile.react
+ extends: chatqna
+ image: ${REGISTRY:-opea}/productivity-suite-react-ui-server:${TAG:-latest}
+ codegen:
+ build:
+ args:
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ no_proxy: ${no_proxy}
+ context: ../../CodeGen/docker/
+ dockerfile: ./Dockerfile
+ image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
+ docsum:
+ build:
+ args:
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ no_proxy: ${no_proxy}
+ context: ../../DocSum/docker/
+ dockerfile: ./Dockerfile
+ image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
+ faqgen:
+ build:
+ args:
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ no_proxy: ${no_proxy}
+ context: ../../FaqGen/docker/
+ dockerfile: ./Dockerfile
+ image: ${REGISTRY:-opea}/faqgen:${TAG:-latest}
+ llm_faqgen:
+ build:
+ context: GenAIComps
+ dockerfile: comps/llms/faq-generation/tgi/Dockerfile
+ extends: faqgen
+ image: ${REGISTRY:-opea}/llm-faqgen-tgi:${TAG:-latest}
+ llm_docsum_server:
+ build:
+ context: GenAIComps
+ dockerfile: comps/llms/summarization/tgi/Dockerfile
+ extends: docsum
+ image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
diff --git a/ProductivitySuite/docker/ui/docker/Dockerfile.react b/ProductivitySuite/docker/ui/docker/Dockerfile.react
new file mode 100644
index 000000000..f023b7afb
--- /dev/null
+++ b/ProductivitySuite/docker/ui/docker/Dockerfile.react
@@ -0,0 +1,21 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Use node 20.11.1 as the base image
+FROM node:20.11.1 as vite-app
+
+COPY ./react /usr/app/react
+WORKDIR /usr/app/react
+
+
+RUN ["npm", "install"]
+RUN ["npm", "run", "build"]
+
+
+FROM nginx:alpine
+
+COPY --from=vite-app /usr/app/react/dist /usr/share/nginx/html
+COPY ./react/env.sh /docker-entrypoint.d/env.sh
+
+COPY ./react/nginx.conf /etc/nginx/conf.d/default.conf
+RUN chmod +x /docker-entrypoint.d/env.sh
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/.env.production b/ProductivitySuite/docker/ui/react/.env.production
new file mode 100644
index 000000000..a7b38a272
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/.env.production
@@ -0,0 +1,16 @@
+VITE_BACKEND_SERVICE_ENDPOINT_CHATQNA=APP_BACKEND_SERVICE_ENDPOINT_CHATQNA
+VITE_BACKEND_SERVICE_ENDPOINT_CODEGEN=APP_BACKEND_SERVICE_ENDPOINT_CODEGEN
+VITE_BACKEND_SERVICE_ENDPOINT_DOCSUM=APP_BACKEND_SERVICE_ENDPOINT_DOCSUM
+VITE_BACKEND_SERVICE_ENDPOINT_FAQGEN=APP_BACKEND_SERVICE_ENDPOINT_FAQGEN
+VITE_KEYCLOAK_SERVICE_ENDPOINT=APP_KEYCLOAK_SERVICE_ENDPOINT
+
+
+VITE_DATAPREP_SERVICE_ENDPOINT=APP_DATAPREP_SERVICE_ENDPOINT
+VITE_DATAPREP_GET_FILE_ENDPOINT=APP_DATAPREP_GET_FILE_ENDPOINT
+VITE_DATAPREP_DELETE_FILE_ENDPOINT=APP_DATAPREP_DELETE_FILE_ENDPOINT
+
+VITE_CHAT_HISTORY_CREATE_ENDPOINT=APP_CHAT_HISTORY_CREATE_ENDPOINT
+VITE_CHAT_HISTORY_GET_ENDPOINT=APP_CHAT_HISTORY_GET_ENDPOINT
+VITE_CHAT_HISTORY_DELETE_ENDPOINT=APP_CHAT_HISTORY_DELETE_ENDPOINT
+VITE_PROMPT_SERVICE_GET_ENDPOINT=APP_PROMPT_SERVICE_GET_ENDPOINT
+VITE_PROMPT_SERVICE_CREATE_ENDPOINT=APP_PROMPT_SERVICE_CREATE_ENDPOINT
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/.eslintrc.cjs b/ProductivitySuite/docker/ui/react/.eslintrc.cjs
new file mode 100644
index 000000000..78174f683
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/.eslintrc.cjs
@@ -0,0 +1,11 @@
+module.exports = {
+ root: true,
+ env: { browser: true, es2020: true },
+ extends: ["eslint:recommended", "plugin:@typescript-eslint/recommended", "plugin:react-hooks/recommended"],
+ ignorePatterns: ["dist", ".eslintrc.cjs"],
+ parser: "@typescript-eslint/parser",
+ plugins: ["react-refresh"],
+ rules: {
+ "react-refresh/only-export-components": ["warn", { allowConstantExport: true }],
+ },
+};
diff --git a/ProductivitySuite/docker/ui/react/.gitignore b/ProductivitySuite/docker/ui/react/.gitignore
new file mode 100644
index 000000000..a547bf36d
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/ProductivitySuite/docker/ui/react/README.md b/ProductivitySuite/docker/ui/react/README.md
new file mode 100644
index 000000000..72703ef3b
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/README.md
@@ -0,0 +1,82 @@
+
+# Productivity Suite React UI
+
+### 📸 Project Screenshots
+
+![project-screenshot](../../../assets/img/chat_qna_init.png)
+![project-screenshot](../../../assets/img/Login_page.png)
+
+### 🧐 Features
+
+Here are some of the project's features:
+
+#### CHAT QNA
+
+- Start a Text Chat: Initiate a text chat with the ability to input written conversations, where the dialogue content can also be customized based on uploaded files.
+- Context Awareness: The AI assistant maintains the context of the conversation, understanding references to previous statements or questions. This allows for more natural and coherent exchanges.
+
+ ##### DATA SOURCE
+
+ - The choice between uploading locally or copying a remote link. Chat according to uploaded knowledge base.
+ - Uploaded files are listed, and the user is able to add or remove files/links
+
+ ###### Screen Shot
+
+ ![project-screenshot](../../../assets/img/data_source.png)
+
+- Clear: Clear the record of the current dialog box without retaining the contents of the dialog box.
+- Chat history: Historical chat records can still be retained after refreshing, making it easier for users to view the context.
+- Conversational Chat : The application maintains a history of the conversation, allowing users to review previous messages and the AI to refer back to earlier points in the dialogue when necessary.
+ ###### Screen Shots
+ ![project-screenshot](../../../assets/img/chat_qna_init.png)
+ ![project-screenshot](../../../assets/img/chatqna_with_conversation.png)
+
+#### CODEGEN
+
+- Generate code: generate the corresponding code based on the current user's input.
+ ###### Screen Shot
+ ![project-screenshot](../../../assets/img/codegen.png)
+
+#### DOC SUMMARY
+
+- Summarizing Uploaded Files: Upload files from their local device, then click 'Generate Summary' to summarize the content of the uploaded file. The summary will be displayed on the 'Summary' box.
+- Summarizing Text via Pasting: Paste the text to be summarized into the text box, then click 'Generate Summary' to produce a condensed summary of the content, which will be displayed in the 'Summary' box on the right.
+- Scroll to Bottom: The summarized content will automatically scroll to the bottom.
+ ###### Screen Shot
+ ![project-screenshot](../../../assets/img/doc_summary_paste.png)
+ ![project-screenshot](../../../assets/img/doc_summary_file.png)
+
+#### FAQ Generator
+
+- Generate FAQs from Text via Pasting: Paste the text into the text box, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below.
+
+- Generate FAQs from Text via txt file Upload: Upload the file in the Upload bar, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below.
+ ###### Screen Shot
+ ![project-screenshot](../../../assets/img/faq_generator.png)
+
+### 🛠️ Get it Running:
+
+1. Clone the repo.
+
+2. cd command to the current folder.
+
+3. create a .env file and add the following variables and values.
+ ```env
+ VITE_BACKEND_SERVICE_ENDPOINT_CHATQNA=''
+ VITE_BACKEND_SERVICE_ENDPOINT_CODEGEN=''
+ VITE_BACKEND_SERVICE_ENDPOINT_DOCSUM=''
+ VITE_BACKEND_SERVICE_ENDPOINT_FAQGEN=''
+ VITE_KEYCLOAK_SERVICE_ENDPOINT=''
+ VITE_DATAPREP_SERVICE_ENDPOINT=''
+ VITE_DATAPREP_GET_FILE_ENDPOINT=''
+ VITE_DATAPREP_DELETE_FILE_ENDPOINT=''
+ VITE_CHAT_HISTORY_CREATE_ENDPOINT=''
+ VITE_CHAT_HISTORY_GET_ENDPOINT=''
+ VITE_CHAT_HISTORY_DELETE_ENDPOINT=''
+ VITE_PROMPT_SERVICE_GET_ENDPOINT=''
+ VITE_PROMPT_SERVICE_CREATE_ENDPOINT=''
+ ```
+4. Execute `npm install` to install the corresponding dependencies.
+
+5. Execute `npm run dev`
+
+6. open http://localhost:5174 in a browser to see the UI
diff --git a/ProductivitySuite/docker/ui/react/env.sh b/ProductivitySuite/docker/ui/react/env.sh
new file mode 100644
index 000000000..ce1372ea6
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/env.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+for i in $(env | grep APP_) # replaces APP_-prefixed placeholders; if .env.production uses a different prefix, change it here too
+do
+ key=$(echo $i | cut -d '=' -f 1)
+ value=$(echo $i | cut -d '=' -f 2-)
+ echo $key=$value
+ # sed All files
+ # find /usr/share/nginx/html -type f -exec sed -i "s|${key}|${value}|g" '{}' +
+
+ # sed JS and CSS only
+ find /usr/share/nginx/html -type f \( -name '*.js' -o -name '*.css' \) -exec sed -i "s|${key}|${value}|g" '{}' +
+done
diff --git a/ProductivitySuite/docker/ui/react/index.html b/ProductivitySuite/docker/ui/react/index.html
new file mode 100644
index 000000000..fbe87e0fd
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/index.html
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+ Conversations UI
+
+
+
+
+
+
diff --git a/ProductivitySuite/docker/ui/react/nginx.conf b/ProductivitySuite/docker/ui/react/nginx.conf
new file mode 100644
index 000000000..00433fcda
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/nginx.conf
@@ -0,0 +1,20 @@
+server {
+ listen 80;
+
+ gzip on;
+ gzip_proxied any;
+ gzip_comp_level 6;
+ gzip_buffers 16 8k;
+ gzip_http_version 1.1;
+ gzip_types font/woff2 text/css application/javascript application/json application/font-woff application/font-tff image/gif image/png image/svg+xml application/octet-stream;
+
+ location / {
+ root /usr/share/nginx/html;
+ index index.html index.htm;
+ try_files $uri $uri/ /index.html =404;
+
+ location ~* \.(gif|jpe?g|png|webp|ico|svg|css|js|mp4|woff2)$ {
+ expires 1d;
+ }
+ }
+}
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/package.json b/ProductivitySuite/docker/ui/react/package.json
new file mode 100644
index 000000000..322808acc
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/package.json
@@ -0,0 +1,56 @@
+{
+ "name": "ui",
+ "private": true,
+ "version": "0.0.0",
+ "type": "module",
+ "scripts": {
+ "dev": "vite",
+ "build": "tsc && vite build",
+ "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
+ "preview": "vite preview",
+ "test": "vitest run"
+ },
+ "dependencies": {
+ "@mantine/core": "^7.11.1",
+ "@mantine/dropzone": "^7.11.1",
+ "@mantine/hooks": "^7.11.1",
+ "@mantine/notifications": "^7.10.2",
+ "@microsoft/fetch-event-source": "^2.0.1",
+ "@react-keycloak/web": "^3.4.0",
+ "@reduxjs/toolkit": "^2.2.5",
+ "@tabler/icons-react": "^3.5.0",
+ "axios": "^1.7.2",
+ "keycloak-js": "^25.0.2",
+ "luxon": "^3.4.4",
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0",
+ "react-markdown": "^9.0.1",
+ "react-redux": "^9.1.2",
+ "react-router-dom": "^6.25.1",
+ "react-syntax-highlighter": "^15.5.0",
+ "remark-frontmatter": "^5.0.0",
+ "remark-gfm": "^4.0.0"
+ },
+ "devDependencies": {
+ "@testing-library/react": "^16.0.0",
+ "@types/luxon": "^3.4.2",
+ "@types/node": "^20.12.12",
+ "@types/react": "^18.2.66",
+ "@types/react-dom": "^18.2.22",
+ "@types/react-syntax-highlighter": "^15.5.13",
+ "@typescript-eslint/eslint-plugin": "^7.2.0",
+ "@typescript-eslint/parser": "^7.2.0",
+ "@vitejs/plugin-react": "^4.2.1",
+ "eslint": "^8.57.0",
+ "eslint-plugin-react-hooks": "^4.6.0",
+ "eslint-plugin-react-refresh": "^0.4.6",
+ "jsdom": "^24.1.0",
+ "postcss": "^8.4.38",
+ "postcss-preset-mantine": "^1.15.0",
+ "postcss-simple-vars": "^7.0.1",
+ "sass": "1.64.2",
+ "typescript": "^5.2.2",
+ "vite": "^5.2.13",
+ "vitest": "^1.6.0"
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/postcss.config.cjs b/ProductivitySuite/docker/ui/react/postcss.config.cjs
new file mode 100644
index 000000000..e817f567b
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/postcss.config.cjs
@@ -0,0 +1,14 @@
+module.exports = {
+ plugins: {
+ "postcss-preset-mantine": {},
+ "postcss-simple-vars": {
+ variables: {
+ "mantine-breakpoint-xs": "36em",
+ "mantine-breakpoint-sm": "48em",
+ "mantine-breakpoint-md": "62em",
+ "mantine-breakpoint-lg": "75em",
+ "mantine-breakpoint-xl": "88em",
+ },
+ },
+ },
+};
diff --git a/ProductivitySuite/docker/ui/react/src/App.scss b/ProductivitySuite/docker/ui/react/src/App.scss
new file mode 100644
index 000000000..187764a17
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/App.scss
@@ -0,0 +1,42 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "./styles/styles";
+
+.root {
+ @include flex(row, nowrap, flex-start, flex-start);
+}
+
+.layout-wrapper {
+ @include absolutes;
+
+ display: grid;
+
+ width: 100%;
+ height: 100%;
+
+ grid-template-columns: 80px auto;
+ grid-template-rows: 1fr;
+}
+
+/* ===== Scrollbar CSS ===== */
+/* Firefox */
+* {
+ scrollbar-width: thin;
+ scrollbar-color: #d6d6d6 #ffffff;
+}
+
+/* Chrome, Edge, and Safari */
+*::-webkit-scrollbar {
+ width: 8px;
+}
+
+*::-webkit-scrollbar-track {
+ background: #ffffff;
+}
+
+*::-webkit-scrollbar-thumb {
+ background-color: #d6d6d6;
+ border-radius: 16px;
+ border: 4px double #dedede;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/App.tsx b/ProductivitySuite/docker/ui/react/src/App.tsx
new file mode 100644
index 000000000..c12ee1d8f
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/App.tsx
@@ -0,0 +1,68 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import "./App.scss"
+import {MantineProvider } from "@mantine/core"
+import '@mantine/notifications/styles.css';
+import { SideNavbar, SidebarNavList } from "./components/sidebar/sidebar"
+import { IconMessages, IconFileTextAi, IconCode, IconFileInfo, IconDatabaseCog } from "@tabler/icons-react"
+import Conversation from "./components/Conversation/Conversation"
+import { Notifications } from '@mantine/notifications';
+import { BrowserRouter, Route, Routes } from "react-router-dom";
+import CodeGen from "./components/CodeGen/CodeGen";
+import DocSum from "./components/DocSum/DocSum";
+import FaqGen from "./components/FaqGen/FaqGen";
+import { useKeycloak } from "@react-keycloak/web";
+import DataSource from "./components/Conversation/DataSource";
+import { useAppDispatch } from "./redux/store";
+import { setUser } from "./redux/User/userSlice";
+import { useEffect } from "react";
+
+const title = "Chat QnA"
+const navList: SidebarNavList = [
+ { icon: IconMessages, label: "Chat Qna", path: "/", children: },
+ { icon: IconCode, label: "CodeGen", path: "/codegen", children: },
+ { icon: IconFileTextAi, label: "DocSum", path: "/docsum", children: },
+ { icon: IconFileInfo, label: "FaqGen", path: "/faqgen", children: },
+ { icon: IconDatabaseCog, label: "Data Management", path: "/data-management", children: }
+]
+
+function App() {
+ const { keycloak } = useKeycloak();
+ const dispatch = useAppDispatch()
+ useEffect(()=>{
+ dispatch(setUser(keycloak?.idTokenParsed?.preferred_username))
+ },[keycloak.idTokenParsed])
+
+ return (
+ <>
+
+ {!keycloak.authenticated ? (
+ "redirecting to sso ..."
+ ) : (
+
+
+
+
+
+
+
+ {navList.map(tab => {
+ return ( )
+ })}
+
+
+
+
+
+
+
+ )}
+
+
+ >
+ )
+
+}
+
+export default App
diff --git a/ProductivitySuite/docker/ui/react/src/__tests__/util.test.ts b/ProductivitySuite/docker/ui/react/src/__tests__/util.test.ts
new file mode 100644
index 000000000..e67ba2c86
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/__tests__/util.test.ts
@@ -0,0 +1,14 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { describe, expect, test } from "vitest";
+import { getCurrentTimeStamp, uuidv4 } from "../common/util";
+
+describe("unit tests", () => {
+ test("check UUID is of length 36", () => {
+ expect(uuidv4()).toHaveLength(36);
+ });
+ test("check TimeStamp generated is of unix", () => {
+ expect(getCurrentTimeStamp()).toBe(Math.floor(Date.now() / 1000));
+ });
+});
diff --git a/ProductivitySuite/docker/ui/react/src/assets/opea-icon-black.svg b/ProductivitySuite/docker/ui/react/src/assets/opea-icon-black.svg
new file mode 100644
index 000000000..5c96dc762
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/assets/opea-icon-black.svg
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ProductivitySuite/docker/ui/react/src/assets/opea-icon-color.svg b/ProductivitySuite/docker/ui/react/src/assets/opea-icon-color.svg
new file mode 100644
index 000000000..790151171
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/assets/opea-icon-color.svg
@@ -0,0 +1,40 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ProductivitySuite/docker/ui/react/src/assets/react.svg b/ProductivitySuite/docker/ui/react/src/assets/react.svg
new file mode 100644
index 000000000..6c87de9bb
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/assets/react.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/common/client.ts b/ProductivitySuite/docker/ui/react/src/common/client.ts
new file mode 100644
index 000000000..7512f73e3
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/common/client.ts
@@ -0,0 +1,8 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import axios from "axios";
+
+// add interceptors to add any request headers
+
+export default axios;
diff --git a/ProductivitySuite/docker/ui/react/src/common/util.ts b/ProductivitySuite/docker/ui/react/src/common/util.ts
new file mode 100644
index 000000000..df65b2d8e
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/common/util.ts
@@ -0,0 +1,12 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+export const getCurrentTimeStamp = () => {
+ return Math.floor(Date.now() / 1000);
+};
+
+export const uuidv4 = () => {
+ return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
+ (+c ^ (crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (+c / 4)))).toString(16),
+ );
+};
diff --git a/ProductivitySuite/docker/ui/react/src/components/CodeGen/CodeGen.tsx b/ProductivitySuite/docker/ui/react/src/components/CodeGen/CodeGen.tsx
new file mode 100644
index 000000000..29c96f61c
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/CodeGen/CodeGen.tsx
@@ -0,0 +1,140 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { KeyboardEventHandler, SyntheticEvent, useEffect, useRef, useState } from 'react'
+import styleClasses from "./codeGen.module.scss"
+import { ActionIcon, Textarea, Title, rem } from '@mantine/core'
+import { IconArrowRight } from '@tabler/icons-react'
+import { ConversationMessage } from '../Message/conversationMessage'
+import { fetchEventSource } from '@microsoft/fetch-event-source'
+import { CODE_GEN_URL } from '../../config'
+
+
+
+const CodeGen = () => {
+ const [prompt, setPrompt] = useState("")
+ const [submittedPrompt, setSubmittedPrompt] = useState("")
+ const [response,setResponse] = useState("");
+ const promptInputRef = useRef(null)
+ const scrollViewport = useRef(null)
+
+ const toSend = "Enter"
+
+ const handleSubmit = async () => {
+ setResponse("")
+ setSubmittedPrompt(prompt)
+ const body = {
+ messages:prompt
+ }
+ fetchEventSource(CODE_GEN_URL, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Accept":"*/*"
+ },
+ body: JSON.stringify(body),
+ openWhenHidden: true,
+ async onopen(response) {
+ if (response.ok) {
+ return;
+ } else if (response.status >= 400 && response.status < 500 && response.status !== 429) {
+ const e = await response.json();
+ console.log(e);
+ throw Error(e.error.message);
+ } else {
+ console.log("error", response);
+ }
+ },
+ onmessage(msg) {
+ if (msg?.data != "[DONE]") {
+ try {
+ const match = msg.data.match(/b'([^']*)'/);
+ if (match && match[1] != "") {
+ const extractedText = match[1].replace(/\\n/g, "\n");
+ setResponse(prev=>prev+extractedText);
+ }
+ } catch (e) {
+ console.log("something wrong in msg", e);
+ throw e;
+ }
+ }
+ },
+ onerror(err) {
+ console.log("error", err);
+ setResponse("")
+ throw err;
+ },
+ onclose() {
+ setPrompt("")
+ },
+ });
+
+ }
+
+ const scrollToBottom = () => {
+ scrollViewport.current!.scrollTo({ top: scrollViewport.current!.scrollHeight })
+ }
+
+ useEffect(() => {
+ scrollToBottom()
+ }, [response])
+
+ const handleKeyDown: KeyboardEventHandler = (event) => {
+ if (!event.shiftKey && event.key === toSend) {
+ handleSubmit()
+ setTimeout(() => {
+ setPrompt("")
+ }, 1)
+ }
+ }
+
+ const handleChange = (event: SyntheticEvent) => {
+ event.preventDefault()
+ setPrompt((event.target as HTMLTextAreaElement).value)
+ }
+ return (
+
+
+
+
+
CodeGen
+
+
+
+ {!submittedPrompt && !response &&
+ (<>
+
Start by asking a question
+ >)
+ }
+ {submittedPrompt && (
+
+ )}
+ {response && (
+
+ )}
+
+
+
+
+
+
+
+ )
+}
+export default CodeGen;
diff --git a/ProductivitySuite/docker/ui/react/src/components/CodeGen/codeGen.module.scss b/ProductivitySuite/docker/ui/react/src/components/CodeGen/codeGen.module.scss
new file mode 100644
index 000000000..acee80b06
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/CodeGen/codeGen.module.scss
@@ -0,0 +1,59 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "../../styles/styles";
+
+.spacer {
+ flex: 1 1 auto;
+}
+
+.codeGenWrapper {
+ @include flex(row, nowrap, flex-start, flex-start);
+ flex: 1 1 auto;
+ height: 100%;
+ & > * {
+ height: 100%;
+ }
+ .codeGenContent {
+ flex: 1 1 auto;
+ position: relative;
+ .codeGenContentMessages {
+ @include absolutes;
+ // @include flex(column, nowrap, flex-start, flex-start);
+
+ display: grid;
+ grid-template-areas:
+ "header"
+ "messages"
+ "inputs";
+
+ grid-template-columns: auto;
+ grid-template-rows: 60px auto 100px;
+
+ .codeGenTitle {
+ grid-area: header;
+ @include flex(row, nowrap, center, flex-start);
+ height: 60px;
+ padding: 8px 24px;
+ border-bottom: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4));
+ }
+
+ .historyContainer {
+ grid-area: messages;
+ overflow: auto;
+ width: 100%;
+ padding: 16px 32px;
+ & > * {
+ width: 100%;
+ }
+ }
+
+ .codeGenActions {
+ // padding: --var()
+ grid-area: inputs;
+ padding: 18px;
+ border-top: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4));
+ }
+ }
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Conversation/Conversation.tsx b/ProductivitySuite/docker/ui/react/src/components/Conversation/Conversation.tsx
new file mode 100644
index 000000000..d4a65bbfd
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Conversation/Conversation.tsx
@@ -0,0 +1,166 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { KeyboardEventHandler, SyntheticEvent, useEffect, useRef, useState } from 'react'
+import styleClasses from "./conversation.module.scss"
+import { ActionIcon, Group, Textarea, Title, rem } from '@mantine/core'
+import { IconArrowRight, IconMessagePlus } from '@tabler/icons-react'
+import { conversationSelector, doConversation, getAllConversations, newConversation } from '../../redux/Conversation/ConversationSlice'
+import { ConversationMessage } from '../Message/conversationMessage'
+import { useAppDispatch, useAppSelector } from '../../redux/store'
+import { Message, MessageRole } from '../../redux/Conversation/Conversation'
+import { getCurrentTimeStamp } from '../../common/util'
+
+import { ConversationSideBar } from './ConversationSideBar'
+import { getPrompts } from '../../redux/Prompt/PromptSlice'
+import { userSelector } from '../../redux/User/userSlice'
+import PromptTemplate from './PromptTemplate'
+
+type ConversationProps = {
+ title: string
+}
+
+const Conversation = ({ title }: ConversationProps) => {
+ const [prompt, setPrompt] = useState("")
+
+ const dispatch = useAppDispatch();
+ const promptInputRef = useRef(null)
+
+ const { conversations, onGoingResult, selectedConversationId,selectedConversationHistory } = useAppSelector(conversationSelector)
+ const { name } = useAppSelector(userSelector)
+ const selectedConversation = conversations.find(x => x.id === selectedConversationId)
+
+ const scrollViewport = useRef(null)
+
+ const toSend = "Enter"
+
+ const systemPrompt: Message = {
+ role: MessageRole.System,
+ content: "You are helpful assistant",
+ };
+
+
+ const handleSubmit = () => {
+
+ const userPrompt: Message = {
+ role: MessageRole.User,
+ content: prompt,
+ time: getCurrentTimeStamp().toString()
+ };
+ let messages: Message[] = [];
+ // if (selectedConversation) {
+ // messages = selectedConversation.Messages.map(message => {
+ // return { role: message.role, content: message.content }
+ // })
+ // }
+
+ messages = [systemPrompt, ...(selectedConversationHistory) ]
+
+ doConversation({
+ conversationId: selectedConversationId,
+ userPrompt,
+ messages,
+ model: "Intel/neural-chat-7b-v3-3",
+ })
+ setPrompt("")
+ }
+
+ const scrollToBottom = () => {
+ scrollViewport.current!.scrollTo({ top: scrollViewport.current!.scrollHeight })
+ }
+
+ useEffect(() => {
+ if(name && name!=""){
+ dispatch(getPrompts({ promptText: "" }))
+ dispatch(getAllConversations({ user: name }))
+ }
+ }, [name]);
+
+ useEffect(() => {
+ scrollToBottom()
+ }, [onGoingResult, selectedConversationHistory])
+
+ const handleKeyDown: KeyboardEventHandler = (event) => {
+ if (!event.shiftKey && event.key === toSend) {
+ handleSubmit()
+ setTimeout(() => {
+ setPrompt("")
+ }, 1)
+ }
+ }
+
+
+
+ const handleNewConversation = () => {
+ dispatch(newConversation())
+ }
+
+ const handleChange = (event: SyntheticEvent) => {
+ event.preventDefault()
+ setPrompt((event.target as HTMLTextAreaElement).value)
+ }
+ return (
+
+
+
+
+
+
{selectedConversation?.first_query || ""}
+
+
+ {(selectedConversation || selectedConversationHistory.length > 0) && (
+
+
+
+ )}
+
+
+
+
+
+
+ {!(selectedConversation || selectedConversationHistory.length > 0) && (
+
+
+
Start by asking a question
+
You can also upload your Document by clicking on Document icon on top right corner
+
+
+
+
+ )}
+
+ {selectedConversationHistory.map((message,index) => {
+ return (message.role!== MessageRole.System && (
))
+ })
+ }
+
+ {onGoingResult && (
+
+ )}
+
+
+
+
+
+
+
+ )
+}
+export default Conversation;
diff --git a/ProductivitySuite/docker/ui/react/src/components/Conversation/ConversationSideBar.tsx b/ProductivitySuite/docker/ui/react/src/components/Conversation/ConversationSideBar.tsx
new file mode 100644
index 000000000..2ab80d820
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Conversation/ConversationSideBar.tsx
@@ -0,0 +1,64 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { ActionIcon, ScrollAreaAutosize, Title } from "@mantine/core"
+
+import contextStyles from "../../styles/components/context.module.scss"
+import { useAppDispatch, useAppSelector } from "../../redux/store"
+import { conversationSelector, deleteConversation, getConversationHistory, setSelectedConversationId } from "../../redux/Conversation/ConversationSlice"
+import { useEffect } from "react"
+import { userSelector } from "../../redux/User/userSlice"
+import { IconTrash } from "@tabler/icons-react"
+// import { userSelector } from "../../redux/User/userSlice"
+
+export interface ConversationContextProps {
+ title: string
+}
+
+export function ConversationSideBar({ title }: ConversationContextProps) {
+ const { conversations, selectedConversationId } = useAppSelector(conversationSelector)
+ const { name } = useAppSelector(userSelector)
+ // const user = useAppSelector(userSelector)
+ const dispatch = useAppDispatch()
+
+ useEffect(() => {
+ if (selectedConversationId != "") {
+ dispatch(getConversationHistory({ user: name, conversationId: selectedConversationId }))
+ }
+ }, [selectedConversationId])
+
+ const handleDeleteConversation = (id : string) => {
+ dispatch(deleteConversation({ user: name, conversationId: id }))
+ }
+
+ const conversationList = conversations?.map((curr) => (
+ {
+ event.preventDefault()
+ dispatch(setSelectedConversationId(curr.id))
+ }}
+ key={curr.id}
+ >
+
{curr.first_query}
+ {selectedConversationId === curr.id && (
+
handleDeleteConversation(curr.id)} size={30} variant="default">
+
+
+ )}
+
+
+ ))
+
+ return (
+
+
+ {title}
+
+
+ {conversationList}
+
+
+ )
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Conversation/DataSource.tsx b/ProductivitySuite/docker/ui/react/src/components/Conversation/DataSource.tsx
new file mode 100644
index 000000000..cc273aa83
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Conversation/DataSource.tsx
@@ -0,0 +1,93 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { ActionIcon, Button, Container, FileInput, Text, TextInput, Title } from '@mantine/core'
+import { IconFile, IconTrash } from '@tabler/icons-react'
+import { SyntheticEvent, useEffect, useState } from 'react'
+import { useAppDispatch, useAppSelector } from '../../redux/store'
+import { conversationSelector, deleteInDataSource, getAllFilesInDataSource, submitDataSourceURL, uploadFile } from '../../redux/Conversation/ConversationSlice'
+import styleClasses from './dataSource.module.scss'
+
+
+
+export default function DataSource() {
+ const [file, setFile] = useState();
+ const [isFile, setIsFile] = useState(true);
+ const [url, setURL] = useState("");
+ const dispatch = useAppDispatch()
+ const { filesInDataSource } = useAppSelector(conversationSelector)
+
+ const handleFileUpload = () => {
+ if (file)
+ dispatch(uploadFile({ file }))
+ }
+
+ const handleChange = (event: SyntheticEvent) => {
+ event.preventDefault()
+ setURL((event.target as HTMLTextAreaElement).value)
+ }
+
+ const handleSubmit = () => {
+ dispatch(submitDataSourceURL({ link_list: url.split(";") }))
+ }
+
+ const handleDelete = (file: string) => {
+ dispatch(deleteInDataSource({file}))
+ }
+
+ useEffect(()=>{
+ dispatch(getAllFilesInDataSource({knowledgeBaseId:"default"}))
+ },[])
+
+ return (
+
+
+ Data Source
+
+
+ Please upload your local file or paste a remote file link, and Chat will respond based on the content of the uploaded file.
+
+
+
+
+
+ setIsFile(true)}>Upload File
+ setIsFile(false)}>Use Link
+
+
+
+
+
+ {isFile ? (
+ <>
+
+ Upload
+ >
+ ) : (
+ <>
+
+ Upload
+ >
+ )}
+
+
+
+
+ Files
+
+ {filesInDataSource.map(file=> {
+ return (
+
+
+
{file.name}
+
handleDelete(file.name)} size={32} variant="default">
+
+
+
+ )})}
+
+
+ )
+}
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/Conversation/PromptTemplate.tsx b/ProductivitySuite/docker/ui/react/src/components/Conversation/PromptTemplate.tsx
new file mode 100644
index 000000000..4acb55743
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Conversation/PromptTemplate.tsx
@@ -0,0 +1,31 @@
+import { useEffect } from "react"
+import { getPrompts, promptSelector } from "../../redux/Prompt/PromptSlice"
+import { useAppDispatch, useAppSelector } from "../../redux/store"
+import styleClasses from './promptTemplate.module.scss'
+import { userSelector } from "../../redux/User/userSlice"
+type PromptTemplateProps = {
+ setPrompt: (prompt:string)=>void;
+}
+function PromptTemplate({setPrompt}:PromptTemplateProps) {
+ const dispatch = useAppDispatch()
+ const {prompts} = useAppSelector(promptSelector)
+ const {name} = useAppSelector(userSelector)
+ useEffect(()=> {
+ if(name && name!=="")
+ dispatch(getPrompts({promptText:""}))
+ },[])
+ return (
+
+ Prompt Templates
+
+ {
+ prompts.map(prompt =>
{
+ setPrompt(prompt.prompt_text)
+
+ }}>{prompt.prompt_text}
)
+ }
+
+ )
+}
+
+export default PromptTemplate
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/Conversation/conversation.module.scss b/ProductivitySuite/docker/ui/react/src/components/Conversation/conversation.module.scss
new file mode 100644
index 000000000..802eee2ec
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Conversation/conversation.module.scss
@@ -0,0 +1,73 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "../../styles/styles"; // shared mixins: flex(), absolutes
+
+.spacer { // flexible filler used to push siblings apart
+ flex: 1 1 auto;
+}
+
+.conversationWrapper { // row layout: conversation sidebar + chat content, full height
+ @include flex(row, nowrap, flex-start, flex-start);
+ flex: 1 1 auto;
+ height: 100%;
+ & > * {
+ height: 100%;
+ }
+ .conversationContent {
+ flex: 1 1 auto;
+ position: relative; // anchor for the absolutely-positioned grid below
+ .conversationContentMessages {
+ @include absolutes;
+ // @include flex(column, nowrap, flex-start, flex-start);
+
+ display: grid; // three stacked regions: header / messages / inputs
+ grid-template-areas:
+ "header"
+ "messages"
+ "inputs";
+
+ grid-template-columns: auto;
+ grid-template-rows: 60px 655px 100px; // NOTE(review): hard-coded 655px messages row; codeGen.module.scss uses `auto` — confirm this doesn't clip on other viewport heights
+
+ .conversationTitle {
+ grid-area: header;
+ @include flex(row, nowrap, center, flex-start);
+ height: 60px;
+ padding: 8px 24px;
+ border-bottom: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4)); // theme-aware divider
+ }
+
+ .historyContainer {
+ grid-area: messages;
+ overflow: auto; // scrollable chat history
+ width: 100%;
+ // padding: 16px 32px;
+ .newConversation { // empty-state layout: info text + prompt-template side panel
+ @include flex(row, nowrap, flex-start, flex-start);
+ .infoMessages {
+ padding: 16px 32px;
+
+ flex: 80; // 80/20 split with .promptContainer (flex: 20)
+ }
+ }
+ & > * {
+ width: 100%;
+ }
+ }
+
+ .conversationActions {
+ // padding: --var()
+ grid-area: inputs;
+ padding: 18px;
+ border-top: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4));
+ }
+ }
+
+ .conversationSplash { // centered full-pane overlay (e.g. "start a conversation")
+ @include absolutes;
+ @include flex(column, nowrap, center, center);
+ font-size: 32px;
+ }
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Conversation/dataSource.module.scss b/ProductivitySuite/docker/ui/react/src/components/Conversation/dataSource.module.scss
new file mode 100644
index 000000000..140711a0f
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Conversation/dataSource.module.scss
@@ -0,0 +1,11 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "../../styles/styles"; // kept for consistency; no mixins currently used below
+
+.dataSourceWrapper { // container for the data-source (file/link upload) panel
+ // @include flex(column, nowrap, flex-start, flex-start);
+ // flex: 1 1 auto;
+ height: 100%;
+ padding: 20px;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Conversation/promptTemplate.module.scss b/ProductivitySuite/docker/ui/react/src/components/Conversation/promptTemplate.module.scss
new file mode 100644
index 000000000..5e133a3de
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Conversation/promptTemplate.module.scss
@@ -0,0 +1,14 @@
+.promptContainer { // right-hand panel listing reusable prompt templates
+ flex: 20; // 20/100 split against the sibling messages pane (flex: 80 in conversation.module.scss)
+ border-left: 1px solid var(--mantine-color-gray-3);
+ height: 82vh; // NOTE(review): viewport-relative height alongside fixed grid rows elsewhere — confirm it lines up on all screens
+ padding: 8px;
+ .prompt { // one clickable template entry
+ // height: %;
+ margin-top: 8px;
+ margin-bottom: 8px;
+ cursor: pointer;
+ padding: 8px;
+ border: 1px solid var(--mantine-color-gray-3);
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/DocSum/DocSum.tsx b/ProductivitySuite/docker/ui/react/src/components/DocSum/DocSum.tsx
new file mode 100644
index 000000000..9e7472c65
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/DocSum/DocSum.tsx
@@ -0,0 +1,153 @@
+import styleClasses from './docSum.module.scss'
+import { Button, Text, Textarea, Title } from '@mantine/core'
+import { FileUpload } from './FileUpload'
+import { useEffect, useState } from 'react'
+import Markdown from '../Shared/Markdown/Markdown'
+import { fetchEventSource } from '@microsoft/fetch-event-source'
+import { notifications } from '@mantine/notifications'
+import { DOC_SUM_URL } from '../../config'
+import { FileWithPath } from '@mantine/dropzone'
+
+
+const DocSum = () => {
+ const [isFile, setIsFile] = useState(false);
+ const [files, setFiles] = useState([])
+ const [isGenerating, setIsGenerating] = useState(false);
+ const [value, setValue] = useState('');
+ const [fileContent, setFileContent] = useState('');
+ const [response, setResponse] = useState('');
+
+ useEffect(() => {
+ if(isFile){
+ setValue('')
+ }
+ },[isFile])
+
+ useEffect(()=>{
+ if (files.length) {
+ const reader = new FileReader()
+ reader.onload = async () => {
+ const text = reader.result?.toString()
+ setFileContent(text || '')
+ };
+ reader.readAsText(files[0])
+ }
+ },[files])
+
+
+ const handleSubmit = async () => {
+ setResponse("")
+ if(!isFile && !value){
+ notifications.show({
+ color: "red",
+ id: "input",
+ message: "Please Upload Content",
+ })
+ return
+ }
+
+ setIsGenerating(true)
+ const body = {
+ messages: isFile ? fileContent : value
+ }
+ fetchEventSource(DOC_SUM_URL, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Accept": "*/*"
+ },
+ body: JSON.stringify(body),
+ openWhenHidden: true,
+ async onopen(response) {
+ if (response.ok) {
+ return;
+ } else if (response.status >= 400 && response.status < 500 && response.status !== 429) {
+ const e = await response.json();
+ console.log(e);
+ throw Error(e.error.message);
+ } else {
+ console.log("error", response);
+ }
+ },
+ onmessage(msg) {
+ if (msg?.data != "[DONE]") {
+ try {
+ const res = JSON.parse(msg.data)
+ const logs = res.ops;
+ logs.forEach((log: { op: string; path: string; value: string }) => {
+ if (log.op === "add") {
+ if (
+ log.value !== "" && log.path.endsWith("/streamed_output/-") && log.path.length > "/streamed_output/-".length
+ ) {
+ setResponse(prev=>prev+log.value);
+ }
+ }
+ });
+ } catch (e) {
+ console.log("something wrong in msg", e);
+ throw e;
+ }
+ }
+ },
+ onerror(err) {
+ console.log("error", err);
+ setIsGenerating(false)
+ throw err;
+ },
+ onclose() {
+ setIsGenerating(false)
+ },
+ });
+}
+
+
+ return (
+
+
+
+
+
Doc Summary
+
+
+ Please upload file or paste content for summarization.
+
+
+
+ setIsFile(false)}>Paste Text
+ setIsFile(true)}>Upload File
+
+
+
+ {isFile ? (
+
+ { setFiles(files) }} />
+
+ ) : (
+
+
+ )}
+
+
+ Generate Summary
+
+ {response && (
+
+
+
+ )}
+
+
+
+
+ )
+}
+
+export default DocSum
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/DocSum/FileUpload.tsx b/ProductivitySuite/docker/ui/react/src/components/DocSum/FileUpload.tsx
new file mode 100644
index 000000000..914ac8724
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/DocSum/FileUpload.tsx
@@ -0,0 +1,79 @@
+import { Group, Text, rem } from '@mantine/core';
+import { IconUpload, IconCloudUpload, IconX, IconFile } from '@tabler/icons-react';
+import { } from '@tabler/icons-react';
+import { Dropzone, DropzoneProps, FileWithPath } from '@mantine/dropzone';
+import '@mantine/dropzone/styles.css';
+import { useState } from 'react';
+
+export function FileUpload(props: Partial) {
+ const [files, setFiles] = useState([])
+ return (
+ { setFiles(files) }}
+ onReject={() => { }}
+ maxSize={5 * 1024 ** 2}
+ multiple={false}
+ accept={[
+ // MIME_TYPES.png,
+ // MIME_TYPES.jpeg,
+ // MIME_TYPES.svg,
+ // MIME_TYPES.gif,
+ // MIME_TYPES.webp,
+ // MIME_TYPES.doc,
+ // MIME_TYPES.docx,
+ // MIME_TYPES.pdf,
+ // MIME_TYPES.xls,
+ // MIME_TYPES.xlsx,
+ "text/plain",
+ // "application/json"
+ ]}
+ style={{ height: '220px', width: '100%', borderColor: 'var(--mantine-color-blue-6)' }}
+ {...props}
+ >
+
+
+
+
+
+
+
+
+ {files.length > 0 ? ( ) : ( )}
+
+ {files.length > 0 ? (
+
+ {files.map(file => (
+
+ {file.name}
+
+ ))}
+
+
+ ) : (
+
+
+ Drag your file here or click to select file
+
+
+ .txt
+ {/* pdf, txt, doc, docx, png, jpg ..so on */}
+
+
+
+ )}
+
+
+ );
+}
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/DocSum/docSum.module.scss b/ProductivitySuite/docker/ui/react/src/components/DocSum/docSum.module.scss
new file mode 100644
index 000000000..399e97939
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/DocSum/docSum.module.scss
@@ -0,0 +1,44 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "../../styles/styles"; // shared mixins: flex(), absolutes
+
+.spacer { // flexible filler used to push siblings apart
+ flex: 1 1 auto;
+}
+
+.docSumWrapper { // row layout: sidebar + document-summary content, full height
+ @include flex(row, nowrap, flex-start, flex-start);
+ flex: 1 1 auto;
+ height: 100%;
+ & > * {
+ height: 100%;
+ }
+ .docSumContent {
+ flex: 1 1 auto;
+ position: relative; // anchor for the absolutely-positioned column below
+ .docSumContentMessages {
+ @include absolutes;
+ @include flex(column, nowrap, normal, normal);
+ > * { // uniform gutter for every section
+ padding: 8px 24px;
+ }
+ .docSumTitle {
+ @include flex(row, nowrap, center, flex-start);
+ height: 60px;
+ border-bottom: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4)); // theme-aware divider
+ }
+ .docSumContentButtonGroup { // "Paste Text" / "Upload File" toggle row
+ @include flex(row, nowrap, center, center);
+ height: 60px;
+ }
+ .docSumInput {
+ .docSumFileUpload { // centers the dropzone
+ @include flex(row, nowrap, center, center);
+ }
+ }
+ .docSumResult { // NOTE(review): empty — FaqGen's counterpart adds overflow-y: auto; consider matching
+ }
+ }
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/FaqGen/FaqGen.tsx b/ProductivitySuite/docker/ui/react/src/components/FaqGen/FaqGen.tsx
new file mode 100644
index 000000000..ca731cbf8
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/FaqGen/FaqGen.tsx
@@ -0,0 +1,167 @@
+import styleClasses from './faqGen.module.scss'
+import { Button, Text, Textarea, Title } from '@mantine/core'
+import { FileUpload } from './FileUpload'
+import { useEffect, useState } from 'react'
+import Markdown from '../Shared/Markdown/Markdown'
+import { fetchEventSource } from '@microsoft/fetch-event-source'
+import { notifications } from '@mantine/notifications'
+import { FAQ_GEN_URL } from '../../config'
+import { FileWithPath } from '@mantine/dropzone'
+
+
+const FaqGen = () => {
+ const [isFile, setIsFile] = useState(false);
+ const [files, setFiles] = useState([])
+ const [isGenerating, setIsGenerating] = useState(false);
+ const [value, setValue] = useState('');
+ const [fileContent, setFileContent] = useState('');
+ const [response, setResponse] = useState('');
+
+ let messagesEnd:HTMLDivElement;
+
+ const scrollToView = () => {
+ if (messagesEnd) {
+ messagesEnd.scrollTop = messagesEnd.scrollHeight;
+ }
+ };
+ useEffect(()=>{
+ scrollToView()
+ },[response])
+
+ useEffect(() => {
+ if(isFile){
+ setValue('')
+ }
+ },[isFile])
+
+ useEffect(()=>{
+ if (files.length) {
+ const reader = new FileReader()
+ reader.onload = async () => {
+ const text = reader.result?.toString()
+ setFileContent(text || '')
+ };
+ reader.readAsText(files[0])
+ }
+ },[files])
+
+
+ const handleSubmit = async () => {
+ setResponse("")
+ if(!isFile && !value){
+ notifications.show({
+ color: "red",
+ id: "input",
+ message: "Please Upload Content",
+ })
+ return
+ }
+
+ setIsGenerating(true)
+ const body = {
+ messages: isFile ? fileContent : value
+ }
+ fetchEventSource(FAQ_GEN_URL, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Accept": "*/*"
+ },
+ body: JSON.stringify(body),
+ openWhenHidden: true,
+ async onopen(response) {
+ if (response.ok) {
+ return;
+ } else if (response.status >= 400 && response.status < 500 && response.status !== 429) {
+ const e = await response.json();
+ console.log(e);
+ throw Error(e.error.message);
+ } else {
+ console.log("error", response);
+ }
+ },
+ onmessage(msg) {
+ if (msg?.data != "[DONE]") {
+ try {
+ const res = JSON.parse(msg.data)
+ const logs = res.ops;
+ logs.forEach((log: { op: string; path: string; value: string }) => {
+ if (log.op === "add") {
+ if (
+ log.value !== "" && log.path.endsWith("/streamed_output/-") && log.path.length > "/streamed_output/-".length
+ ) {
+ setResponse(prev=>prev+log.value);
+ }
+ }
+ });
+ } catch (e) {
+ console.log("something wrong in msg", e);
+ throw e;
+ }
+ }
+ },
+ onerror(err) {
+ console.log("error", err);
+ setIsGenerating(false)
+ throw err;
+ },
+ onclose() {
+ setIsGenerating(false)
+ },
+ });
+}
+
+
+ return (
+
+
+
+
+
FAQ Generator
+
+
+ Please upload file or paste content for generating Faq's.
+
+
+
+ setIsFile(false)}>Paste Text
+ setIsFile(true)}>Upload File
+
+
+
+ {isFile ? (
+
+ { setFiles(files) }} />
+
+ ) : (
+
+
+ )}
+
+
+ Generate FAQ's
+
+ {response && (
+
{
+ if(el)
+ messagesEnd = el;
+ }}>
+
+
+ )}
+
+
+
+
+ )
+}
+
+export default FaqGen;
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/FaqGen/FileUpload.tsx b/ProductivitySuite/docker/ui/react/src/components/FaqGen/FileUpload.tsx
new file mode 100644
index 000000000..914ac8724
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/FaqGen/FileUpload.tsx
@@ -0,0 +1,79 @@
+import { Group, Text, rem } from '@mantine/core';
+import { IconUpload, IconCloudUpload, IconX, IconFile } from '@tabler/icons-react';
+import { } from '@tabler/icons-react';
+import { Dropzone, DropzoneProps, FileWithPath } from '@mantine/dropzone';
+import '@mantine/dropzone/styles.css';
+import { useState } from 'react';
+
+export function FileUpload(props: Partial) {
+ const [files, setFiles] = useState([])
+ return (
+ { setFiles(files) }}
+ onReject={() => { }}
+ maxSize={5 * 1024 ** 2}
+ multiple={false}
+ accept={[
+ // MIME_TYPES.png,
+ // MIME_TYPES.jpeg,
+ // MIME_TYPES.svg,
+ // MIME_TYPES.gif,
+ // MIME_TYPES.webp,
+ // MIME_TYPES.doc,
+ // MIME_TYPES.docx,
+ // MIME_TYPES.pdf,
+ // MIME_TYPES.xls,
+ // MIME_TYPES.xlsx,
+ "text/plain",
+ // "application/json"
+ ]}
+ style={{ height: '220px', width: '100%', borderColor: 'var(--mantine-color-blue-6)' }}
+ {...props}
+ >
+
+
+
+
+
+
+
+
+ {files.length > 0 ? ( ) : ( )}
+
+ {files.length > 0 ? (
+
+ {files.map(file => (
+
+ {file.name}
+
+ ))}
+
+
+ ) : (
+
+
+ Drag your file here or click to select file
+
+
+ .txt
+ {/* pdf, txt, doc, docx, png, jpg ..so on */}
+
+
+
+ )}
+
+
+ );
+}
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/FaqGen/faqGen.module.scss b/ProductivitySuite/docker/ui/react/src/components/FaqGen/faqGen.module.scss
new file mode 100644
index 000000000..3c76bbf62
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/FaqGen/faqGen.module.scss
@@ -0,0 +1,45 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "../../styles/styles"; // shared mixins: flex(), absolutes
+
+.spacer { // flexible filler used to push siblings apart
+ flex: 1 1 auto;
+}
+
+.faqGenWrapper { // row layout: sidebar + FAQ-generation content, full height
+ @include flex(row, nowrap, flex-start, flex-start);
+ flex: 1 1 auto;
+ height: 100%;
+ & > * {
+ height: 100%;
+ }
+ .faqGenContent {
+ flex: 1 1 auto;
+ position: relative; // anchor for the absolutely-positioned column below
+ .faqGenContentMessages {
+ @include absolutes;
+ @include flex(column, nowrap, normal, normal);
+ > * { // uniform gutter for every section
+ padding: 8px 24px;
+ }
+ .faqGenTitle {
+ @include flex(row, nowrap, center, flex-start);
+ height: 60px;
+ border-bottom: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4)); // theme-aware divider
+ }
+ .faqGenContentButtonGroup { // "Paste Text" / "Upload File" toggle row
+ @include flex(row, nowrap, center, center);
+ height: 60px;
+ }
+ .faqGenInput {
+ .faqGenFileUpload { // centers the dropzone
+ @include flex(row, nowrap, center, center);
+ }
+ }
+ .faqGenResult {
+ overflow-y: auto; // generated FAQ output scrolls independently
+ }
+ }
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Message/conversationMessage.module.scss b/ProductivitySuite/docker/ui/react/src/components/Message/conversationMessage.module.scss
new file mode 100644
index 000000000..b00649553
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Message/conversationMessage.module.scss
@@ -0,0 +1,15 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "../../styles/styles"; // shared flex() mixin
+
+.conversationMessage { // one chat message row: stacked header + body
+ @include flex(column, nowrap, flex-start, flex-start);
+ margin-top: 16px;
+ padding: 0 32px;
+ width: 100%;
+
+ & > * { // children span the full message width
+ width: 100%;
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Message/conversationMessage.tsx b/ProductivitySuite/docker/ui/react/src/components/Message/conversationMessage.tsx
new file mode 100644
index 000000000..e7d5cdae7
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Message/conversationMessage.tsx
@@ -0,0 +1,62 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { IconAi, IconPlus, IconUser } from "@tabler/icons-react"
+import style from "./conversationMessage.module.scss"
+import { ActionIcon, Group, Text, Tooltip } from "@mantine/core"
+import { DateTime } from "luxon"
+import Markdown from "../Shared/Markdown/Markdown"
+import { addPrompt } from "../../redux/Prompt/PromptSlice"
+import { useAppDispatch } from "../../redux/store"
+
+export interface ConversationMessageProps {
+ message: string
+ human: boolean
+ date: number
+}
+
+export function ConversationMessage({ human, message, date }: ConversationMessageProps) {
+ const dispatch = useAppDispatch();
+ const dateFormat = () => {
+ // console.log(date)
+ // console.log(new Date(date))
+ return DateTime.fromJSDate(new Date(date)).toLocaleString(DateTime.DATETIME_MED)
+ }
+
+ return (
+
+
+ {human && }
+ {!human && }
+
+
+
+ {human && "You"} {!human && "Assistant"}
+
+
+ {dateFormat()}
+
+
+ {human && (
+
+ dispatch(addPrompt({promptText:message}))
+ } size={20} variant="filled">
+
+
+ )
+ }
+
+
+ {human? message : ()}
+
+
+ {/*
+ {human && }
+ {!human && }
+
+
+
{message}
*/}
+
+ )
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Shared/CodeRender/CodeRender.tsx b/ProductivitySuite/docker/ui/react/src/components/Shared/CodeRender/CodeRender.tsx
new file mode 100644
index 000000000..479034cec
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Shared/CodeRender/CodeRender.tsx
@@ -0,0 +1,52 @@
+import styles from './codeRender.module.scss'
+import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
+import { tomorrow } from "react-syntax-highlighter/dist/esm/styles/prism";
+import { IconCopy } from '@tabler/icons-react';
+import { Button, CopyButton } from '@mantine/core';
+
+type CodeRenderProps = {
+ cleanCode: React.ReactNode,
+ language: string,
+ inline: boolean
+}
+const CodeRender = ({ cleanCode, language, inline }:CodeRenderProps) => {
+ cleanCode = String(cleanCode).replace(/\n$/, '').replace(/^\s*[\r\n]/gm, '') //right trim and remove empty lines from the input
+ console.log(styles)
+ try {
+ return inline ? ({cleanCode}
) : (
+
+
+
+ {language || "language not detected"}
+
+
+
+ {({ copied, copy }) => (
+ } onClick={copy}>
+ {copied ? 'Copied' : 'Copy'}
+
+ )}
+
+
+
+
+
)
+ } catch (err) {
+ return (
+
+ {cleanCode}
+
+ )
+ }
+
+}
+
+
+export default CodeRender;
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/Shared/CodeRender/codeRender.module.scss b/ProductivitySuite/docker/ui/react/src/components/Shared/CodeRender/codeRender.module.scss
new file mode 100644
index 000000000..a62f00d40
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Shared/CodeRender/codeRender.module.scss
@@ -0,0 +1,23 @@
+@import "../../../styles/styles"; // shared flex() mixin
+
+.code { // fenced code block: colored header bar + highlighted body
+ margin: 7px 0px;
+ .codeHead {
+ background: #379af1; // NOTE(review): hard-coded hex — consider a Mantine blue variable for theming
+
+ padding: 0px 10px !important;
+ @include flex(row, nowrap, center, space-between);
+ .codeTitle { // detected-language label (intentionally unstyled for now)
+ }
+ .codeActionGroup { // copy-button cluster
+ @include flex(row, nowrap, center, flex-start);
+ }
+ }
+ .codeHighlighterDiv { // strips SyntaxHighlighter's default outer margin
+ margin: 0px !important;
+ }
+}
+
+.inlineCode { // `inline code` spans inside rendered markdown
+ background: #d7d7d7; // NOTE(review): fixed light grey — likely low-contrast in dark mode; confirm
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/Shared/Markdown/Markdown.tsx b/ProductivitySuite/docker/ui/react/src/components/Shared/Markdown/Markdown.tsx
new file mode 100644
index 000000000..6331c6d08
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Shared/Markdown/Markdown.tsx
@@ -0,0 +1,62 @@
+import markdownStyles from './markdown.module.scss'
+import ReactMarkdown from 'react-markdown';
+import remarkGfm from 'remark-gfm';
+import remarkFrontmatter from 'remark-frontmatter';
+// import Mermaid from '../../shared/Mermaid/Mermaid';
+import CodeRender from '../CodeRender/CodeRender';
+
+type MarkdownProps = {
+ content: string
+}
+const Markdown = ({ content }: MarkdownProps) => {
+ return (
+ {
+ return (
+
+ {children}
+
+ );
+ },
+ a: ({ children, ...props }) => {
+ return (
+
+ {children}
+
+ );
+ },
+ table: ({ children, ...props }) => {
+ return (
+
+ );
+ },
+ //@ts-expect-error inline can undefined sometimes
+ code({ inline, className, children, }) {
+ const lang = /language-(\w+)/.exec(className || '')
+ // if (lang && lang[1] === "mermaid") {
+ // return
+ // }
+ return
+ }
+ }}
+ />)
+}
+
+export default Markdown;
\ No newline at end of file
diff --git a/ProductivitySuite/docker/ui/react/src/components/Shared/Markdown/markdown.module.scss b/ProductivitySuite/docker/ui/react/src/components/Shared/Markdown/markdown.module.scss
new file mode 100644
index 000000000..e796f836f
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/Shared/Markdown/markdown.module.scss
@@ -0,0 +1,14 @@
+.tableDiv { // wrapper giving markdown tables visible collapsed borders
+ table,
+ th,
+ td {
+ border: 1px solid black; // NOTE(review): hard-coded black — may clash with dark theme; confirm
+ border-collapse: collapse;
+ }
+}
+
+.md { // root class applied to rendered markdown output
+ li { // restore list indentation lost to global resets
+ margin-left: 35px; /* Adjust the value based on your preference */
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/sidebar/sidebar.module.scss b/ProductivitySuite/docker/ui/react/src/components/sidebar/sidebar.module.scss
new file mode 100644
index 000000000..cf8061e1e
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/sidebar/sidebar.module.scss
@@ -0,0 +1,87 @@
+/**
+ Copyright (c) 2024 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ **/
+
+@import "../../styles/styles"; // shared flex() mixin
+
+.navbar { // vertical icon rail on the far left
+ width: 100%;
+ @include flex(column, nowrap, center, flex-start);
+ padding: var(--mantine-spacing-md);
+ background-color: var(--mantine-color-blue-filled);
+ // background-color: light-dark(var(--mantine-color-white), var(--mantine-color-dark-6));
+ // border-right: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4));
+}
+
+.navbarMain {
+ // flex: 1;
+ height: 655px; // NOTE(review): hard-coded height (mirrors conversation grid row) — consider flex: 1 instead
+}
+
+.navbarLogo { // centered logo slot at the top of the rail
+ width: 100%;
+ display: flex;
+ justify-content: center;
+ padding-top: var(--mantine-spacing-md);
+ margin-bottom: var(--mantine-spacing-xl);
+}
+
+.link { // one square nav button; [data-active] inverts the colors
+ width: 44px;
+ height: 44px;
+ border-radius: var(--mantine-radius-md);
+ text-align: center;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ color: var(--mantine-color-white);
+
+ &:hover {
+ background-color: var(--mantine-color-blue-7);
+ }
+
+ &[data-active] {
+ &,
+ &:hover {
+ box-shadow: var(--mantine-shadow-sm);
+ background-color: var(--mantine-color-white);
+ color: var(--mantine-color-blue-6);
+ }
+ }
+}
+
+.aside { // fixed-width secondary column
+ flex: 0 0 60px;
+ background-color: var(--mantine-color-body);
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ border-right: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-7));
+}
+
+.logo { // logo header within the aside column
+ width: 100%;
+ display: flex;
+ justify-content: center;
+ height: 60px;
+ padding-top: var(--mantine-spacing-s); // NOTE(review): Mantine spacing tokens are xs/sm/md/lg/xl — `-s` may resolve to nothing; confirm
+ border-bottom: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-7));
+ margin-bottom: var(--mantine-spacing-xl);
+}
+.logoImg {
+ width: 30px;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/components/sidebar/sidebar.tsx b/ProductivitySuite/docker/ui/react/src/components/sidebar/sidebar.tsx
new file mode 100644
index 000000000..e35b4f011
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/components/sidebar/sidebar.tsx
@@ -0,0 +1,87 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { useState } from "react"
+import { Tooltip, UnstyledButton, Stack, rem } from "@mantine/core"
+import { IconHome2, IconLogout } from "@tabler/icons-react"
+import classes from "./sidebar.module.scss"
+import OpeaLogo from "../../assets/opea-icon-color.svg"
+import { useAppDispatch } from "../../redux/store"
+import { removeUser } from "../../redux/User/userSlice"
+import { logout } from "../../redux/Conversation/ConversationSlice"
+import { useNavigate } from "react-router-dom"
+import { clearPrompts } from "../../redux/Prompt/PromptSlice"
+import { useKeycloak } from "@react-keycloak/web"
+
+interface NavbarLinkProps {
+ icon: typeof IconHome2
+ label: string
+ path?: string
+ active?: boolean
+ onClick?(): void
+}
+
+function NavbarLink({ icon: Icon, label, active,path , onClick }: NavbarLinkProps) {
+ const navigate = useNavigate();
+
+ return (
+
+ {onClick ? onClick() : navigate(path || "")}} className={classes.link} data-active={active || undefined}>
+
+ {/* {label} */}
+
+
+ )
+}
+
+export interface SidebarNavItem {
+ icon: typeof IconHome2
+ label: string,
+ path:string,
+ children: React.ReactNode
+}
+
+export type SidebarNavList = SidebarNavItem[]
+
+export interface SideNavbarProps {
+ navList: SidebarNavList
+}
+
+export function SideNavbar({ navList }: SideNavbarProps) {
+ const dispatch =useAppDispatch()
+ const [active, setActive] = useState(0)
+ const navigate = useNavigate();
+ const {keycloak} = useKeycloak()
+
+
+ const handleLogout = () => {
+ dispatch(logout())
+ dispatch(removeUser())
+ dispatch(clearPrompts())
+ keycloak.logout({})
+ }
+
+ const links = navList.map((link, index) => (
+ {
+ setActive(index)
+ navigate(link.path)
+ }} />
+ ))
+
+ return (
+
+
+
+
+
+
+
+ {links}
+
+
+
+
+
+
+ )
+}
diff --git a/ProductivitySuite/docker/ui/react/src/config.ts b/ProductivitySuite/docker/ui/react/src/config.ts
new file mode 100644
index 000000000..a17ae2bf1
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/config.ts
@@ -0,0 +1,19 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+console.log(import.meta.env.VITE_KEYCLOAK_SERVICE_ENDPOINT);
+export const CHAT_QNA_URL = import.meta.env.VITE_BACKEND_SERVICE_ENDPOINT_CHATQNA;
+export const CODE_GEN_URL = import.meta.env.VITE_BACKEND_SERVICE_ENDPOINT_CODEGEN;
+export const DOC_SUM_URL = import.meta.env.VITE_BACKEND_SERVICE_ENDPOINT_DOCSUM;
+export const FAQ_GEN_URL = import.meta.env.VITE_BACKEND_SERVICE_ENDPOINT_FAQGEN;
+export const KEYCLOACK_SERVICE_URL = import.meta.env.VITE_KEYCLOAK_SERVICE_ENDPOINT;
+
+export const DATA_PREP_URL = import.meta.env.VITE_DATAPREP_SERVICE_ENDPOINT;
+export const DATA_PREP_GET_URL = import.meta.env.VITE_DATAPREP_GET_FILE_ENDPOINT;
+export const DATA_PREP_DELETE_URL = import.meta.env.VITE_DATAPREP_DELETE_FILE_ENDPOINT;
+
+export const CHAT_HISTORY_CREATE = import.meta.env.VITE_CHAT_HISTORY_CREATE_ENDPOINT;
+export const CHAT_HISTORY_GET = import.meta.env.VITE_CHAT_HISTORY_GET_ENDPOINT;
+export const CHAT_HISTORY_DELETE = import.meta.env.VITE_CHAT_HISTORY_DELETE_ENDPOINT;
+export const PROMPT_MANAGER_GET = import.meta.env.VITE_PROMPT_SERVICE_GET_ENDPOINT;
+export const PROMPT_MANAGER_CREATE = import.meta.env.VITE_PROMPT_SERVICE_CREATE_ENDPOINT;
diff --git a/ProductivitySuite/docker/ui/react/src/index.scss b/ProductivitySuite/docker/ui/react/src/index.scss
new file mode 100644
index 000000000..53e71621e
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/index.scss
@@ -0,0 +1,20 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "@mantine/core/styles.css";
+
+:root {
+ font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif;
+ line-height: 1.5;
+ font-weight: 400;
+}
+
+html,
+body {
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ overflow: hidden;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/keycloack.ts b/ProductivitySuite/docker/ui/react/src/keycloack.ts
new file mode 100644
index 000000000..26ba47511
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/keycloack.ts
@@ -0,0 +1,12 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import Keycloak from "keycloak-js";
+import { KEYCLOACK_SERVICE_URL } from "./config";
+const keycloak = new Keycloak({
+ url: KEYCLOACK_SERVICE_URL,
+ realm: "productivitysuite",
+ clientId: "productivitysuite",
+});
+
+export default keycloak;
diff --git a/ProductivitySuite/docker/ui/react/src/main.tsx b/ProductivitySuite/docker/ui/react/src/main.tsx
new file mode 100644
index 000000000..a1949c00f
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/main.tsx
@@ -0,0 +1,22 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import ReactDOM from "react-dom/client"
+import App from "./App.tsx"
+import "./index.scss"
+import { Provider } from 'react-redux'
+import { store } from "./redux/store.ts"
+import keycloak from "./keycloack.ts"
+import { ReactKeycloakProvider } from "@react-keycloak/web";
+
+
+
+ReactDOM.createRoot(document.getElementById("root")!).render(
+ <>
+
+
+
+
+
+ >
+)
diff --git a/ProductivitySuite/docker/ui/react/src/redux/Conversation/Conversation.ts b/ProductivitySuite/docker/ui/react/src/redux/Conversation/Conversation.ts
new file mode 100644
index 000000000..ce311cbc1
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/redux/Conversation/Conversation.ts
@@ -0,0 +1,37 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+export type ConversationRequest = {
+ conversationId: string;
+ userPrompt: Message;
+ messages: Message[];
+ model: string;
+};
+export enum MessageRole {
+ Assistant = "assistant",
+ User = "user",
+ System = "system",
+}
+
+export interface Message {
+ role: MessageRole;
+ content: string;
+ time?: string;
+}
+
+export interface Conversation {
+ id: string;
+ first_query?: string;
+}
+
+type file = {
+ name: string;
+};
+
+export interface ConversationReducer {
+ selectedConversationId: string;
+ conversations: Conversation[];
+ selectedConversationHistory: Message[];
+ onGoingResult: string;
+ filesInDataSource: file[];
+}
diff --git a/ProductivitySuite/docker/ui/react/src/redux/Conversation/ConversationSlice.ts b/ProductivitySuite/docker/ui/react/src/redux/Conversation/ConversationSlice.ts
new file mode 100644
index 000000000..2076ad1d7
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/redux/Conversation/ConversationSlice.ts
@@ -0,0 +1,299 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { PayloadAction, createSlice } from "@reduxjs/toolkit";
+import { RootState, store } from "../store";
+import { fetchEventSource } from "@microsoft/fetch-event-source";
+import { Message, MessageRole, ConversationReducer, ConversationRequest, Conversation } from "./Conversation";
+import { getCurrentTimeStamp } from "../../common/util";
+import { createAsyncThunkWrapper } from "../thunkUtil";
+import client from "../../common/client";
+import { notifications } from "@mantine/notifications";
+import {
+ CHAT_QNA_URL,
+ DATA_PREP_URL,
+ DATA_PREP_GET_URL,
+ DATA_PREP_DELETE_URL,
+ CHAT_HISTORY_CREATE,
+ CHAT_HISTORY_GET,
+ CHAT_HISTORY_DELETE,
+} from "../../config";
+
+const initialState: ConversationReducer = {
+ conversations: [],
+ selectedConversationId: "",
+ selectedConversationHistory: [],
+ onGoingResult: "",
+ filesInDataSource: [],
+};
+
+export const ConversationSlice = createSlice({
+ name: "Conversation",
+ initialState,
+ reducers: {
+ logout: (state) => {
+ state.conversations = [];
+ state.selectedConversationId = "";
+ state.onGoingResult = "";
+ state.selectedConversationHistory = [];
+ state.filesInDataSource = [];
+ },
+
+ setOnGoingResult: (state, action: PayloadAction) => {
+ state.onGoingResult = action.payload;
+ },
+ addMessageToMessages: (state, action: PayloadAction) => {
+ state.selectedConversationHistory.push(action.payload);
+ },
+ newConversation: (state) => {
+ (state.selectedConversationId = ""), (state.onGoingResult = ""), (state.selectedConversationHistory = []);
+ },
+ setSelectedConversationId: (state, action: PayloadAction) => {
+ state.selectedConversationId = action.payload;
+ },
+ },
+ extraReducers(builder) {
+ builder.addCase(uploadFile.fulfilled, () => {
+ notifications.update({
+ id: "upload-file",
+ message: "File Uploaded Successfully",
+ loading: false,
+ autoClose: 3000,
+ });
+ });
+ builder.addCase(uploadFile.rejected, () => {
+ notifications.update({
+ color: "red",
+ id: "upload-file",
+ message: "Failed to Upload file",
+ loading: false,
+ });
+ });
+
+ builder.addCase(submitDataSourceURL.fulfilled, () => {
+ notifications.show({
+ message: "Submitted Successfully",
+ });
+ });
+ builder.addCase(submitDataSourceURL.rejected, () => {
+ notifications.show({
+ color: "red",
+ message: "Submit Failed",
+ });
+ });
+
+ builder.addCase(getAllConversations.fulfilled, (state, action) => {
+ state.conversations = action.payload;
+ });
+
+ builder.addCase(getConversationHistory.fulfilled, (state, action) => {
+ state.selectedConversationHistory = action.payload;
+ });
+
+ builder.addCase(saveConversationtoDatabase.fulfilled, (state, action) => {
+ if (state.selectedConversationId == "") {
+ state.selectedConversationId = action.payload;
+ state.conversations.push({
+ id: action.payload,
+ first_query: state.selectedConversationHistory[1].content,
+ });
+ }
+ });
+ builder.addCase(getAllFilesInDataSource.fulfilled, (state, action) => {
+ state.filesInDataSource = action.payload;
+ });
+ builder.addCase(deleteConversation.fulfilled, () => {
+ notifications.show({
+ message: "Conversation Deleted Successfully",
+ });
+ });
+ },
+});
+
+export const submitDataSourceURL = createAsyncThunkWrapper(
+ "conversation/submitDataSourceURL",
+ async ({ link_list }: { link_list: string[] }, { dispatch }) => {
+ const body = new FormData();
+ body.append("link_list", JSON.stringify(link_list));
+ const response = await client.post(DATA_PREP_URL, body);
+ dispatch(getAllFilesInDataSource({ knowledgeBaseId: "default" }));
+ return response.data;
+ },
+);
+
+export const getAllFilesInDataSource = createAsyncThunkWrapper(
+ "conversation/getAllFilesInDataSource",
+ async ({ knowledgeBaseId }: { knowledgeBaseId: string }, {}) => {
+ const body = {
+ knowledge_base_id: knowledgeBaseId,
+ };
+ const response = await client.post(DATA_PREP_GET_URL, body);
+ return response.data;
+ },
+);
+export const uploadFile = createAsyncThunkWrapper(
+ "conversation/uploadFile",
+ async ({ file }: { file: File }, { dispatch }) => {
+ const body = new FormData();
+ body.append("files", file);
+
+ notifications.show({
+ id: "upload-file",
+ message: "uploading File",
+ loading: true,
+ });
+ const response = await client.post(DATA_PREP_URL, body);
+ dispatch(getAllFilesInDataSource({ knowledgeBaseId: "default" }));
+ return response.data;
+ },
+);
+
+export const deleteInDataSource = createAsyncThunkWrapper(
+ "conversation/deleteInDataSource",
+ async ({ file }: { file: any }, { dispatch }) => {
+ const response = await client.post(DATA_PREP_DELETE_URL, {
+ file_path: file,
+ });
+ dispatch(getAllFilesInDataSource({ knowledgeBaseId: "default" }));
+ return response.data;
+ },
+);
+
+export const saveConversationtoDatabase = createAsyncThunkWrapper(
+ "conversation/saveConversationtoDatabase",
+ async ({ conversation }: { conversation: Conversation }, { getState }) => {
+ // @ts-ignore
+ const state: RootState = getState();
+ const selectedConversationHistory = state.conversationReducer.selectedConversationHistory;
+ const response = await client.post(CHAT_HISTORY_CREATE, {
+ data: {
+ user: state.userReducer.name,
+ messages: selectedConversationHistory,
+ },
+ id: conversation.id == "" ? null : conversation.id,
+ first_query: selectedConversationHistory[1].content,
+ });
+ return response.data;
+ },
+);
+
+export const getAllConversations = createAsyncThunkWrapper(
+ "conversation/getAllConversations",
+ async ({ user }: { user: string }, {}) => {
+ const response = await client.post(CHAT_HISTORY_GET, {
+ user,
+ });
+ return response.data;
+ },
+);
+
+export const getConversationHistory = createAsyncThunkWrapper(
+ "conversation/getConversationHistory",
+ async ({ user, conversationId }: { user: string; conversationId: string }, {}) => {
+ const response = await client.post(CHAT_HISTORY_GET, {
+ user,
+ id: conversationId,
+ });
+ return response.data.messages;
+ },
+);
+
+export const deleteConversation = createAsyncThunkWrapper(
+ "conversation/delete",
+ async ({ user, conversationId }: { user: string; conversationId: string }, { dispatch }) => {
+ const response = await client.post(CHAT_HISTORY_DELETE, {
+ user,
+ id: conversationId,
+ });
+
+ dispatch(newConversation());
+ dispatch(getAllConversations({ user }));
+ return response.data;
+ },
+);
+
+export const doConversation = (conversationRequest: ConversationRequest) => {
+ const { conversationId, userPrompt, messages, model } = conversationRequest;
+ store.dispatch(addMessageToMessages(messages[0]));
+ store.dispatch(addMessageToMessages(userPrompt));
+ const userPromptWithoutTime = {
+ role: userPrompt.role,
+ content: userPrompt.content,
+ };
+ const body = {
+ messages: [...messages, userPromptWithoutTime],
+ model,
+ };
+
+ // let conversation: Conversation;
+ let result = "";
+ try {
+ fetchEventSource(CHAT_QNA_URL, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify(body),
+ openWhenHidden: true,
+ async onopen(response) {
+ if (response.ok) {
+ return;
+ } else if (response.status >= 400 && response.status < 500 && response.status !== 429) {
+ const e = await response.json();
+ console.log(e);
+ throw Error(e.error.message);
+ } else {
+ console.log("error", response);
+ }
+ },
+ onmessage(msg) {
+ if (msg?.data != "[DONE]") {
+ try {
+ const match = msg.data.match(/b'([^']*)'/);
+ if (match && match[1] != "") {
+ const extractedText = match[1];
+ result += extractedText;
+ store.dispatch(setOnGoingResult(result));
+ }
+ } catch (e) {
+ console.log("something wrong in msg", e);
+ throw e;
+ }
+ }
+ },
+ onerror(err) {
+ console.log("error", err);
+ store.dispatch(setOnGoingResult(""));
+ //notify here
+ throw err;
+ //handle error
+ },
+ onclose() {
+ //handle close
+ const m: Message = {
+ role: MessageRole.Assistant,
+ content: result,
+ time: getCurrentTimeStamp().toString(),
+ };
+ store.dispatch(setOnGoingResult(""));
+
+ store.dispatch(addMessageToMessages(m));
+
+ store.dispatch(
+ saveConversationtoDatabase({
+ conversation: {
+ id: conversationId,
+ },
+ }),
+ );
+ },
+ });
+ } catch (err) {
+ console.log(err);
+ }
+};
+
+export const { logout, setOnGoingResult, newConversation, addMessageToMessages, setSelectedConversationId } =
+ ConversationSlice.actions;
+export const conversationSelector = (state: RootState) => state.conversationReducer;
+export default ConversationSlice.reducer;
diff --git a/ProductivitySuite/docker/ui/react/src/redux/Prompt/PromptSlice.ts b/ProductivitySuite/docker/ui/react/src/redux/Prompt/PromptSlice.ts
new file mode 100644
index 000000000..19a479ab0
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/redux/Prompt/PromptSlice.ts
@@ -0,0 +1,74 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { createSlice, PayloadAction } from "@reduxjs/toolkit";
+import { createAsyncThunkWrapper } from "../thunkUtil";
+import client from "../../common/client";
+import { RootState } from "../store";
+import { notifications } from "@mantine/notifications";
+import { PROMPT_MANAGER_CREATE, PROMPT_MANAGER_GET } from "../../config";
+
+type promptReducer = {
+ prompts: Prompt[];
+};
+
+type Prompt = {
+ id: string;
+ prompt_text: string;
+ user: string;
+};
+
+const initialState: promptReducer = {
+ prompts: [],
+};
+
+export const PromptSlice = createSlice({
+ name: "Prompts",
+ initialState,
+ reducers: {
+ clearPrompts: (state) => {
+ state.prompts = [];
+ },
+ },
+ extraReducers(builder) {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ builder.addCase(getPrompts.fulfilled, (state, action: PayloadAction) => {
+ state.prompts = action.payload;
+ });
+ builder.addCase(addPrompt.fulfilled, () => {
+ notifications.show({
+ message: "Prompt added SuccessFully",
+ });
+ });
+ },
+});
+
+export const { clearPrompts } = PromptSlice.actions;
+export const promptSelector = (state: RootState) => state.promptReducer;
+export default PromptSlice.reducer;
+
+export const getPrompts = createAsyncThunkWrapper(
+ "prompts/getPrompts",
+ async ({ promptText }: { promptText: string | null }, { getState }) => {
+ // @ts-ignore
+ const state: RootState = getState();
+ const response = await client.post(PROMPT_MANAGER_GET, {
+ promptText: promptText,
+ user: state.userReducer.name,
+ });
+ return response.data;
+ },
+);
+
+export const addPrompt = createAsyncThunkWrapper(
+ "prompts/addPrompt",
+ async ({ promptText }: { promptText: string }, { getState }) => {
+ // @ts-ignore
+ const state: RootState = getState();
+ const response = await client.post(PROMPT_MANAGER_CREATE, {
+ prompt_text: promptText,
+ user: state.userReducer.name,
+ });
+ return response.data;
+ },
+);
diff --git a/ProductivitySuite/docker/ui/react/src/redux/User/user.d.ts b/ProductivitySuite/docker/ui/react/src/redux/User/user.d.ts
new file mode 100644
index 000000000..ce5a32e8c
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/redux/User/user.d.ts
@@ -0,0 +1,6 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+export interface User {
+ name: string;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/redux/User/userSlice.ts b/ProductivitySuite/docker/ui/react/src/redux/User/userSlice.ts
new file mode 100644
index 000000000..ff8c09940
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/redux/User/userSlice.ts
@@ -0,0 +1,26 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { createSlice, PayloadAction } from "@reduxjs/toolkit";
+import { RootState } from "../store";
+import { User } from "./user";
+
+const initialState: User = {
+ name: "",
+};
+
+export const userSlice = createSlice({
+ name: "user",
+ initialState,
+ reducers: {
+ setUser: (state, action: PayloadAction) => {
+ state.name = action.payload;
+ },
+ removeUser: (state) => {
+ state.name = "";
+ },
+ },
+});
+export const { setUser, removeUser } = userSlice.actions;
+export const userSelector = (state: RootState) => state.userReducer;
+export default userSlice.reducer;
diff --git a/ProductivitySuite/docker/ui/react/src/redux/store.ts b/ProductivitySuite/docker/ui/react/src/redux/store.ts
new file mode 100644
index 000000000..4d33a8575
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/redux/store.ts
@@ -0,0 +1,50 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { combineReducers, configureStore } from "@reduxjs/toolkit";
+import userReducer from "./User/userSlice";
+import conversationReducer from "./Conversation/ConversationSlice";
+import promptReducer from "./Prompt/PromptSlice";
+import { TypedUseSelectorHook, useDispatch, useSelector } from "react-redux";
+
+export const store = configureStore({
+ reducer: combineReducers({
+ userReducer,
+ conversationReducer,
+ promptReducer,
+ }),
+ devTools: import.meta.env.PROD || true,
+ // preloadedState: loadFromLocalStorage(),
+ middleware: (getDefaultMiddleware) =>
+ getDefaultMiddleware({
+ serializableCheck: false,
+ }),
+});
+
+// function saveToLocalStorage(state: ReturnType) {
+// try {
+// const serialState = JSON.stringify(state);
+// localStorage.setItem("reduxStore", serialState);
+// } catch (e) {
+// console.warn(e);
+// }
+// }
+
+// function loadFromLocalStorage() {
+// try {
+// const serialisedState = localStorage.getItem("reduxStore");
+// if (serialisedState === null) return undefined;
+// return JSON.parse(serialisedState);
+// } catch (e) {
+// console.warn(e);
+// return undefined;
+// }
+// }
+
+// store.subscribe(() => saveToLocalStorage(store.getState()));
+export default store;
+export type AppDispatch = typeof store.dispatch;
+export type RootState = ReturnType;
+
+export const useAppDispatch: () => AppDispatch = useDispatch;
+export const useAppSelector: TypedUseSelectorHook = useSelector;
diff --git a/ProductivitySuite/docker/ui/react/src/redux/thunkUtil.ts b/ProductivitySuite/docker/ui/react/src/redux/thunkUtil.ts
new file mode 100644
index 000000000..5df362fd3
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/redux/thunkUtil.ts
@@ -0,0 +1,25 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { createAsyncThunk, AsyncThunkPayloadCreator, AsyncThunk } from "@reduxjs/toolkit";
+
+interface ThunkAPIConfig {}
+
+export const createAsyncThunkWrapper = (
+ type: string,
+ thunk: AsyncThunkPayloadCreator, // <-- very unsure of this - have tried many things here
+): AsyncThunk => {
+ return createAsyncThunk(
+ type,
+ // @ts-ignore
+ async (arg, thunkAPI) => {
+ try {
+ // do some stuff here that happens on every action
+ return await thunk(arg, thunkAPI);
+ } catch (err) {
+ // do some stuff here that happens on every error
+ return thunkAPI.rejectWithValue(err);
+ }
+ },
+ );
+};
diff --git a/ProductivitySuite/docker/ui/react/src/styles/components/_sidebar.scss b/ProductivitySuite/docker/ui/react/src/styles/components/_sidebar.scss
new file mode 100644
index 000000000..23018ee1f
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/styles/components/_sidebar.scss
@@ -0,0 +1,8 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "../layout/flex";
+
+@mixin sidebar {
+ @include flex(column, nowrap, flex-start, flex-start);
+}
diff --git a/ProductivitySuite/docker/ui/react/src/styles/components/content.scss b/ProductivitySuite/docker/ui/react/src/styles/components/content.scss
new file mode 100644
index 000000000..9a230f249
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/styles/components/content.scss
@@ -0,0 +1,5 @@
+@mixin textWrapEllipsis {
+ text-overflow: ellipsis;
+ white-space: nowrap;
+ overflow: hidden;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/styles/components/context.module.scss b/ProductivitySuite/docker/ui/react/src/styles/components/context.module.scss
new file mode 100644
index 000000000..9577838ef
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/styles/components/context.module.scss
@@ -0,0 +1,70 @@
+@import "../layout/flex";
+@import "../components/content.scss";
+
+.contextWrapper {
+ background-color: light-dark(var(--mantine-color-gray-0), var(--mantine-color-dark-6));
+ border-right: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-4));
+ width: 180px;
+ overflow-y: hidden;
+ overflow-x: hidden;
+ // overflow-y: auto;
+
+ .contextTitle {
+ position: sticky;
+ top: 0;
+ font-family:
+ Greycliff CF,
+ var(--mantine-font-family);
+ margin-bottom: var(--mantine-spacing-xl);
+ background-color: var(--mantine-color-body);
+ padding: var(--mantine-spacing-md);
+ padding-top: 18px;
+ width: 100%;
+ height: 60px;
+ border-bottom: 1px solid light-dark(var(--mantine-color-gray-3), var(--mantine-color-dark-7));
+ }
+
+ .contextList {
+ height: 90vh;
+ // display: flex();
+
+ .contextListItem {
+ display: flex;
+ flex: 1 1 auto;
+ justify-content: center;
+ align-items: center;
+ text-decoration: none;
+ border-top-right-radius: var(--mantine-radius-md);
+ border-bottom-right-radius: var(--mantine-radius-md);
+ color: light-dark(var(--mantine-color-gray-7), var(--mantine-color-dark-0));
+ padding: 0 var(--mantine-spacing-md);
+ font-size: var(--mantine-font-size-sm);
+ margin-right: var(--mantine-spacing-md);
+ font-weight: 500;
+ height: 44px;
+ width: 100%;
+ line-height: 44px;
+ cursor: pointer;
+
+ .contextItemName {
+ flex: 1 1 auto;
+ width: 110px;
+ @include textWrapEllipsis;
+ }
+
+ &:hover {
+ background-color: light-dark(var(--mantine-color-gray-1), var(--mantine-color-dark-5));
+ color: light-dark(var(--mantine-color-dark), var(--mantine-color-light));
+ }
+
+ &[data-active] {
+ &,
+ &:hover {
+ border-left-color: var(--mantine-color-blue-filled);
+ background-color: var(--mantine-color-blue-filled);
+ color: var(--mantine-color-white);
+ }
+ }
+ }
+ }
+}
diff --git a/ProductivitySuite/docker/ui/react/src/styles/layout/_basics.scss b/ProductivitySuite/docker/ui/react/src/styles/layout/_basics.scss
new file mode 100644
index 000000000..d11b1ef21
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/styles/layout/_basics.scss
@@ -0,0 +1,7 @@
+@mixin absolutes {
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/styles/layout/_flex.scss b/ProductivitySuite/docker/ui/react/src/styles/layout/_flex.scss
new file mode 100644
index 000000000..18d2ce8ec
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/styles/layout/_flex.scss
@@ -0,0 +1,6 @@
+@mixin flex($direction: row, $wrap: nowrap, $alignItems: center, $justifyContent: center) {
+ display: flex;
+ flex-flow: $direction $wrap;
+ align-items: $alignItems;
+ justify-content: $justifyContent;
+}
diff --git a/ProductivitySuite/docker/ui/react/src/styles/styles.scss b/ProductivitySuite/docker/ui/react/src/styles/styles.scss
new file mode 100644
index 000000000..8028d8ad6
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/styles/styles.scss
@@ -0,0 +1,5 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+@import "layout/flex";
+@import "layout/basics";
diff --git a/ProductivitySuite/docker/ui/react/src/vite-env.d.ts b/ProductivitySuite/docker/ui/react/src/vite-env.d.ts
new file mode 100644
index 000000000..4260915f7
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/src/vite-env.d.ts
@@ -0,0 +1,4 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+///
diff --git a/ProductivitySuite/docker/ui/react/tsconfig.json b/ProductivitySuite/docker/ui/react/tsconfig.json
new file mode 100644
index 000000000..f50b75c5f
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/tsconfig.json
@@ -0,0 +1,23 @@
+{
+ "compilerOptions": {
+ "target": "ES2020",
+ "useDefineForClassFields": true,
+ "lib": ["ES2020", "DOM", "DOM.Iterable"],
+ "module": "ESNext",
+ "skipLibCheck": true,
+
+ "moduleResolution": "bundler",
+ "allowImportingTsExtensions": true,
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "noEmit": true,
+ "jsx": "react-jsx",
+
+ "strict": true,
+ "noUnusedLocals": true,
+ "noUnusedParameters": true,
+ "noFallthroughCasesInSwitch": true
+ },
+ "include": ["src"],
+ "references": [{ "path": "./tsconfig.node.json" }]
+}
diff --git a/ProductivitySuite/docker/ui/react/tsconfig.node.json b/ProductivitySuite/docker/ui/react/tsconfig.node.json
new file mode 100644
index 000000000..97ede7ee6
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/tsconfig.node.json
@@ -0,0 +1,11 @@
+{
+ "compilerOptions": {
+ "composite": true,
+ "skipLibCheck": true,
+ "module": "ESNext",
+ "moduleResolution": "bundler",
+ "allowSyntheticDefaultImports": true,
+ "strict": true
+ },
+ "include": ["vite.config.ts"]
+}
diff --git a/ProductivitySuite/docker/ui/react/vite.config.ts b/ProductivitySuite/docker/ui/react/vite.config.ts
new file mode 100644
index 000000000..bb171b48d
--- /dev/null
+++ b/ProductivitySuite/docker/ui/react/vite.config.ts
@@ -0,0 +1,27 @@
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+import { defineConfig } from "vitest/config";
+import react from "@vitejs/plugin-react";
+
+// https://vitejs.dev/config/
+export default defineConfig({
+ css: {
+ preprocessorOptions: {
+ scss: {
+ additionalData: `@import "./src/styles/styles.scss";`,
+ },
+ },
+ },
+ plugins: [react()],
+ server: {
+ port: 5174,
+ },
+ test: {
+ globals: true,
+ environment: "jsdom",
+ },
+ define: {
+ "import.meta.env": process.env,
+ },
+});
diff --git a/ProductivitySuite/docker/xeon/README.md b/ProductivitySuite/docker/xeon/README.md
new file mode 100644
index 000000000..0fd16aade
--- /dev/null
+++ b/ProductivitySuite/docker/xeon/README.md
@@ -0,0 +1,547 @@
+# Build Mega Service of Productivity Suite on Xeon
+
+This document outlines the deployment process for OPEA Productivity Suite utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on Intel Xeon server and [GenAIExamples](https://github.com/opea-project/GenAIExamples.git) solutions. The steps include Docker image creation, container deployment via Docker Compose, and service execution to integrate microservices such as `embedding`, `retriever`, `rerank`, and `llm`. We will publish the Docker images to Docker Hub soon, which will simplify the deployment process for this service.
+
+## 🚀 Build Docker Images
+
+First of all, you need to build Docker Images locally and install the python package of it.
+
+```bash
+git clone https://github.com/opea-project/GenAIComps.git
+cd GenAIComps
+```
+
+### 1. Build Embedding Image
+
+```bash
+docker build --no-cache -t opea/embedding-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/embeddings/langchain/docker/Dockerfile .
+```
+
+### 2. Build Retriever Image
+
+```bash
+docker build --no-cache -t opea/retriever-redis:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/retrievers/langchain/redis/docker/Dockerfile .
+```
+
+### 3. Build Rerank Image
+
+```bash
+docker build --no-cache -t opea/reranking-tei:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/reranks/tei/docker/Dockerfile .
+```
+
+### 4. Build LLM Image
+
+#### Use TGI as backend
+
+```bash
+docker build --no-cache -t opea/llm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/text-generation/tgi/Dockerfile .
+```
+
+### 5. Build Dataprep Image
+
+```bash
+docker build --no-cache -t opea/dataprep-redis:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/dataprep/redis/langchain/docker/Dockerfile .
+
+```
+
+### 6. Build Prompt Registry Image
+
+```bash
+docker build -t opea/promptregistry-mongo-server:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/prompt_registry/mongo/docker/Dockerfile .
+
+
+```
+
+### 7. Build Chat History Image
+
+```bash
+docker build -t opea/chathistory-mongo-server:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/chathistory/mongo/docker/Dockerfile .
+cd ..
+```
+
+### 8. Build MegaService Docker Images
+
+The Productivity Suite is composed of multiple GenAIExamples reference solutions integrated together.
+
+### 8.1 Build ChatQnA MegaService Docker Images
+
+```bash
+git clone https://github.com/opea-project/GenAIExamples.git
+cd GenAIExamples/ChatQnA/docker
+docker build --no-cache -t opea/chatqna:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
+```
+
+### 8.2 Build DocSum Megaservice Docker Images
+
+```bash
+cd GenAIExamples/DocSum/docker
+docker build --no-cache -t opea/docsum:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
+```
+
+### 8.3 Build CodeGen Megaservice Docker Images
+
+```bash
+cd GenAIExamples/CodeGen/docker
+docker build --no-cache -t opea/codegen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
+```
+
+### 8.4 Build FAQGen Megaservice Docker Images
+
+```bash
+cd GenAIExamples/FaqGen/docker
+docker build --no-cache -t opea/faqgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+cd ../../..
+```
+
+### 9. Build UI Docker Image
+
+Build the frontend Docker image via the command below:
+
+**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
+
+```bash
+cd GenAIExamples/ProductivitySuite/docker/ui/
+docker build --no-cache -t opea/productivity-suite-react-ui-server:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile.react .
+cd ../../../..
+```
+
+## 🚀 Start Microservices
+
+### Setup Environment Variables
+
+Since the `compose.yaml` will consume some environment variables, you need to setup them in advance as below.
+
+**Export the value of the public IP address of your Xeon server to the `host_ip` environment variable**
+
+> Change the External_Public_IP below with the actual IPV4 value
+
+```
+export host_ip="External_Public_IP"
+```
+
+**Export the value of your Huggingface API token to the `your_hf_api_token` environment variable**
+
+> Change the Your_Huggingface_API_Token below with your actual Huggingface API Token value
+
+```
+export your_hf_api_token="Your_Huggingface_API_Token"
+```
+
+**Append the value of the public IP address to the no_proxy list**
+
+```
+export your_no_proxy=${your_no_proxy},"External_Public_IP"
+```
+
+```bash
+export MONGO_HOST=${host_ip}
+export MONGO_PORT=27017
+export DB_NAME="test"
+export COLLECTION_NAME="Conversations"
+export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+export LLM_MODEL_ID_CODEGEN="meta-llama/CodeLlama-7b-hf"
+export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"
+export TEI_RERANKING_ENDPOINT="http://${host_ip}:8808"
+export TGI_LLM_ENDPOINT="http://${host_ip}:9009"
+export REDIS_URL="redis://${host_ip}:6379"
+export INDEX_NAME="rag-redis"
+export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token}
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export EMBEDDING_SERVICE_HOST_IP=${host_ip}
+export RETRIEVER_SERVICE_HOST_IP=${host_ip}
+export RERANK_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP_DOCSUM=${host_ip}
+export LLM_SERVICE_HOST_IP_FAQGEN=${host_ip}
+export LLM_SERVICE_HOST_IP_CODEGEN=${host_ip}
+export LLM_SERVICE_HOST_IP_CHATQNA=${host_ip}
+export TGI_LLM_ENDPOINT_CHATQNA="http://${host_ip}:9009"
+export TGI_LLM_ENDPOINT_CODEGEN="http://${host_ip}:8028"
+export TGI_LLM_ENDPOINT_FAQGEN="http://${host_ip}:9009"
+export TGI_LLM_ENDPOINT_DOCSUM="http://${host_ip}:9009"
+export BACKEND_SERVICE_ENDPOINT_CHATQNA="http://${host_ip}:8888/v1/chatqna"
+export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/delete_file"
+export BACKEND_SERVICE_ENDPOINT_FAQGEN="http://${host_ip}:8889/v1/faqgen"
+export BACKEND_SERVICE_ENDPOINT_CODEGEN="http://${host_ip}:7778/v1/codegen"
+export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${host_ip}:8890/v1/docsum"
+export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
+export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get_file"
+export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create"
+export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create"
+export CHAT_HISTORY_DELETE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/delete"
+export CHAT_HISTORY_GET_ENDPOINT="http://${host_ip}:6012/v1/chathistory/get"
+export PROMPT_SERVICE_GET_ENDPOINT="http://${host_ip}:6015/v1/prompt/get"
+export PROMPT_SERVICE_CREATE_ENDPOINT="http://${host_ip}:6015/v1/prompt/create"
+export KEYCLOAK_SERVICE_ENDPOINT="http://${host_ip}:8080"
+export LLM_SERVICE_HOST_PORT_FAQGEN=9002
+export LLM_SERVICE_HOST_PORT_CODEGEN=9001
+export LLM_SERVICE_HOST_PORT_DOCSUM=9003
+export PROMPT_COLLECTION_NAME="prompt"
+```
+
+Note: Please replace `host_ip` with your external IP address, do not use localhost.
+
+### Start all the services Docker Containers
+
+> Before running the docker compose command, you need to be in the folder that has the docker compose yaml file
+
+```bash
+cd GenAIExamples/ProductivitySuite/docker/xeon/
+```
+
+```bash
+docker compose -f compose.yaml up -d
+```
+
+### Setup Keycloak
+
+Please refer to [keycloak_setup_guide](keycloak_setup_guide.md) for more detail related to Keycloak configuration setup.
+
+### Validate Microservices
+
+1. TEI Embedding Service
+
+```bash
+curl ${host_ip}:6006/embed \
+ -X POST \
+ -d '{"inputs":"What is Deep Learning?"}' \
+ -H 'Content-Type: application/json'
+```
+
+2. Embedding Microservice
+
+```bash
+curl http://${host_ip}:6000/v1/embeddings\
+ -X POST \
+ -d '{"text":"hello"}' \
+ -H 'Content-Type: application/json'
+```
+
+3. Retriever Microservice
+
+To consume the retriever microservice, you need to generate a mock embedding vector by Python script. The length of embedding vector
+is determined by the embedding model.
+Here we use the model `EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"`, whose vector size is 768.
+
+Check the vector dimension of your embedding model, set `your_embedding` dimension equals to it.
+
+```bash
+export your_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
+curl http://${host_ip}:7000/v1/retrieval \
+ -X POST \
+ -d "{\"text\":\"test\",\"embedding\":${your_embedding}}" \
+ -H 'Content-Type: application/json'
+```
+
+4. TEI Reranking Service
+
+```bash
+curl http://${host_ip}:8808/rerank \
+ -X POST \
+ -d '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' \
+ -H 'Content-Type: application/json'
+```
+
+5. Reranking Microservice
+
+```bash
+curl http://${host_ip}:8000/v1/reranking\
+ -X POST \
+ -d '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}' \
+ -H 'Content-Type: application/json'
+```
+
+6. LLM backend Service (ChatQnA, DocSum, FAQGen)
+
+```bash
+curl http://${host_ip}:9009/generate \
+ -X POST \
+ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
+ -H 'Content-Type: application/json'
+```
+
+8. LLM backend Service (CodeGen)
+
+```bash
+curl http://${host_ip}:8028/generate \
+ -X POST \
+ -d '{"inputs":"def print_hello_world():","parameters":{"max_new_tokens":256, "do_sample": true}}' \
+ -H 'Content-Type: application/json'
+```
+
+9. ChatQnA LLM Microservice
+
+```bash
+curl http://${host_ip}:9000/v1/chat/completions\
+ -X POST \
+ -d '{"query":"What is Deep Learning?","max_new_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"streaming":true}' \
+ -H 'Content-Type: application/json'
+```
+
+10. CodeGen LLM Microservice
+
+```bash
+curl http://${host_ip}:9001/v1/chat/completions\
+ -X POST \
+ -d '{"query":"def print_hello_world():"}' \
+ -H 'Content-Type: application/json'
+```
+
+11. DocSum LLM Microservice
+
+```bash
+curl http://${host_ip}:9002/v1/chat/docsum\
+ -X POST \
+ -d '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5"}' \
+ -H 'Content-Type: application/json'
+```
+
+12. FAQGen LLM Microservice
+
+```bash
+curl http://${host_ip}:9003/v1/faqgen\
+ -X POST \
+ -d '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5"}' \
+ -H 'Content-Type: application/json'
+```
+
+13. ChatQnA MegaService
+
+```bash
+curl http://${host_ip}:8888/v1/chatqna -H "Content-Type: application/json" -d '{
+ "messages": "What is the revenue of Nike in 2023?"
+ }'
+```
+
+14. FAQGen MegaService
+
+```bash
+curl http://${host_ip}:8889/v1/faqgen -H "Content-Type: application/json" -d '{
+ "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."
+ }'
+```
+
+15. DocSum MegaService
+
+```bash
+curl http://${host_ip}:8890/v1/docsum -H "Content-Type: application/json" -d '{
+ "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."
+ }'
+```
+
+16. CodeGen MegaService
+
+```bash
+curl http://${host_ip}:7778/v1/codegen -H "Content-Type: application/json" -d '{
+ "messages": "def print_hello_world():"
+ }'
+```
+
+17. Dataprep Microservice
+
+If you want to update the default knowledge base, you can use the following commands:
+
+Update Knowledge Base via Local File Upload:
+
+```bash
+curl -X POST "http://${host_ip}:6007/v1/dataprep" \
+ -H "Content-Type: multipart/form-data" \
+ -F "files=@./nke-10k-2023.pdf"
+```
+
+This command updates a knowledge base by uploading a local file for processing. Update the file path according to your environment.
+
+Add Knowledge Base via HTTP Links:
+
+```bash
+curl -X POST "http://${host_ip}:6007/v1/dataprep" \
+ -H "Content-Type: multipart/form-data" \
+ -F 'link_list=["https://opea.dev"]'
+```
+
+This command updates a knowledge base by submitting a list of HTTP links for processing.
+
+Also, you are able to get the file list that you uploaded:
+
+```bash
+curl -X POST "http://${host_ip}:6007/v1/dataprep/get_file" \
+ -H "Content-Type: application/json"
+```
+
+To delete the file/link you uploaded:
+
+```bash
+# delete link
+curl -X POST "http://${host_ip}:6007/v1/dataprep/delete_file" \
+ -d '{"file_path": "https://opea.dev.txt"}' \
+ -H "Content-Type: application/json"
+
+# delete file
+curl -X POST "http://${host_ip}:6007/v1/dataprep/delete_file" \
+ -d '{"file_path": "nke-10k-2023.pdf"}' \
+ -H "Content-Type: application/json"
+
+# delete all uploaded files and links
+curl -X POST "http://${host_ip}:6007/v1/dataprep/delete_file" \
+ -d '{"file_path": "all"}' \
+ -H "Content-Type: application/json"
+```
+
+18. Prompt Registry Microservice
+
+If you want to update the default Prompts in the application for your user, you can use the following commands:
+
+```bash
+curl -X 'POST' \
+  http://${host_ip}:6015/v1/prompt/create \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "prompt_text": "test prompt", "user": "test"
+}'
+```
+
+Retrieve prompt from database based on user or prompt_id
+
+```bash
+curl -X 'POST' \
+  http://${host_ip}:6015/v1/prompt/get \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "user": "test"}'
+
+curl -X 'POST' \
+  http://${host_ip}:6015/v1/prompt/get \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "user": "test", "prompt_id":"{prompt_id returned from save prompt route above}"}'
+```
+
+Delete prompt from database based on prompt_id provided
+
+```bash
+curl -X 'POST' \
+  http://${host_ip}:6015/v1/prompt/delete \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "user": "test", "prompt_id":"{prompt_id to be deleted}"}'
+```
+
+19. Chat History Microservice
+
+To validate the chatHistory Microservice, you can use the following commands.
+
+Create a sample conversation and get the message ID.
+
+```bash
+curl -X 'POST' \
+ http://${host_ip}:6012/v1/chathistory/create \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "data": {
+ "messages": "test Messages", "user": "test"
+ }
+}'
+```
+
+Retrieve the conversation based on user or conversation id
+
+```bash
+curl -X 'POST' \
+ http://${host_ip}:6012/v1/chathistory/get \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "user": "test"}'
+
+curl -X 'POST' \
+ http://${host_ip}:6012/v1/chathistory/get \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "user": "test", "id":"{Conversation id to retrieve }"}'
+```
+
+Delete Conversation from database based on conversation id provided.
+
+```bash
+curl -X 'POST' \
+ http://${host_ip}:6012/v1/chathistory/delete \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "user": "test", "id":"{Conversation id to Delete}"}'
+```
+
+## 🚀 Launch the UI
+
+To access the frontend, open the following URL in your browser: http://{host_ip}:5174. By default, the UI runs on port 80 internally. If you prefer to use a different host port to access the frontend, you can modify the port mapping in the `compose.yaml` file as shown below:
+
+```yaml
+ productivity-suite-xeon-react-ui-server:
+ image: opea/productivity-suite-react-ui-server:latest
+ ...
+ ports:
+ - "5715:80" # Map port 5715 on the host to port 80 in the container.
+```
+
+Here is an example of running Productivity Suite
+![project-screenshot](../../assets/img/chat_qna_init.png)
+![project-screenshot](../../assets/img/Login_page.png)
+
+🧐 Features
+
+Here are some of the project's features:
+
+#### CHAT QNA
+
+- Start a Text Chat: Initiate a text chat with the ability to input written conversations, where the dialogue content can also be customized based on uploaded files.
+- Context Awareness: The AI assistant maintains the context of the conversation, understanding references to previous statements or questions. This allows for more natural and coherent exchanges.
+
+ ##### DATA SOURCE
+
+ - The choice between uploading locally or copying a remote link. Chat according to uploaded knowledge base.
+  - Uploaded files are listed, and the user is able to add or remove files/links.
+
+ ###### Screen Shot
+
+ ![project-screenshot](../../assets/img/data_source.png)
+
+- Clear: Clear the record of the current dialog box without retaining the contents of the dialog box.
+- Chat history: Historical chat records can still be retained after refreshing, making it easier for users to view the context.
+- Conversational Chat : The application maintains a history of the conversation, allowing users to review previous messages and the AI to refer back to earlier points in the dialogue when necessary.
+ ###### Screen Shots
+ ![project-screenshot](../../assets/img/chat_qna_init.png)
+ ![project-screenshot](../../assets/img/chatqna_with_conversation.png)
+
+#### CODEGEN
+
+- Generate code: generate the corresponding code based on the current user's input.
+ ###### Screen Shot
+ ![project-screenshot](../../assets/img/codegen.png)
+
+#### DOC SUMMARY
+
+- Summarizing Uploaded Files: Upload files from their local device, then click 'Generate Summary' to summarize the content of the uploaded file. The summary will be displayed on the 'Summary' box.
+- Summarizing Text via Pasting: Paste the text to be summarized into the text box, then click 'Generate Summary' to produce a condensed summary of the content, which will be displayed in the 'Summary' box on the right.
+- Scroll to Bottom: The summarized content will automatically scroll to the bottom.
+ ###### Screen Shot
+ ![project-screenshot](../../assets/img/doc_summary_paste.png)
+ ![project-screenshot](../../assets/img/doc_summary_file.png)
+
+#### FAQ Generator
+
+- Generate FAQs from Text via Pasting: Paste the text into the text box, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below.
+
+- Generate FAQs from Text via txt file Upload: Upload the file in the Upload bar, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below.
+ ###### Screen Shot
+ ![project-screenshot](../../assets/img/faq_generator.png)
diff --git a/ProductivitySuite/docker/xeon/compose.yaml b/ProductivitySuite/docker/xeon/compose.yaml
new file mode 100644
index 000000000..7f1ac1d9f
--- /dev/null
+++ b/ProductivitySuite/docker/xeon/compose.yaml
@@ -0,0 +1,370 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+version: "3.3"
+
+services:
+ redis-vector-db:
+ image: redis/redis-stack:7.2.0-v9
+ container_name: redis-vector-db
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ ports:
+ - "6379:6379"
+ - "8001:8001"
+ dataprep-redis-service:
+ image: ${REGISTRY:-opea}/dataprep-redis:${TAG:-latest}
+ container_name: dataprep-redis-server
+ depends_on:
+ - redis-vector-db
+ ports:
+ - "6007:6007"
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ REDIS_URL: ${REDIS_URL}
+ INDEX_NAME: ${INDEX_NAME}
+ tei-embedding-service:
+ image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
+ container_name: tei-embedding-server
+ ports:
+ - "6006:80"
+ volumes:
+ - "./data_embedding:/data"
+ shm_size: 1g
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
+ embedding:
+ image: ${REGISTRY:-opea}/embedding-tei:${TAG:-latest}
+ container_name: embedding-tei-server
+ depends_on:
+ - tei-embedding-service
+ ports:
+ - "6000:6000"
+ ipc: host
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
+ LANGCHAIN_PROJECT: "opea-embedding-service"
+ restart: unless-stopped
+ retriever:
+ image: ${REGISTRY:-opea}/retriever-redis:${TAG:-latest}
+ container_name: retriever-redis-server
+ depends_on:
+ - redis-vector-db
+ ports:
+ - "7000:7000"
+ ipc: host
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ REDIS_URL: ${REDIS_URL}
+ INDEX_NAME: ${INDEX_NAME}
+ TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
+ LANGCHAIN_PROJECT: "opea-retriever-service"
+ restart: unless-stopped
+ tei-reranking-service:
+ image: ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
+ container_name: tei-reranking-server
+ ports:
+ - "8808:80"
+ volumes:
+ - "./data_tei:/data"
+ shm_size: 1g
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ HF_HUB_DISABLE_PROGRESS_BARS: 1
+ HF_HUB_ENABLE_HF_TRANSFER: 0
+ command: --model-id ${RERANK_MODEL_ID} --auto-truncate
+ reranking:
+ image: ${REGISTRY:-opea}/reranking-tei:${TAG:-latest}
+ container_name: reranking-tei-xeon-server
+ depends_on:
+ - tei-reranking-service
+ ports:
+ - "8000:8000"
+ ipc: host
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ TEI_RERANKING_ENDPOINT: ${TEI_RERANKING_ENDPOINT}
+ HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ HF_HUB_DISABLE_PROGRESS_BARS: 1
+ HF_HUB_ENABLE_HF_TRANSFER: 0
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
+ LANGCHAIN_PROJECT: "opea-reranking-service"
+ restart: unless-stopped
+ tgi_service:
+ image: ghcr.io/huggingface/text-generation-inference:2.1.0
+ container_name: tgi-service
+ ports:
+ - "9009:80"
+ volumes:
+ - "./data:/data"
+ shm_size: 1g
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ HF_HUB_DISABLE_PROGRESS_BARS: 1
+ HF_HUB_ENABLE_HF_TRANSFER: 0
+ command: --model-id ${LLM_MODEL_ID}
+ llm:
+ image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+ container_name: llm-tgi-server
+ depends_on:
+ - tgi_service
+ ports:
+ - "9000:9000"
+ ipc: host
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT_CHATQNA}
+ HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ HF_HUB_DISABLE_PROGRESS_BARS: 1
+ HF_HUB_ENABLE_HF_TRANSFER: 0
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
+ LANGCHAIN_PROJECT: "opea-llm-service"
+ restart: unless-stopped
+ chatqna-xeon-backend-server:
+ image: ${REGISTRY:-opea}/chatqna:${TAG:-latest}
+ container_name: chatqna-xeon-backend-server
+ depends_on:
+ - redis-vector-db
+ - tei-embedding-service
+ - embedding
+ - retriever
+ - tei-reranking-service
+ - reranking
+ - tgi_service
+ - llm
+ ports:
+ - "8888:8888"
+ environment:
+ no_proxy: ${no_proxy}
+ https_proxy: ${https_proxy}
+ http_proxy: ${http_proxy}
+ MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP}
+ EMBEDDING_SERVICE_HOST_IP: ${EMBEDDING_SERVICE_HOST_IP}
+ RETRIEVER_SERVICE_HOST_IP: ${RETRIEVER_SERVICE_HOST_IP}
+ RERANK_SERVICE_HOST_IP: ${RERANK_SERVICE_HOST_IP}
+ LLM_SERVICE_HOST_IP: ${LLM_SERVICE_HOST_IP_CHATQNA}
+ ipc: host
+ restart: always
+ tgi_service_codegen:
+ image: ghcr.io/huggingface/text-generation-inference:2.1.0
+ container_name: tgi_service_codegen
+ ports:
+ - "8028:80"
+ volumes:
+ - "./data_codegen:/data"
+ shm_size: 1g
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ command: --model-id ${LLM_MODEL_ID_CODEGEN}
+ llm_codegen:
+ image: ${REGISTRY:-opea}/llm-tgi:${TAG:-latest}
+ container_name: llm-tgi-server-codegen
+ depends_on:
+ - tgi_service_codegen
+ ports:
+ - "9001:9000"
+ ipc: host
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT_CODEGEN}
+ HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
+ LANGCHAIN_PROJECT: "opea-llm-service"
+ restart: unless-stopped
+ codegen-xeon-backend-server:
+ image: ${REGISTRY:-opea}/codegen:${TAG:-latest}
+ container_name: codegen-xeon-backend-server
+ depends_on:
+      - llm_codegen
+ ports:
+ - "7778:7778"
+ environment:
+ no_proxy: ${no_proxy}
+ https_proxy: ${https_proxy}
+ http_proxy: ${http_proxy}
+ MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP}
+ LLM_SERVICE_HOST_IP: ${LLM_SERVICE_HOST_IP_CODEGEN}
+ LLM_SERVICE_PORT: ${LLM_SERVICE_HOST_PORT_CODEGEN}
+ ipc: host
+ restart: always
+ llm_faqgen:
+ image: ${REGISTRY:-opea}/llm-faqgen-tgi:${TAG:-latest}
+ container_name: llm-faqgen-server
+ depends_on:
+ - tgi_service
+ ports:
+ - "9002:9000"
+ ipc: host
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT_FAQGEN}
+ HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
+ LANGCHAIN_PROJECT: "opea-llm-service"
+ restart: unless-stopped
+ faqgen-xeon-backend-server:
+ image: ${REGISTRY:-opea}/faqgen:${TAG:-latest}
+ container_name: faqgen-xeon-backend-server
+ depends_on:
+ - tgi_service
+ - llm_faqgen
+ ports:
+ - "8889:8888"
+ environment:
+ no_proxy: ${no_proxy}
+ https_proxy: ${https_proxy}
+ http_proxy: ${http_proxy}
+ MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP}
+ LLM_SERVICE_PORT: ${LLM_SERVICE_HOST_PORT_FAQGEN}
+ LLM_SERVICE_HOST_IP: ${LLM_SERVICE_HOST_IP_FAQGEN}
+ ipc: host
+ restart: always
+ llm_docsum_server:
+ image: ${REGISTRY:-opea}/llm-docsum-tgi:${TAG:-latest}
+ container_name: llm-docsum-server
+ depends_on:
+ - tgi_service
+ ports:
+ - "9003:9000"
+ ipc: host
+ environment:
+ no_proxy: ${no_proxy}
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT_DOCSUM}
+ HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+ LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY}
+ LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2}
+ LANGCHAIN_PROJECT: "opea-llm-service"
+ restart: unless-stopped
+ docsum-xeon-backend-server:
+ image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
+ container_name: docsum-xeon-backend-server
+ depends_on:
+ - tgi_service
+ - llm_docsum_server
+ ports:
+ - "8890:8888"
+ environment:
+ no_proxy: ${no_proxy}
+ https_proxy: ${https_proxy}
+ http_proxy: ${http_proxy}
+ LLM_SERVICE_PORT: ${LLM_SERVICE_HOST_PORT_DOCSUM}
+ MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP}
+ LLM_SERVICE_HOST_IP: ${LLM_SERVICE_HOST_IP_DOCSUM}
+ ipc: host
+ restart: always
+ mongo:
+ image: mongo:7.0.11
+ container_name: mongodb
+ ports:
+ - 27017:27017
+ environment:
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ no_proxy: ${no_proxy}
+ command: mongod --quiet --logpath /dev/null
+
+ chathistory-mongo:
+ image: ${REGISTRY:-opea}/chathistory-mongo-server:${TAG:-latest}
+ container_name: chathistory-mongo-server
+ ports:
+ - "6012:6012"
+ ipc: host
+ environment:
+ http_proxy: ${http_proxy}
+ no_proxy: ${no_proxy}
+ https_proxy: ${https_proxy}
+ MONGO_HOST: ${MONGO_HOST}
+ MONGO_PORT: ${MONGO_PORT}
+ COLLECTION_NAME: ${COLLECTION_NAME}
+ restart: unless-stopped
+
+ promptregistry-mongo:
+ image: ${REGISTRY:-opea}/promptregistry-mongo-server:${TAG:-latest}
+ container_name: promptregistry-mongo-server
+ ports:
+ - "6015:6012"
+ ipc: host
+ environment:
+ http_proxy: ${http_proxy}
+ https_proxy: ${https_proxy}
+ no_proxy: ${no_proxy}
+ MONGO_HOST: ${MONGO_HOST}
+ MONGO_PORT: ${MONGO_PORT}
+ COLLECTION_NAME: ${PROMPT_COLLECTION_NAME}
+ restart: unless-stopped
+ keycloak:
+ image: quay.io/keycloak/keycloak:25.0.2
+ container_name: keycloak-server
+ ports:
+ - 8080:8080
+ environment:
+ - KEYCLOAK_ADMIN=admin
+ - KEYCLOAK_ADMIN_PASSWORD=admin
+ - KC_PROXY=edge
+ ipc: host
+ command: start-dev
+ restart: always
+
+ productivity-suite-xeon-react-ui-server:
+ image: ${REGISTRY:-opea}/productivity-suite-react-ui-server:${TAG:-latest}
+ container_name: productivity-suite-xeon-react-ui-server
+ ports:
+ - "5174:80"
+ environment:
+ - APP_BACKEND_SERVICE_ENDPOINT_CHATQNA=${BACKEND_SERVICE_ENDPOINT_CHATQNA}
+ - APP_BACKEND_SERVICE_ENDPOINT_CODEGEN=${BACKEND_SERVICE_ENDPOINT_CODEGEN}
+ - APP_BACKEND_SERVICE_ENDPOINT_DOCSUM=${BACKEND_SERVICE_ENDPOINT_DOCSUM}
+ - APP_BACKEND_SERVICE_ENDPOINT_FAQGEN=${BACKEND_SERVICE_ENDPOINT_FAQGEN}
+ - APP_DATAPREP_SERVICE_ENDPOINT=${DATAPREP_SERVICE_ENDPOINT}
+ - APP_DATAPREP_GET_FILE_ENDPOINT=${DATAPREP_GET_FILE_ENDPOINT}
+ - APP_DATAPREP_DELETE_FILE_ENDPOINT=${DATAPREP_DELETE_FILE_ENDPOINT}
+ - APP_CHAT_HISTORY_CREATE_ENDPOINT=${CHAT_HISTORY_CREATE_ENDPOINT}
+ - APP_CHAT_HISTORY_DELETE_ENDPOINT=${CHAT_HISTORY_DELETE_ENDPOINT}
+ - APP_CHAT_HISTORY_GET_ENDPOINT=${CHAT_HISTORY_GET_ENDPOINT}
+ - APP_PROMPT_SERVICE_GET_ENDPOINT=${PROMPT_SERVICE_GET_ENDPOINT}
+ - APP_PROMPT_SERVICE_CREATE_ENDPOINT=${PROMPT_SERVICE_CREATE_ENDPOINT}
+ - APP_KEYCLOAK_SERVICE_ENDPOINT=${KEYCLOAK_SERVICE_ENDPOINT}
+ ipc: host
+ restart: always
+networks:
+ default:
+ driver: bridge
diff --git a/ProductivitySuite/docker/xeon/keycloak_setup_guide.md b/ProductivitySuite/docker/xeon/keycloak_setup_guide.md
new file mode 100644
index 000000000..fdd5be4f5
--- /dev/null
+++ b/ProductivitySuite/docker/xeon/keycloak_setup_guide.md
@@ -0,0 +1,21 @@
+# Keycloak Configuration Setup
+
+This document shows you step-by-step how to configure Keycloak settings.
+
+The user management is done via Keycloak and the configuration steps look like this:
+
+1. Access the Keycloak admin console via the URL http://${host_ip}:8080, or the endpoint exposed from your Kubernetes cluster, to configure users. Use the default username (admin) and password (admin) to log in.
+ ![project-screenshot](../../assets/img/keycloak_login.png)
+2. Create a new realm named **productivitysuite** within Keycloak.
+ ![project-screenshot](../../assets/img/create_realm.png)
+ ![project-screenshot](../../assets/img/create_productivitysuite_realm.png)
+3. Create a new client called **productivitysuite** with default configurations.
+ ![project-screenshot](../../assets/img/create_client.png)
+4. Select the **productivitysuite** client that created just now. Insert your ProductivitySuite UI url endpoint into "Valid redirect URIs" and "Web origins" field. Example as screenshot below:
+ ![project-screenshot](../../assets/img/productivitysuite_client_settings.png)
+5. From the left pane select the Realm roles and create a new role name as user and another new role as viewer.
+ ![project-screenshot](../../assets/img/create_roles.png)
+6. Create a new user name as for example mary and another user as bob. Set passwords for both users (set 'Temporary' to 'Off'). Select Role mapping on the top, assign the user role to mary and assign the viewer role to bob.
+ ![project-screenshot](../../assets/img/create_users.png)
+ ![project-screenshot](../../assets/img/set_user_password.png)
+ ![project-screenshot](../../assets/img/user_role_mapping.png)
diff --git a/ProductivitySuite/docker/xeon/set_env.sh b/ProductivitySuite/docker/xeon/set_env.sh
new file mode 100644
index 000000000..ccc80d728
--- /dev/null
+++ b/ProductivitySuite/docker/xeon/set_env.sh
@@ -0,0 +1,48 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+export MONGO_HOST=${host_ip}
+export MONGO_PORT=27017
+export DB_NAME="opea"
+export COLLECTION_NAME="Conversations"
+export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+export LLM_MODEL_ID_CODEGEN="meta-llama/CodeLlama-7b-hf"
+export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006"
+export TEI_RERANKING_ENDPOINT="http://${host_ip}:8808"
+export TGI_LLM_ENDPOINT="http://${host_ip}:9009"
+export REDIS_URL="redis://${host_ip}:6379"
+export INDEX_NAME="rag-redis"
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export EMBEDDING_SERVICE_HOST_IP=${host_ip}
+export RETRIEVER_SERVICE_HOST_IP=${host_ip}
+export RERANK_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP_DOCSUM=${host_ip}
+export LLM_SERVICE_HOST_IP_FAQGEN=${host_ip}
+export LLM_SERVICE_HOST_IP_CODEGEN=${host_ip}
+export LLM_SERVICE_HOST_IP_CHATQNA=${host_ip}
+export TGI_LLM_ENDPOINT_CHATQNA="http://${host_ip}:9009"
+export TGI_LLM_ENDPOINT_CODEGEN="http://${host_ip}:8028"
+export TGI_LLM_ENDPOINT_FAQGEN="http://${host_ip}:9009"
+export TGI_LLM_ENDPOINT_DOCSUM="http://${host_ip}:9009"
+export BACKEND_SERVICE_ENDPOINT_CHATQNA="http://${host_ip}:8888/v1/chatqna"
+export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6009/v1/dataprep/delete_file"
+export BACKEND_SERVICE_ENDPOINT_FAQGEN="http://${host_ip}:8889/v1/faqgen"
+export BACKEND_SERVICE_ENDPOINT_CODEGEN="http://${host_ip}:7778/v1/codegen"
+export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${host_ip}:8890/v1/docsum"
+export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
+export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get_file"
+export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create"
+# NOTE: removed duplicate CHAT_HISTORY_CREATE_ENDPOINT export (identical to the line above)
+export CHAT_HISTORY_DELETE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/delete"
+export CHAT_HISTORY_GET_ENDPOINT="http://${host_ip}:6012/v1/chathistory/get"
+export PROMPT_SERVICE_GET_ENDPOINT="http://${host_ip}:6015/v1/prompt/get"
+export PROMPT_SERVICE_CREATE_ENDPOINT="http://${host_ip}:6015/v1/prompt/create"
+export KEYCLOAK_SERVICE_ENDPOINT="http://${host_ip}:8080"
+export LLM_SERVICE_HOST_PORT_FAQGEN=9002
+export LLM_SERVICE_HOST_PORT_CODEGEN=9001
+export LLM_SERVICE_HOST_PORT_DOCSUM=9003
+export PROMPT_COLLECTION_NAME="prompt"
diff --git a/ProductivitySuite/kubernetes/manifests/README.md b/ProductivitySuite/kubernetes/manifests/README.md
new file mode 100644
index 000000000..588374cdb
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/README.md
@@ -0,0 +1,70 @@
+# Deploy ProductivitySuite with ReactUI
+
+The document outlines the deployment steps for ProductivitySuite via Kubernetes cluster while utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline components and ReactUI, a popular React-based user interface library.
+
+ProductivitySuite consists of the following pipelines/examples and components:
+```
+- productivity-suite-react-ui
+- chatqna
+- codegen
+- docsum
+- faqgen
+- dataprep via redis
+- chat-history
+- prompt-registry
+- mongo
+- keycloak
+```
+
+## Prerequisites for Deploying ProductivitySuite with ReactUI:
+To begin with, ensure that you have the following prerequisites in place:
+
+1. Kubernetes installation: Make sure that you have Kubernetes installed.
+2. Images: Make sure you have all the images ready for the examples and components stated above. You may refer to [README](../../docker/xeon/README.md) for steps to build the images.
+3. Configuration Values: Set the following values in all the yaml files before proceeding with the deployment:
+ #### a. HUGGINGFACEHUB_API_TOKEN (Your HuggingFace token to download your desired model from HuggingFace):
+ ```
+ # You may set the HUGGINGFACEHUB_API_TOKEN via method:
+ export HUGGINGFACEHUB_API_TOKEN="YourOwnToken"
+ cd GenAIExamples/ProductivitySuite/kubernetes/manifests/xeon/
+ sed -i "s/insert-your-huggingface-token-here/${HUGGINGFACEHUB_API_TOKEN}/g" *.yaml
+ ```
+
+ #### b. Set the proxies based on your network configuration
+ ```
+ # Look for http_proxy, https_proxy and no_proxy key and fill up the values for all the yaml files with your system proxy configuration.
+ ```
+
+ #### c. Set all the backend service endpoint for REACT UI service
+ ```
+ # Setup all the backend service endpoint in productivity_suite_reactui.yaml for UI to consume with.
+ # Look for ENDPOINT in the yaml and insert all the url endpoint for all the required backend service.
+ ```
+4. MODEL_ID and model-volume (OPTIONAL): You may also customize the "MODEL_ID" to use a different model, and the model-volume for the volume to be mounted.
+5. After finishing the steps above, you can proceed with the deployment of the yaml files.
+
+## Deploying ProductivitySuite
+You can use yaml files in xeon folder to deploy ProductivitySuite with reactUI.
+```
+cd GenAIExamples/ProductivitySuite/kubernetes/manifests/xeon/
+kubectl apply -f .
+```
+
+## User Management via Keycloak Configuration
+Please refer to [keycloak_setup_guide](../../docker/xeon/keycloak_setup_guide.md) for more detail related to Keycloak configuration setup.
+
+## Verify Services
+To verify the installation, run command 'kubectl get pod' to make sure all pods are running.
+
+To view all the available services, run command 'kubectl get svc' to obtain the ports that need to be used as backend service endpoints in productivity_suite_reactui.yaml.
+
+You may use `kubectl port-forward service/<service-name> <local-port>:<service-port>` to forward the port of all the services if necessary.
+```
+# For example, 'kubectl get svc | grep productivity'
+productivity-suite-react-ui ClusterIP 10.96.3.236 80/TCP
+
+# By default, productivity-suite-react-ui service export port 80, forward it to 5174 via command:
+'kubectl port-forward service/productivity-suite-react-ui 5174:80'
+```
+
+You may open up the productivity suite react UI by using http://localhost:5174 in the browser.
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/chat_history.yaml b/ProductivitySuite/kubernetes/manifests/xeon/chat_history.yaml
new file mode 100644
index 000000000..bc90d35c9
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/chat_history.yaml
@@ -0,0 +1,75 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chat-history-config
+data:
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ MONGO_HOST: "mongo"
+ MONGO_PORT: "27017"
+ DB_NAME: "OPEA"
+ COLLECTION_NAME: "ChatHistory"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: chat-history
+ labels:
+ helm.sh/chart: chat-history-0.1.0
+ app.kubernetes.io/name: chat-history
+ app.kubernetes.io/instance: chat-history
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 6012
+ targetPort: 6012
+ protocol: TCP
+ name: chat-history
+ selector:
+ app.kubernetes.io/name: chat-history
+ app.kubernetes.io/instance: chat-history
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chat-history
+ labels:
+ helm.sh/chart: chat-history-0.1.0
+ app.kubernetes.io/name: chat-history
+ app.kubernetes.io/instance: chat-history
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: chat-history
+ app.kubernetes.io/instance: chat-history
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: chat-history
+ app.kubernetes.io/instance: chat-history
+ spec:
+ securityContext: null
+ containers:
+ - name: chat-history
+ envFrom:
+ - configMapRef:
+ name: chat-history-config
+ securityContext: null
+ image: "opea/chathistory-mongo-server:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: chat-history
+ containerPort: 6012
+ protocol: TCP
+ resources: null
+---
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/chatqna.yaml b/ProductivitySuite/kubernetes/manifests/xeon/chatqna.yaml
new file mode 100644
index 000000000..be2dfb5cb
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/chatqna.yaml
@@ -0,0 +1,1101 @@
+---
+# Source: chatqna/charts/data-prep/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-data-prep-config
+ labels:
+ helm.sh/chart: data-prep-0.8.0
+ app.kubernetes.io/name: data-prep
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TEI_ENDPOINT: "http://chatqna-tei"
+ REDIS_URL: "redis://chatqna-redis-vector-db:6379"
+ INDEX_NAME: "rag-redis"
+ HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
+ HF_HOME: "/tmp/.cache/huggingface"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ LANGCHAIN_TRACING_V2: "false"
+ LANGCHAIN_API_KEY: "insert-your-langchain-key-here"
+ LANGCHAIN_PROJECT: "opea-dataprep-service"
+---
+# Source: chatqna/charts/embedding-usvc/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-embedding-usvc-config
+ labels:
+ helm.sh/chart: embedding-usvc-0.8.0
+ app.kubernetes.io/name: embedding-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TEI_EMBEDDING_ENDPOINT: "http://chatqna-tei"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ LANGCHAIN_TRACING_V2: "false"
+ LANGCHAIN_API_KEY: insert-your-langchain-key-here
+ LANGCHAIN_PROJECT: "opea-embedding-service"
+---
+# Source: chatqna/charts/llm-uservice/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-llm-uservice-config
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TGI_LLM_ENDPOINT: "http://chatqna-tgi"
+ HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
+ HF_HOME: "/tmp/.cache/huggingface"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ LANGCHAIN_TRACING_V2: "false"
+ LANGCHAIN_API_KEY: insert-your-langchain-key-here
+ LANGCHAIN_PROJECT: "opea-llm-uservice"
+---
+# Source: chatqna/charts/reranking-usvc/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-reranking-usvc-config
+ labels:
+ helm.sh/chart: reranking-usvc-0.8.0
+ app.kubernetes.io/name: reranking-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TEI_RERANKING_ENDPOINT: "http://chatqna-teirerank"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ LANGCHAIN_TRACING_V2: "false"
+ LANGCHAIN_API_KEY: "insert-your-langchain-key-here"
+ LANGCHAIN_PROJECT: "opea-reranking-service"
+---
+# Source: chatqna/charts/retriever-usvc/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-retriever-usvc-config
+ labels:
+ helm.sh/chart: retriever-usvc-0.8.0
+ app.kubernetes.io/name: retriever-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TEI_EMBEDDING_ENDPOINT: "http://chatqna-tei"
+ REDIS_URL: "redis://chatqna-redis-vector-db:6379"
+ INDEX_NAME: "rag-redis"
+ EASYOCR_MODULE_PATH: "/tmp/.EasyOCR"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ LANGCHAIN_TRACING_V2: "false"
+ LANGCHAIN_API_KEY: "insert-your-langchain-key-here"
+ LANGCHAIN_PROJECT: "opea-retriever-service"
+ HF_HOME: "/tmp/.cache/huggingface"
+---
+# Source: chatqna/charts/tei/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-tei-config
+ labels:
+ helm.sh/chart: tei-0.8.0
+ app.kubernetes.io/name: tei
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.2"
+ app.kubernetes.io/managed-by: Helm
+data:
+ MODEL_ID: "BAAI/bge-base-en-v1.5"
+ PORT: "2081"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ NUMBA_CACHE_DIR: "/tmp"
+ TRANSFORMERS_CACHE: "/tmp/transformers_cache"
+ HF_HOME: "/tmp/.cache/huggingface"
+ MAX_WARMUP_SEQUENCE_LENGTH: "512"
+---
+# Source: chatqna/charts/teirerank/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-teirerank-config
+ labels:
+ helm.sh/chart: teirerank-0.8.0
+ app.kubernetes.io/name: teirerank
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.2"
+ app.kubernetes.io/managed-by: Helm
+data:
+ MODEL_ID: "BAAI/bge-reranker-base"
+ PORT: "2082"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ NUMBA_CACHE_DIR: "/tmp"
+ TRANSFORMERS_CACHE: "/tmp/transformers_cache"
+ HF_HOME: "/tmp/.cache/huggingface"
+---
+# Source: chatqna/charts/tgi/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: chatqna-tgi-config
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ MODEL_ID: "Intel/neural-chat-7b-v3-3"
+ PORT: "2080"
+ HUGGING_FACE_HUB_TOKEN: "insert-your-huggingface-token-here"
+ HF_TOKEN: "insert-your-huggingface-token-here"
+ MAX_INPUT_TOKENS: "1024"
+ MAX_TOTAL_TOKENS: "4096"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ HABANA_LOGS: "/tmp/habana_logs"
+ NUMBA_CACHE_DIR: "/tmp"
+ TRANSFORMERS_CACHE: "/tmp/transformers_cache"
+ HF_HOME: "/tmp/.cache/huggingface"
+---
+# Source: chatqna/charts/data-prep/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-data-prep
+ labels:
+ helm.sh/chart: data-prep-0.8.0
+ app.kubernetes.io/name: data-prep
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 6007
+ targetPort: 6007
+ protocol: TCP
+ name: data-prep
+ selector:
+ app.kubernetes.io/name: data-prep
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/embedding-usvc/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-embedding-usvc
+ labels:
+ helm.sh/chart: embedding-usvc-0.8.0
+ app.kubernetes.io/name: embedding-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 6000
+ targetPort: 6000
+ protocol: TCP
+ name: embedding-usvc
+ selector:
+ app.kubernetes.io/name: embedding-usvc
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/llm-uservice/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 9000
+ targetPort: 9000
+ protocol: TCP
+ name: llm-uservice
+ selector:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/redis-vector-db/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-redis-vector-db
+ labels:
+ helm.sh/chart: redis-vector-db-0.8.0
+ app.kubernetes.io/name: redis-vector-db
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "7.2.0-v9"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 6379
+ targetPort: 6379
+ protocol: TCP
+ name: redis-service
+ - port: 8001
+ targetPort: 8001
+ protocol: TCP
+ name: redis-insight
+ selector:
+ app.kubernetes.io/name: redis-vector-db
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/reranking-usvc/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-reranking-usvc
+ labels:
+ helm.sh/chart: reranking-usvc-0.8.0
+ app.kubernetes.io/name: reranking-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8000
+ targetPort: 8000
+ protocol: TCP
+ name: reranking-usvc
+ selector:
+ app.kubernetes.io/name: reranking-usvc
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/retriever-usvc/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-retriever-usvc
+ labels:
+ helm.sh/chart: retriever-usvc-0.8.0
+ app.kubernetes.io/name: retriever-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 7000
+ targetPort: 7000
+ protocol: TCP
+ name: retriever-usvc
+ selector:
+ app.kubernetes.io/name: retriever-usvc
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/tei/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-tei
+ labels:
+ helm.sh/chart: tei-0.8.0
+ app.kubernetes.io/name: tei
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.2"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 2081
+ protocol: TCP
+ name: tei
+ selector:
+ app.kubernetes.io/name: tei
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/teirerank/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-teirerank
+ labels:
+ helm.sh/chart: teirerank-0.8.0
+ app.kubernetes.io/name: teirerank
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.2"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 2082
+ protocol: TCP
+ name: teirerank
+ selector:
+ app.kubernetes.io/name: teirerank
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/tgi/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 2080
+ protocol: TCP
+ name: tgi
+ selector:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: chatqna
+ labels:
+ helm.sh/chart: chatqna-0.8.0
+ app.kubernetes.io/name: chatqna
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8888
+ targetPort: 8888
+ protocol: TCP
+ name: chatqna
+ selector:
+ app.kubernetes.io/name: chatqna
+ app.kubernetes.io/instance: chatqna
+---
+# Source: chatqna/charts/data-prep/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-data-prep
+ labels:
+ helm.sh/chart: data-prep-0.8.0
+ app.kubernetes.io/name: data-prep
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: data-prep
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: data-prep
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: chatqna
+ envFrom:
+ - configMapRef:
+ name: chatqna-data-prep-config
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: false
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/dataprep-redis:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: data-prep
+ containerPort: 6007
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ resources:
+ {}
+ volumes:
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/embedding-usvc/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-embedding-usvc
+ labels:
+ helm.sh/chart: embedding-usvc-0.8.0
+ app.kubernetes.io/name: embedding-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: embedding-usvc
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: embedding-usvc
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: chatqna
+ envFrom:
+ - configMapRef:
+ name: chatqna-embedding-usvc-config
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/embedding-tei:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: embedding-usvc
+ containerPort: 6000
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ resources:
+ {}
+ volumes:
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/llm-uservice/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: chatqna
+ envFrom:
+ - configMapRef:
+ name: chatqna-llm-uservice-config
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: false
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/llm-tgi:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: llm-uservice
+ containerPort: 9000
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ startupProbe:
+ exec:
+ command:
+ - curl
+ - http://chatqna-tgi
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ failureThreshold: 120
+ resources:
+ {}
+ volumes:
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/redis-vector-db/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-redis-vector-db
+ labels:
+ helm.sh/chart: redis-vector-db-0.8.0
+ app.kubernetes.io/name: redis-vector-db
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "7.2.0-v9"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: redis-vector-db
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: redis-vector-db
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: redis-vector-db
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "redis/redis-stack:7.2.0-v9"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /data
+ name: data-volume
+ - mountPath: /redisinsight
+ name: redisinsight-volume
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: redis-service
+ containerPort: 6379
+ protocol: TCP
+ - name: redis-insight
+ containerPort: 8001
+ protocol: TCP
+ startupProbe:
+ tcpSocket:
+ port: 6379 # Probe the Redis port
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ failureThreshold: 120
+ resources:
+ {}
+ volumes:
+ - name: data-volume
+ emptyDir: {}
+ - name: redisinsight-volume
+ emptyDir: {}
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/reranking-usvc/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-reranking-usvc
+ labels:
+ helm.sh/chart: reranking-usvc-0.8.0
+ app.kubernetes.io/name: reranking-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: reranking-usvc
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: reranking-usvc
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: chatqna
+ envFrom:
+ - configMapRef:
+ name: chatqna-reranking-usvc-config
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/reranking-tei:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: reranking-usvc
+ containerPort: 8000
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ resources:
+ {}
+ volumes:
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/retriever-usvc/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-retriever-usvc
+ labels:
+ helm.sh/chart: retriever-usvc-0.8.0
+ app.kubernetes.io/name: retriever-usvc
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: retriever-usvc
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: retriever-usvc
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: chatqna
+ envFrom:
+ - configMapRef:
+ name: chatqna-retriever-usvc-config
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/retriever-redis:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: retriever-usvc
+ containerPort: 7000
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ startupProbe:
+ exec:
+ command:
+ - curl
+ - http://chatqna-tei
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ failureThreshold: 120
+ resources:
+ {}
+ volumes:
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/tei/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-tei
+ labels:
+ helm.sh/chart: tei-0.8.0
+ app.kubernetes.io/name: tei
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.2"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: tei
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: tei
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: tei
+ envFrom:
+ - configMapRef:
+ name: chatqna-tei-config
+ securityContext:
+ {}
+ image: "ghcr.io/huggingface/text-embeddings-inference:cpu-1.5"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /data
+ name: model-volume
+ - mountPath: /dev/shm
+ name: shm
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: http
+ containerPort: 2081
+ protocol: TCP
+ resources:
+ {}
+ volumes:
+ - name: model-volume
+ hostPath:
+ path: /mnt/opea-models
+ type: Directory
+ - name: shm
+ emptyDir:
+ medium: Memory
+ sizeLimit: 1Gi
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/teirerank/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-teirerank
+ labels:
+ helm.sh/chart: teirerank-0.8.0
+ app.kubernetes.io/name: teirerank
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.2"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: teirerank
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: teirerank
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: teirerank
+ envFrom:
+ - configMapRef:
+ name: chatqna-teirerank-config
+ securityContext:
+ {}
+ image: "ghcr.io/huggingface/text-embeddings-inference:cpu-1.5"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /data
+ name: model-volume
+ - mountPath: /dev/shm
+ name: shm
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: http
+ containerPort: 2082
+ protocol: TCP
+ resources:
+ {}
+ volumes:
+ - name: model-volume
+ hostPath:
+ path: /mnt/opea-models
+ type: Directory
+ - name: shm
+ emptyDir:
+ medium: Memory
+ sizeLimit: 1Gi
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/charts/tgi/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: tgi
+ envFrom:
+ - configMapRef:
+ name: chatqna-tgi-config
+ securityContext:
+ {}
+ image: "ghcr.io/huggingface/text-generation-inference:2.1.0"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /data
+ name: model-volume
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: http
+ containerPort: 2080
+ protocol: TCP
+ resources:
+ {}
+ volumes:
+ - name: model-volume
+ hostPath:
+ path: /mnt/opea-models
+ type: Directory
+ - name: tmp
+ emptyDir: {}
+---
+# Source: chatqna/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: chatqna
+ labels:
+ helm.sh/chart: chatqna-0.8.0
+ app.kubernetes.io/name: chatqna
+ app.kubernetes.io/instance: chatqna
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: chatqna
+ app.kubernetes.io/instance: chatqna
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: chatqna
+ app.kubernetes.io/instance: chatqna
+ spec:
+ securityContext:
+ null
+ containers:
+ - name: chatqna
+ env:
+ - name: LLM_SERVICE_HOST_IP
+ value: chatqna-llm-uservice
+ - name: RERANK_SERVICE_HOST_IP
+ value: chatqna-reranking-usvc
+ - name: RETRIEVER_SERVICE_HOST_IP
+ value: chatqna-retriever-usvc
+ - name: EMBEDDING_SERVICE_HOST_IP
+ value: chatqna-embedding-usvc
+ - name: http_proxy
+ value: ""
+ - name: https_proxy
+ value: ""
+ - name: no_proxy
+ value: ""
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/chatqna:latest"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: chatqna
+ containerPort: 8888
+ protocol: TCP
+ # startupProbe:
+ # httpGet:
+ # host: chatqna-llm-uservice
+ # port: 9000
+ # path: /
+ # initialDelaySeconds: 5
+ # periodSeconds: 5
+ # failureThreshold: 120
+ # livenessProbe:
+ # httpGet:
+ # path: /
+ # port: 8888
+ # readinessProbe:
+ # httpGet:
+ # path: /
+ # port: 8888
+ resources:
+ null
+ volumes:
+ - name: tmp
+ emptyDir: {}
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/codegen.yaml b/ProductivitySuite/kubernetes/manifests/xeon/codegen.yaml
new file mode 100644
index 000000000..b9f38e9f7
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/codegen.yaml
@@ -0,0 +1,333 @@
+---
+# Source: codegen/charts/llm-uservice/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: codegen-llm-uservice-config
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TGI_LLM_ENDPOINT: "http://codegen-tgi"
+ HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
+ HF_HOME: "/tmp/.cache/huggingface"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ LANGCHAIN_TRACING_V2: "false"
+ LANGCHAIN_API_KEY: insert-your-langchain-key-here
+ LANGCHAIN_PROJECT: "opea-llm-uservice"
+---
+# Source: codegen/charts/tgi/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: codegen-tgi-config
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.4"
+ app.kubernetes.io/managed-by: Helm
+data:
+ MODEL_ID: "meta-llama/CodeLlama-7b-hf"
+ PORT: "2080"
+ HUGGING_FACE_HUB_TOKEN: "insert-your-huggingface-token-here"
+ HF_TOKEN: "insert-your-huggingface-token-here"
+ MAX_INPUT_TOKENS: "1024"
+ MAX_TOTAL_TOKENS: "4096"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ HABANA_LOGS: "/tmp/habana_logs"
+ NUMBA_CACHE_DIR: "/tmp"
+ TRANSFORMERS_CACHE: "/tmp/transformers_cache"
+ HF_HOME: "/tmp/.cache/huggingface"
+---
+# Source: codegen/charts/llm-uservice/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: codegen-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 9000
+ targetPort: 9000
+ protocol: TCP
+ name: llm-uservice
+ selector:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: codegen
+---
+# Source: codegen/charts/tgi/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: codegen-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.4"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 2080
+ protocol: TCP
+ name: tgi
+ selector:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: codegen
+---
+# Source: codegen/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: codegen
+ labels:
+ helm.sh/chart: codegen-0.8.0
+ app.kubernetes.io/name: codegen
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 7778
+ targetPort: 7778
+ protocol: TCP
+ name: codegen
+ selector:
+ app.kubernetes.io/name: codegen
+ app.kubernetes.io/instance: codegen
+---
+# Source: codegen/charts/llm-uservice/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: codegen-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: codegen
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: codegen
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: codegen
+ envFrom:
+ - configMapRef:
+ name: codegen-llm-uservice-config
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: false
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/llm-tgi:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: llm-uservice
+ containerPort: 9000
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ startupProbe:
+ exec:
+ command:
+ - curl
+ - http://codegen-tgi
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ failureThreshold: 120
+ resources:
+ {}
+ volumes:
+ - name: tmp
+ emptyDir: {}
+---
+# Source: codegen/charts/tgi/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: codegen-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.4"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: codegen
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: codegen
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: tgi
+ envFrom:
+ - configMapRef:
+ name: codegen-tgi-config
+ securityContext:
+ {}
+ image: "ghcr.io/huggingface/text-generation-inference:1.4"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /data
+ name: model-volume
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: http
+ containerPort: 2080
+ protocol: TCP
+ resources:
+ {}
+ volumes:
+ - name: model-volume
+ hostPath:
+ path: /mnt/opea-models
+ type: Directory
+ - name: tmp
+ emptyDir: {}
+---
+# Source: codegen/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: codegen
+ labels:
+ helm.sh/chart: codegen-0.8.0
+ app.kubernetes.io/name: codegen
+ app.kubernetes.io/instance: codegen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: codegen
+ app.kubernetes.io/instance: codegen
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: codegen
+ app.kubernetes.io/instance: codegen
+ spec:
+ securityContext:
+ null
+ containers:
+ - name: codegen
+ env:
+ - name: LLM_SERVICE_HOST_IP
+ value: codegen-llm-uservice
+ - name: http_proxy
+ value: ""
+ - name: https_proxy
+ value: ""
+ - name: no_proxy
+ value: ""
+ #- name: MEGA_SERVICE_PORT
+ # value: 7778
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/codegen:latest"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: codegen
+ containerPort: 7778
+ protocol: TCP
+ # startupProbe:
+ # httpGet:
+ # host: codegen-llm-uservice
+ # port: 9000
+ # path: /
+ # initialDelaySeconds: 5
+ # periodSeconds: 5
+ # failureThreshold: 120
+ # livenessProbe:
+ # httpGet:
+ # path: /
+ # port: 7778
+ # readinessProbe:
+ # httpGet:
+ # path: /
+ # port: 7778
+ resources:
+ null
+ volumes:
+ - name: tmp
+ emptyDir: {}
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/docsum.yaml b/ProductivitySuite/kubernetes/manifests/xeon/docsum.yaml
new file mode 100644
index 000000000..dba69096a
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/docsum.yaml
@@ -0,0 +1,317 @@
+---
+# Source: docsum/charts/llm-uservice/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: docsum-llm-uservice-config
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TGI_LLM_ENDPOINT: "http://docsum-tgi"
+ HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
+ HF_HOME: "/tmp/.cache/huggingface"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ LANGCHAIN_TRACING_V2: "false"
+ LANGCHAIN_API_KEY: insert-your-langchain-key-here
+ LANGCHAIN_PROJECT: "opea-llm-uservice"
+---
+# Source: docsum/charts/tgi/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: docsum-tgi-config
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ MODEL_ID: "Intel/neural-chat-7b-v3-3"
+ PORT: "2080"
+ HUGGING_FACE_HUB_TOKEN: "insert-your-huggingface-token-here"
+ HF_TOKEN: "insert-your-huggingface-token-here"
+ MAX_INPUT_TOKENS: "1024"
+ MAX_TOTAL_TOKENS: "4096"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ HABANA_LOGS: "/tmp/habana_logs"
+ NUMBA_CACHE_DIR: "/tmp"
+ TRANSFORMERS_CACHE: "/tmp/transformers_cache"
+ HF_HOME: "/tmp/.cache/huggingface"
+---
+# Source: docsum/charts/llm-uservice/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: docsum-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 9000
+ targetPort: 9000
+ protocol: TCP
+ name: llm-uservice
+ selector:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: docsum
+---
+# Source: docsum/charts/tgi/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: docsum-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 2080
+ protocol: TCP
+ name: tgi
+ selector:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: docsum
+---
+# Source: docsum/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: docsum
+ labels:
+ helm.sh/chart: docsum-0.8.0
+ app.kubernetes.io/name: docsum
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8888
+ targetPort: 8888
+ protocol: TCP
+ name: docsum
+ selector:
+ app.kubernetes.io/name: docsum
+ app.kubernetes.io/instance: docsum
+---
+# Source: docsum/charts/llm-uservice/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: docsum-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: docsum
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: docsum
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: docsum
+ envFrom:
+ - configMapRef:
+ name: docsum-llm-uservice-config
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: false
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/llm-docsum-tgi:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: llm-uservice
+ containerPort: 9000
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ startupProbe:
+ exec:
+ command:
+ - curl
+ - http://docsum-tgi
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ failureThreshold: 120
+ resources:
+ {}
+ volumes:
+ - name: tmp
+ emptyDir: {}
+---
+# Source: docsum/charts/tgi/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: docsum-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: docsum
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: docsum
+ spec:
+ securityContext:
+ {}
+ containers:
+ - name: tgi
+ envFrom:
+ - configMapRef:
+ name: docsum-tgi-config
+ securityContext:
+ {}
+ image: "ghcr.io/huggingface/text-generation-inference:2.1.0"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /data
+ name: model-volume
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: http
+ containerPort: 2080
+ protocol: TCP
+ resources:
+ {}
+ volumes:
+ - name: model-volume
+ hostPath:
+ path: /mnt/opea-models
+ type: Directory
+ - name: tmp
+ emptyDir: {}
+---
+# Source: docsum/templates/deployment.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: docsum
+ labels:
+ helm.sh/chart: docsum-0.8.0
+ app.kubernetes.io/name: docsum
+ app.kubernetes.io/instance: docsum
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: docsum
+ app.kubernetes.io/instance: docsum
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: docsum
+ app.kubernetes.io/instance: docsum
+ spec:
+ securityContext:
+ null
+ containers:
+ - name: docsum
+ env:
+ - name: LLM_SERVICE_HOST_IP
+ value: docsum-llm-uservice
+ - name: http_proxy
+ value: ""
+ - name: https_proxy
+ value: ""
+ - name: no_proxy
+ value: ""
+ #- name: MEGA_SERVICE_PORT
+ # value: 8888
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ seccompProfile:
+ type: RuntimeDefault
+ image: "opea/docsum:latest"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp
+ ports:
+ - name: docsum
+ containerPort: 8888
+ protocol: TCP
+ resources:
+ null
+ volumes:
+ - name: tmp
+ emptyDir: {}
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/faqgen.yaml b/ProductivitySuite/kubernetes/manifests/xeon/faqgen.yaml
new file mode 100644
index 000000000..b6f089f4d
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/faqgen.yaml
@@ -0,0 +1,243 @@
+---
+# Source: faqgen/charts/llm-uservice/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: faqgen-llm-uservice-config
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ TGI_LLM_ENDPOINT: "http://faqgen-tgi:80"
+ HUGGINGFACEHUB_API_TOKEN: "insert-your-huggingface-token-here"
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+---
+# Source: faqgen/charts/tgi/templates/configmap.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: faqgen-tgi-config
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+data:
+ MODEL_ID: "Intel/neural-chat-7b-v3-3"
+ PORT: "80"
+  HUGGING_FACE_HUB_TOKEN: "insert-your-huggingface-token-here" # TGI reads HUGGING_FACE_HUB_TOKEN (see codegen/docsum tgi configmaps)
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+---
+# Source: faqgen/charts/llm-uservice/charts/tgi/templates/service.yaml
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: faqgen-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: tgi
+ selector:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: faqgen
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: faqgen-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 9000
+ targetPort: 9000
+ protocol: TCP
+ name: llm-uservice
+ selector:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: faqgen
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: faqgen
+ labels:
+ helm.sh/chart: faqgen-0.8.0
+ app.kubernetes.io/name: faqgen
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 8888
+ targetPort: 8888
+ protocol: TCP
+ name: faqgen
+ selector:
+ app.kubernetes.io/name: faqgen
+ app.kubernetes.io/instance: faqgen
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: faqgen-tgi
+ labels:
+ helm.sh/chart: tgi-0.8.0
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "2.1.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: faqgen
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: tgi
+ app.kubernetes.io/instance: faqgen
+ spec:
+ securityContext: {}
+ containers:
+ - name: tgi
+ envFrom:
+ - configMapRef:
+ name: faqgen-tgi-config
+ securityContext: {}
+ image: "ghcr.io/huggingface/text-generation-inference:2.1.0"
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - mountPath: /data
+ name: model-volume
+ ports:
+ - name: http
+ containerPort: 80
+ protocol: TCP
+ resources: {}
+ volumes:
+ - name: model-volume
+ hostPath:
+ path: /mnt/opea-models
+ type: Directory
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: faqgen-llm-uservice
+ labels:
+ helm.sh/chart: llm-uservice-0.8.0
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: faqgen
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: llm-uservice
+ app.kubernetes.io/instance: faqgen
+ spec:
+ securityContext: {}
+ containers:
+ - name: faqgen
+ envFrom:
+ - configMapRef:
+ name: faqgen-llm-uservice-config
+ securityContext: {}
+ image: "opea/llm-faqgen-tgi:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: llm-uservice
+ containerPort: 9000
+ protocol: TCP
+ startupProbe:
+ exec:
+ command:
+ - curl
+ - http://faqgen-tgi:80
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ failureThreshold: 120
+ resources: {}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: faqgen
+ labels:
+ helm.sh/chart: faqgen-0.8.0
+ app.kubernetes.io/name: faqgen
+ app.kubernetes.io/instance: faqgen
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: faqgen
+ app.kubernetes.io/instance: faqgen
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: faqgen
+ app.kubernetes.io/instance: faqgen
+ spec:
+      securityContext: null  # TODO(review): harden to match chatqna/codegen pods (runAsNonRoot, drop ALL, readOnlyRootFilesystem)
+ containers:
+ - name: faqgen
+ env:
+ - name: LLM_SERVICE_HOST_IP
+ value: faqgen-llm-uservice
+ - name: http_proxy
+ value: ""
+ - name: https_proxy
+ value: ""
+ - name: no_proxy
+ value: ""
+ securityContext: null
+ image: "opea/faqgen:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: faqgen
+ containerPort: 8888
+ protocol: TCP
+ resources: null
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/keycloak_install.yaml b/ProductivitySuite/kubernetes/manifests/xeon/keycloak_install.yaml
new file mode 100644
index 000000000..8ddb1869e
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/keycloak_install.yaml
@@ -0,0 +1,66 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: keycloak
+spec:
+ progressDeadlineSeconds: 600
+ replicas: 1
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ app: keycloak
+ template:
+ metadata:
+ labels:
+ app: keycloak
+ spec:
+ containers:
+ - args:
+ - start-dev
+ env:
+ - name: KEYCLOAK_ADMIN
+ value: admin
+ - name: KEYCLOAK_ADMIN_PASSWORD
+          value: admin  # NOTE(review): default dev-only credentials — change before any non-dev deployment
+ - name: KC_PROXY
+ value: edge
+ image: quay.io/keycloak/keycloak:25.0.2
+ imagePullPolicy: IfNotPresent
+ name: keycloak
+ ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /realms/master
+ port: 8080
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ terminationMessagePolicy: File
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: keycloak
+spec:
+ allocateLoadBalancerNodePorts: true
+ ports:
+ - name: http
+ nodePort: 31503
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ app: keycloak
+ type: LoadBalancer
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/mongo.yaml b/ProductivitySuite/kubernetes/manifests/xeon/mongo.yaml
new file mode 100644
index 000000000..aca96944e
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/mongo.yaml
@@ -0,0 +1,71 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: mongo-config
+data:
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: mongo
+ labels:
+ helm.sh/chart: mongo-0.1.0
+ app.kubernetes.io/name: mongo
+ app.kubernetes.io/instance: mongo
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 27017
+ targetPort: 27017
+ protocol: TCP
+ name: mongo
+ selector:
+ app.kubernetes.io/name: mongo
+ app.kubernetes.io/instance: mongo
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: mongo
+ labels:
+ helm.sh/chart: mongo-0.1.0
+ app.kubernetes.io/name: mongo
+ app.kubernetes.io/instance: mongo
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: mongo
+ app.kubernetes.io/instance: mongo
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: mongo
+ app.kubernetes.io/instance: mongo
+ spec:
+ securityContext: null
+ containers:
+ - name: mongo
+ envFrom:
+ - configMapRef:
+ name: mongo-config
+ securityContext: null
+ image: "mongo:7.0.11"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: mongo
+ containerPort: 27017
+ protocol: TCP
+ resources: null
+ command: ["mongod", "--bind_ip", "0.0.0.0", "--quiet", "--logpath", "/dev/null"]
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/productivity_suite_reactui.yaml b/ProductivitySuite/kubernetes/manifests/xeon/productivity_suite_reactui.yaml
new file mode 100644
index 000000000..4abb47c9f
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/productivity_suite_reactui.yaml
@@ -0,0 +1,91 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: productivity-suite-react-ui
+ labels:
+ helm.sh/chart: productivity-suite-react-ui-0.1.0
+ app.kubernetes.io/name: react-ui
+ app.kubernetes.io/instance: productivity-suite
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 80
+ targetPort: 80
+ protocol: TCP
+ name: react-ui
+ selector:
+ app.kubernetes.io/name: react-ui
+ app.kubernetes.io/instance: productivity-suite
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: productivity-suite-react-ui
+ labels:
+ helm.sh/chart: productivity-suite-react-ui-0.1.0
+ app.kubernetes.io/name: react-ui
+ app.kubernetes.io/instance: productivity-suite
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: react-ui
+ app.kubernetes.io/instance: productivity-suite
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: react-ui
+ app.kubernetes.io/instance: productivity-suite
+ spec:
+ securityContext: null
+ containers:
+ - name: productivity-suite-react-ui
+ env:
+ - name: http_proxy
+ value: ""
+ - name: https_proxy
+ value: ""
+ - name: no_proxy
+ value: ""
+ - name: APP_BACKEND_SERVICE_ENDPOINT_CHATQNA
+ value: ""
+ - name: APP_BACKEND_SERVICE_ENDPOINT_CODEGEN
+ value: ""
+ - name: APP_BACKEND_SERVICE_ENDPOINT_DOCSUM
+ value: ""
+ - name: APP_BACKEND_SERVICE_ENDPOINT_FAQGEN
+ value: ""
+ - name: APP_DATAPREP_SERVICE_ENDPOINT
+ value: ""
+ - name: APP_DATAPREP_GET_FILE_ENDPOINT
+ value: ""
+ - name: APP_DATAPREP_DELETE_FILE_ENDPOINT
+ value: ""
+ - name: APP_CHAT_HISTORY_CREATE_ENDPOINT
+ value: ""
+ - name: APP_CHAT_HISTORY_DELETE_ENDPOINT
+ value: ""
+ - name: APP_CHAT_HISTORY_GET_ENDPOINT
+ value: ""
+ - name: APP_PROMPT_SERVICE_GET_ENDPOINT
+ value: ""
+ - name: APP_PROMPT_SERVICE_CREATE_ENDPOINT
+ value: ""
+ - name: APP_KEYCLOAK_SERVICE_ENDPOINT
+ value: ""
+ securityContext: null
+ image: "opea/productivity-suite-react-ui-server:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: react-ui
+ containerPort: 80
+ protocol: TCP
+ resources: null
diff --git a/ProductivitySuite/kubernetes/manifests/xeon/prompt_registry.yaml b/ProductivitySuite/kubernetes/manifests/xeon/prompt_registry.yaml
new file mode 100644
index 000000000..972fc214e
--- /dev/null
+++ b/ProductivitySuite/kubernetes/manifests/xeon/prompt_registry.yaml
@@ -0,0 +1,75 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: prompt-registry-config
+data:
+ http_proxy: ""
+ https_proxy: ""
+ no_proxy: ""
+ MONGO_HOST: "mongo"
+ MONGO_PORT: "27017"
+ DB_NAME: "OPEA"
+ COLLECTION_NAME: "Prompt"
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: prompt-registry
+ labels:
+ helm.sh/chart: prompt-registry-0.1.0
+ app.kubernetes.io/name: prompt-registry
+ app.kubernetes.io/instance: prompt-registry
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ type: ClusterIP
+ ports:
+ - port: 6012
+ targetPort: 6012
+ protocol: TCP
+ name: prompt-registry
+ selector:
+ app.kubernetes.io/name: prompt-registry
+ app.kubernetes.io/instance: prompt-registry
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: prompt-registry
+ labels:
+ helm.sh/chart: prompt-registry-0.1.0
+ app.kubernetes.io/name: prompt-registry
+ app.kubernetes.io/instance: prompt-registry
+ app.kubernetes.io/version: "1.0.0"
+ app.kubernetes.io/managed-by: Helm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: prompt-registry
+ app.kubernetes.io/instance: prompt-registry
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: prompt-registry
+ app.kubernetes.io/instance: prompt-registry
+ spec:
+ securityContext: null
+ containers:
+ - name: prompt-registry
+ envFrom:
+ - configMapRef:
+ name: prompt-registry-config
+ securityContext: null
+ image: "opea/promptregistry-mongo-server:latest"
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: prompt-registry
+ containerPort: 6012
+ protocol: TCP
+ resources: null
+---
diff --git a/ProductivitySuite/tests/test_productivitysuite_on_xeon.sh b/ProductivitySuite/tests/test_productivitysuite_on_xeon.sh
new file mode 100755
index 000000000..7879c7a4e
--- /dev/null
+++ b/ProductivitySuite/tests/test_productivitysuite_on_xeon.sh
@@ -0,0 +1,386 @@
+#!/bin/bash
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# End-to-end CI test for the ProductivitySuite example on Xeon: builds the
+# images, starts the docker compose stack, then validates the micro services,
+# mega services and the React frontend.
+# -x traces every command into the CI log; -e aborts on the first failure.
+set -xe
+# Registry/tag are overridable from the CI environment; default to local opea builds.
+IMAGE_REPO=${IMAGE_REPO:-"opea"}
+IMAGE_TAG=${IMAGE_TAG:-"latest"}
+echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
+echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+# REGISTRY and TAG are the names consumed by the compose files.
+export REGISTRY=${IMAGE_REPO}
+export TAG=${IMAGE_TAG}
+
+# Repo root (parent of tests/) and log directory for all service logs.
+WORKPATH=$(dirname "$PWD")
+LOG_PATH="$WORKPATH/tests"
+# First host IP reported by hostname -I; used to reach the published ports.
+ip_address=$(hostname -I | awk '{print $1}')
+
+function build_docker_images() {
+    # Build every ProductivitySuite image from source (requires the
+    # GenAIComps checkout next to the compose build file), then pull the
+    # third-party TEI/TGI serving images.
+    cd "$WORKPATH/docker"
+    # Re-runs previously failed here: git clone aborts if the directory
+    # already exists. Clone only when missing.
+    if [ ! -d "GenAIComps" ]; then
+        git clone https://github.com/opea-project/GenAIComps.git
+    fi
+
+    echo "Build all the images with --no-cache, check docker_image_build.log for details..."
+    # service_list is intentionally unquoted: it must word-split into one
+    # argument per service for docker compose build.
+    service_list="chatqna dataprep-redis embedding-tei promptregistry-mongo llm_docsum_server llm_faqgen chathistory-mongo retriever-redis reranking-tei llm-tgi productivity-suite-react-ui codegen docsum faqgen"
+    # docker writes build progress to stderr; capture both streams so the
+    # log actually contains the build output.
+    docker compose -f docker_build_compose.yaml build ${service_list} --no-cache > "${LOG_PATH}/docker_image_build.log" 2>&1
+
+    docker pull ghcr.io/huggingface/text-embeddings-inference:cpu-1.5
+    docker pull ghcr.io/huggingface/text-generation-inference:2.1.0
+    docker images
+}
+
+function start_services() {
+    # Export all environment consumed by the compose file, bring the stack
+    # up, and wait for the two TGI servers to finish loading their models.
+    cd "$WORKPATH/docker/xeon"
+
+    # Models served by the TEI/TGI containers.
+    export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5"
+    export RERANK_MODEL_ID="BAAI/bge-reranker-base"
+    export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+    export LLM_MODEL_ID_CODEGEN="Intel/neural-chat-7b-v3-3"
+    # Serving endpoints on this host's published ports.
+    export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:6006"
+    export TEI_RERANKING_ENDPOINT="http://${ip_address}:8808"
+    export TGI_LLM_ENDPOINT="http://${ip_address}:9009"
+    export REDIS_URL="redis://${ip_address}:6379"
+    export REDIS_HOST=${ip_address}
+    export INDEX_NAME="rag-redis"
+    export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+    export MEGA_SERVICE_HOST_IP=${ip_address}
+    export EMBEDDING_SERVICE_HOST_IP=${ip_address}
+    export RETRIEVER_SERVICE_HOST_IP=${ip_address}
+    export RERANK_SERVICE_HOST_IP=${ip_address}
+    export LLM_SERVICE_HOST_IP=${ip_address}
+    export LLM_SERVICE_HOST_IP_DOCSUM=${ip_address}
+    export LLM_SERVICE_HOST_IP_FAQGEN=${ip_address}
+    export LLM_SERVICE_HOST_IP_CODEGEN=${ip_address}
+    export LLM_SERVICE_HOST_IP_CHATQNA=${ip_address}
+    export TGI_LLM_ENDPOINT_CHATQNA="http://${ip_address}:9009"
+    export TGI_LLM_ENDPOINT_CODEGEN="http://${ip_address}:8028"
+    export TGI_LLM_ENDPOINT_FAQGEN="http://${ip_address}:9009"
+    export TGI_LLM_ENDPOINT_DOCSUM="http://${ip_address}:9009"
+    # BUG FIX: the endpoints below previously interpolated ${host_ip}, a
+    # variable that is never defined in this script (only ip_address is),
+    # so they expanded to "http://:PORT/...". Use ip_address consistently.
+    export BACKEND_SERVICE_ENDPOINT_CHATQNA="http://${ip_address}:8888/v1/chatqna"
+    export BACKEND_SERVICE_ENDPOINT_FAQGEN="http://${ip_address}:8889/v1/faqgen"
+    export DATAPREP_DELETE_FILE_ENDPOINT="http://${ip_address}:6009/v1/dataprep/delete_file"
+    export BACKEND_SERVICE_ENDPOINT_CODEGEN="http://${ip_address}:7778/v1/codegen"
+    export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${ip_address}:8890/v1/docsum"
+    export DATAPREP_SERVICE_ENDPOINT="http://${ip_address}:6007/v1/dataprep"
+    export DATAPREP_GET_FILE_ENDPOINT="http://${ip_address}:6008/v1/dataprep/get_file"
+    # (a duplicate CHAT_HISTORY_CREATE_ENDPOINT export was removed here)
+    export CHAT_HISTORY_CREATE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/create"
+    export CHAT_HISTORY_DELETE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/delete"
+    export CHAT_HISTORY_GET_ENDPOINT="http://${ip_address}:6012/v1/chathistory/get"
+    export PROMPT_SERVICE_GET_ENDPOINT="http://${ip_address}:6015/v1/prompt/get"
+    export PROMPT_SERVICE_CREATE_ENDPOINT="http://${ip_address}:6015/v1/prompt/create"
+    export KEYCLOAK_SERVICE_ENDPOINT="http://${ip_address}:8080"
+    # MongoDB settings shared by the chat-history and prompt services.
+    export MONGO_HOST=${ip_address}
+    export MONGO_PORT=27017
+    export DB_NAME="opea"
+    export COLLECTION_NAME="Conversations"
+    export LLM_SERVICE_HOST_PORT_FAQGEN=9002
+    export LLM_SERVICE_HOST_PORT_CODEGEN=9001
+    export LLM_SERVICE_HOST_PORT_DOCSUM=9003
+    export PROMPT_COLLECTION_NAME="prompt"
+
+    # Start Docker Containers
+    docker compose up -d
+
+    # Wait up to ~500s for each TGI server; "Connected" appears in its log
+    # once the model is loaded and the server is ready to serve.
+    n=0
+    until [[ "$n" -ge 500 ]]; do
+        docker logs tgi-service > "${LOG_PATH}/tgi_service_start.log"
+        if grep -q Connected "${LOG_PATH}/tgi_service_start.log"; then
+            echo "ChatQnA TGI Service Connected"
+            break
+        fi
+        sleep 1s
+        n=$((n+1))
+    done
+    n=0
+    until [[ "$n" -ge 500 ]]; do
+        docker logs tgi_service_codegen > "${LOG_PATH}/tgi_service_codegen_start.log"
+        if grep -q Connected "${LOG_PATH}/tgi_service_codegen_start.log"; then
+            echo "CodeGen TGI Service Connected"
+            break
+        fi
+        sleep 1s
+        n=$((n+1))
+    done
+}
+
+function validate_service() {
+    # Generic POST-and-check helper. Exits the whole script on failure.
+    #   $1 URL to POST to
+    #   $2 expected substring of the response body
+    #   $3 logical service name (selects the curl variant, names the log file)
+    #   $4 docker container whose logs are saved for post-mortem
+    #   $5 JSON payload (unused by the dataprep_* variants)
+    local URL="$1"
+    local EXPECTED_RESULT="$2"
+    local SERVICE_NAME="$3"
+    local DOCKER_NAME="$4"
+    local INPUT_DATA="$5"
+
+    if [[ $SERVICE_NAME == *"dataprep_upload_file"* ]]; then
+        # dataprep_file.txt is written into $LOG_PATH by the caller.
+        cd "$LOG_PATH"
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'files=@./dataprep_file.txt' -H 'Content-Type: multipart/form-data' "$URL")
+    elif [[ $SERVICE_NAME == *"dataprep_upload_link"* ]]; then
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F 'link_list=["https://www.ces.tech/"]' "$URL")
+    elif [[ $SERVICE_NAME == *"dataprep_get"* ]]; then
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -H 'Content-Type: application/json' "$URL")
+    elif [[ $SERVICE_NAME == *"dataprep_del"* ]]; then
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d '{"file_path": "all"}' -H 'Content-Type: application/json' "$URL")
+    else
+        HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL")
+    fi
+    # BUG FIX: $HTTP_RESPONSE is now quoted — the previous unquoted echo
+    # word-split the JSON body and glob-expanded any '*' in it, which could
+    # corrupt the substring comparison below.
+    HTTP_STATUS=$(echo "$HTTP_RESPONSE" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
+    RESPONSE_BODY=$(echo "$HTTP_RESPONSE" | sed -e 's/HTTPSTATUS\:.*//g')
+
+    # Keep the container's logs regardless of pass/fail.
+    docker logs "${DOCKER_NAME}" >> "${LOG_PATH}/${SERVICE_NAME}.log"
+
+    # check response status
+    if [ "$HTTP_STATUS" -ne "200" ]; then
+        echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS"
+        exit 1
+    else
+        echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..."
+    fi
+    # check response body
+    if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then
+        echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY"
+        exit 1
+    else
+        echo "[ $SERVICE_NAME ] Content is as expected."
+    fi
+
+    sleep 1s
+}
+
+function validate_microservices() {
+ # Check if the microservices are running correctly.
+ # Each validate_service call exits the script on mismatch, so reaching the
+ # end of this function means every microservice responded as expected.
+
+ # tei for embedding service
+ validate_service \
+ "${ip_address}:6006/embed" \
+ "[[" \
+ "tei-embedding" \
+ "tei-embedding-server" \
+ '{"inputs":"What is Deep Learning?"}'
+
+ # embedding microservice
+ validate_service \
+ "${ip_address}:6000/v1/embeddings" \
+ '"text":"What is Deep Learning?","embedding":[' \
+ "embedding-microservice" \
+ "embedding-tei-server" \
+ '{"text":"What is Deep Learning?"}'
+
+ sleep 1m # retrieval can't curl as expected, try to wait for more time
+
+ # test /v1/dataprep upload file
+ echo "Deep learning is a subset of machine learning that utilizes neural networks with multiple layers to analyze various levels of abstract data representations. It enables computers to identify patterns and make decisions with minimal human intervention by learning from large amounts of data." > $LOG_PATH/dataprep_file.txt
+ validate_service \
+ "http://${ip_address}:6007/v1/dataprep" \
+ "Data preparation succeeded" \
+ "dataprep_upload_file" \
+ "dataprep-redis-server"
+
+ # test /v1/dataprep upload link
+ validate_service \
+ "http://${ip_address}:6007/v1/dataprep" \
+ "Data preparation succeeded" \
+ "dataprep_upload_link" \
+ "dataprep-redis-server"
+
+ # test /v1/dataprep/get_file
+ validate_service \
+ "http://${ip_address}:6007/v1/dataprep/get_file" \
+ '{"name":' \
+ "dataprep_get" \
+ "dataprep-redis-server"
+
+ # test /v1/dataprep/delete_file
+ validate_service \
+ "http://${ip_address}:6007/v1/dataprep/delete_file" \
+ '{"status":true}' \
+ "dataprep_del" \
+ "dataprep-redis-server"
+
+ # retrieval microservice
+ # Random query vector; 768 presumably matches the embedding width of
+ # EMBEDDING_MODEL_ID (bge-base) — TODO confirm if the model changes.
+ test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)")
+ validate_service \
+ "${ip_address}:7000/v1/retrieval" \
+ "retrieved_docs" \
+ "retrieval-microservice" \
+ "retriever-redis-server" \
+ "{\"text\":\"What is the revenue of Nike in 2023?\",\"embedding\":${test_embedding}}"
+
+ # tei for rerank microservice
+ validate_service \
+ "${ip_address}:8808/rerank" \
+ '{"index":1,"score":' \
+ "tei-rerank" \
+ "tei-reranking-server" \
+ '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}'
+
+ # rerank microservice
+ validate_service \
+ "${ip_address}:8000/v1/reranking" \
+ "Deep learning is..." \
+ "rerank-microservice" \
+ "reranking-tei-xeon-server" \
+ '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}'
+
+ # tgi for llm service
+ validate_service \
+ "${ip_address}:9009/generate" \
+ "generated_text" \
+ "tgi-llm" \
+ "tgi-service" \
+ '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}'
+
+ # ChatQnA llm microservice
+ validate_service \
+ "${ip_address}:9000/v1/chat/completions" \
+ "data: " \
+ "llm-microservice" \
+ "llm-tgi-server" \
+ '{"query":"What is Deep Learning?"}'
+
+ # FAQGen llm microservice
+ validate_service \
+ "${ip_address}:${LLM_SERVICE_HOST_PORT_FAQGEN}/v1/faqgen" \
+ "data: " \
+ "llm_faqgen" \
+ "llm-faqgen-server" \
+ '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
+
+ # Docsum llm microservice
+ validate_service \
+ "${ip_address}:${LLM_SERVICE_HOST_PORT_DOCSUM}/v1/chat/docsum" \
+ "data: " \
+ "llm_docsum" \
+ "llm-docsum-server" \
+ '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
+
+ # CodeGen llm microservice
+ validate_service \
+ "${ip_address}:${LLM_SERVICE_HOST_PORT_CODEGEN}/v1/chat/completions" \
+ "data: " \
+ "llm_codegen" \
+ "llm-tgi-server-codegen" \
+ '{"query":"def print_hello_world():"}'
+
+ # Chat-history create (port 6012 in the compose setup).
+ result=$(curl -X 'POST' \
+ http://${ip_address}:6012/v1/chathistory/create \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "data": {
+ "messages": "test Messages", "user": "test"
+ }
+ }')
+ echo $result
+ # NOTE(review): 26 is presumably a quoted Mongo ObjectId (24 hex chars
+ # plus two quote characters) — confirm against the service's actual
+ # response shape; any other body length is treated as failure.
+ if [[ ${#result} -eq 26 ]]; then
+ echo "Correct result."
+ else
+ echo "Incorrect result."
+ exit 1
+ fi
+
+ # Prompt-registry create (port 6015 in the compose setup; 6012 is
+ # chathistory). Same length check as above.
+ result=$(curl -X 'POST' \
+ http://$ip_address:6015/v1/prompt/create \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "prompt_text": "test prompt", "user": "test"
+ }')
+ echo $result
+ if [[ ${#result} -eq 26 ]]; then
+ echo "Correct result."
+ else
+ echo "Incorrect result."
+ exit 1
+ fi
+
+}
+
+
+function validate_megaservice() {
+    # End-to-end checks through each megaservice gateway. Streaming
+    # endpoints are matched on the SSE "data: " prefix; the others on a
+    # substring of the generated text.
+    # FIX: the ChatQnA and FAQGen calls previously ended with a stray
+    # trailing '\' that continued the command onto a blank line — harmless
+    # today but breaks as soon as a comment follows; removed.
+
+    # Curl the ChatQnA Mega Service
+    validate_service \
+        "${ip_address}:8888/v1/chatqna" \
+        "data: " \
+        "chatqna-megaservice" \
+        "chatqna-xeon-backend-server" \
+        '{"messages": "What is the revenue of Nike in 2023?"}'
+
+    # Curl the FAQGen Mega Service
+    validate_service \
+        "${ip_address}:8889/v1/faqgen" \
+        "Text Embeddings Inference" \
+        "faqgen-xeon-backend-server" \
+        "faqgen-xeon-backend-server" \
+        '{"messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
+
+    # Curl the DocSum Mega Service
+    validate_service \
+        "${ip_address}:8890/v1/docsum" \
+        "toolkit" \
+        "docsum-xeon-backend-server" \
+        "docsum-xeon-backend-server" \
+        '{"messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}'
+
+    # Curl the CodeGen Mega Service
+    validate_service \
+        "${ip_address}:7778/v1/codegen" \
+        "print" \
+        "codegen-xeon-backend-server" \
+        "codegen-xeon-backend-server" \
+        '{"messages": "def print_hello_world():"}'
+}
+
+function validate_frontend() {
+ # Run the React UI end-to-end suite with npm inside a conda environment.
+ echo "[ TEST INFO ]: --------- frontend test started ---------"
+ cd $WORKPATH/docker/ui/react
+ local conda_env_name="OPEA_e2e"
+ export PATH=${HOME}/miniforge3/bin/:$PATH
+# conda remove -n ${conda_env_name} --all -y
+# conda create -n ${conda_env_name} python=3.12 -y
+ # NOTE(review): 'source activate' is the legacy conda activation form and
+ # assumes the OPEA_e2e env already exists on the runner — the commented
+ # lines above/below show how it was originally provisioned.
+ source activate ${conda_env_name}
+ echo "[ TEST INFO ]: --------- conda env activated ---------"
+
+# conda install -c conda-forge nodejs -y
+ npm install && npm ci
+ node -v && npm -v && pip list
+
+ # Capture npm's exit status so we can log the outcome before propagating
+ # the failure (set -e would otherwise abort immediately).
+ exit_status=0
+ npm run test || exit_status=$?
+
+ if [ $exit_status -ne 0 ]; then
+ echo "[TEST INFO]: ---------frontend test failed---------"
+ exit $exit_status
+ else
+ echo "[TEST INFO]: ---------frontend test passed---------"
+ fi
+}
+
+function stop_docker() {
+    # Stop and remove the compose containers; volumes and images are kept
+    # (main() prunes separately). Path quoted to survive spaces in WORKPATH.
+    cd "$WORKPATH/docker/xeon"
+    docker compose stop && docker compose rm -f
+}
+
+function main() {
+    # Start from a clean slate in case a previous run left containers up.
+    stop_docker
+    # Only build locally when testing the default "opea" repo; CI runs
+    # against a prebuilt registry skip the build.
+    if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    start_time=$(date +%s)
+    start_services
+    end_time=$(date +%s)
+    duration=$((end_time-start_time))
+    echo "Mega service start duration is $duration s" && sleep 1s
+
+    validate_microservices
+    echo "==== microservices validated ===="
+    validate_megaservice
+    echo "==== megaservices validated ===="
+    validate_frontend
+    echo "==== frontend validated ===="
+
+    stop_docker
+    # -f/--force is the documented non-interactive form; previously the
+    # confirmation prompt was answered by piping "y" in, which is fragile.
+    docker system prune -f
+}
+
+main