diff --git a/.github/workflows/scripts/build_push.sh b/.github/workflows/scripts/build_push.sh index 466463a27..8502ea6a2 100755 --- a/.github/workflows/scripts/build_push.sh +++ b/.github/workflows/scripts/build_push.sh @@ -46,7 +46,7 @@ function docker_build() { # $1 is like "apple orange pear" for MEGA_SVC in $1; do case $MEGA_SVC in - "ChatQnA"|"CodeGen"|"CodeTrans"|"DocSum"|"Translation"|"AudioQnA"|"SearchQnA") + "ChatQnA"|"CodeGen"|"CodeTrans"|"DocSum"|"Translation"|"AudioQnA"|"SearchQnA"|"FaqGen") cd $MEGA_SVC/docker IMAGE_NAME="$(getImagenameFromMega $MEGA_SVC)" docker_build ${IMAGE_NAME} diff --git a/FaqGen/README.md b/FaqGen/README.md new file mode 100644 index 000000000..aa5435283 --- /dev/null +++ b/FaqGen/README.md @@ -0,0 +1,17 @@ +# FAQ Generation Application + +In today's data-driven world, organizations across various industries face the challenge of managing and understanding vast amounts of information. Legal documents, contracts, regulations, and customer inquiries often contain critical insights buried within dense text. Extracting and presenting these insights in a concise and accessible format is crucial for decision-making, compliance, and customer satisfaction. + +Our FAQ Generation Application leverages the power of large language models (LLMs) to revolutionize the way you interact with and comprehend complex textual data. By harnessing cutting-edge natural language processing techniques, our application can automatically generate comprehensive and natural-sounding frequently asked questions (FAQs) from your documents, legal texts, customer queries, and other sources. In this example use case, we utilize LangChain to implement FAQ Generation and facilitate LLM inference using Text Generation Inference on Intel Xeon and Gaudi2 processors. + +# Deploy FAQ Generation Service + +The FAQ Generation service can be effortlessly deployed on either Intel Gaudi2 or Intel XEON Scalable Processors. 
+ +## Deploy FAQ Generation on Gaudi + +Refer to the [Gaudi Guide](./docker/gaudi/README.md) for instructions on deploying FAQ Generation on Gaudi. + +## Deploy FAQ Generation on Xeon + +Refer to the [Xeon Guide](./docker/xeon/README.md) for instructions on deploying FAQ Generation on Xeon. diff --git a/FaqGen/assets/img/faqgen_react_ui_text.png b/FaqGen/assets/img/faqgen_react_ui_text.png new file mode 100644 index 000000000..c6c9e5048 Binary files /dev/null and b/FaqGen/assets/img/faqgen_react_ui_text.png differ diff --git a/FaqGen/assets/img/faqgen_react_ui_text_file.png b/FaqGen/assets/img/faqgen_react_ui_text_file.png new file mode 100644 index 000000000..37d82bdde Binary files /dev/null and b/FaqGen/assets/img/faqgen_react_ui_text_file.png differ diff --git a/FaqGen/assets/img/faqgen_ui_text.png b/FaqGen/assets/img/faqgen_ui_text.png new file mode 100644 index 000000000..54b195577 Binary files /dev/null and b/FaqGen/assets/img/faqgen_ui_text.png differ diff --git a/FaqGen/docker/Dockerfile b/FaqGen/docker/Dockerfile new file mode 100644 index 000000000..4b575b8dd --- /dev/null +++ b/FaqGen/docker/Dockerfile @@ -0,0 +1,32 @@ + + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +FROM langchain/langchain:latest + + +RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \ + libgl1-mesa-glx \ + libjemalloc-dev \ + vim + +RUN useradd -m -s /bin/bash user && \ + mkdir -p /home/user && \ + chown -R user /home/user/ + +RUN cd /home/user/ && \ + git clone https://github.com/opea-project/GenAIComps.git + +RUN cd /home/user/GenAIComps && pip install --no-cache-dir --upgrade pip && \ + pip install -r /home/user/GenAIComps/requirements.txt + +COPY ./faqgen.py /home/user/faqgen.py + +ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps + +USER user + +WORKDIR /home/user + +ENTRYPOINT ["python", "faqgen.py"] diff --git a/FaqGen/docker/faqgen.py b/FaqGen/docker/faqgen.py new file mode 100644 index 
000000000..bfcf14871 --- /dev/null +++ b/FaqGen/docker/faqgen.py @@ -0,0 +1,36 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import asyncio +import os + +from comps import FaqGenGateway, MicroService, ServiceOrchestrator, ServiceType + +MEGA_SERVICE_HOST_IP = os.getenv("MEGA_SERVICE_HOST_IP", "0.0.0.0") +MEGA_SERVICE_PORT = int(os.getenv("MEGA_SERVICE_PORT", 8888)) +LLM_SERVICE_HOST_IP = os.getenv("LLM_SERVICE_HOST_IP", "0.0.0.0") +LLM_SERVICE_PORT = int(os.getenv("LLM_SERVICE_PORT", 9000)) + + +class FaqGenService: + def __init__(self, host="0.0.0.0", port=8000): + self.host = host + self.port = port + self.megaservice = ServiceOrchestrator() + + def add_remote_service(self): + llm = MicroService( + name="llm", + host=LLM_SERVICE_HOST_IP, + port=LLM_SERVICE_PORT, + endpoint="/v1/faqgen", + use_remote_service=True, + service_type=ServiceType.LLM, + ) + self.megaservice.add(llm) + self.gateway = FaqGenGateway(megaservice=self.megaservice, host="0.0.0.0", port=self.port) + + +if __name__ == "__main__": + faqgen = FaqGenService(host=MEGA_SERVICE_HOST_IP, port=MEGA_SERVICE_PORT) + faqgen.add_remote_service() diff --git a/FaqGen/docker/gaudi/README.md b/FaqGen/docker/gaudi/README.md new file mode 100644 index 000000000..63bd53631 --- /dev/null +++ b/FaqGen/docker/gaudi/README.md @@ -0,0 +1,171 @@ +# Build MegaService of FAQ Generation on Gaudi + +This document outlines the deployment process for a FAQ Generation application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on Intel Gaudi server. The steps include Docker image creation, container deployment via Docker Compose, and service execution to integrate microservices such as llm. We will publish the Docker images to Docker Hub, which will simplify the deployment process for this service. + +## 🚀 Build Docker Images + +First of all, you need to build Docker Images locally. 
This step can be ignored once the Docker images are published to Docker hub. + +```bash +git clone https://github.com/opea-project/GenAIComps.git +cd GenAIComps +``` + +### 1. Pull TGI Gaudi Image + +As TGI Gaudi has been officially published as a Docker image, we simply need to pull it: + +```bash +docker pull ghcr.io/huggingface/tgi-gaudi:1.2.1 +``` + +### 2. Build LLM Image + +```bash +docker build -t opea/llm-faqgen-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/llms/faq-generation/tgi/Dockerfile . +``` + +### 3. Build MegaService Docker Image + +To construct the Mega Service, we utilize the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline within the `faqgen.py` Python script. Build the MegaService Docker image using the command below: + +```bash +git clone https://github.com/opea-project/GenAIExamples +cd GenAIExamples/FaqGen/docker/ +docker build --no-cache -t opea/faqgen:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . +``` + +### 4. Build UI Docker Image + +Construct the frontend Docker image using the command below: + +```bash +cd GenAIExamples/FaqGen/docker/ui/ +docker build -t opea/faqgen-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f ./docker/Dockerfile . +``` + +### 5. Build react UI Docker Image (Optional) + +Build the frontend Docker image based on react framework via below command: + +```bash +cd GenAIExamples/FaqGen/docker/ui +export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/faqgen" +docker build -t opea/faqgen-react-ui:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy --build-arg BACKEND_SERVICE_ENDPOINT=$BACKEND_SERVICE_ENDPOINT -f ./docker/Dockerfile.react . +``` + +Then run the command `docker images`, you will have the following Docker Images: + +1. `ghcr.io/huggingface/tgi-gaudi:1.2.1` +2. 
`opea/llm-faqgen-tgi:latest` +3. `opea/faqgen:latest` +4. `opea/faqgen-ui:latest` +5. `opea/faqgen-react-ui:latest` + +## 🚀 Start Microservices and MegaService + +### Setup Environment Variables + +Since the `docker_compose.yaml` will consume some environment variables, you need to setup them in advance as below. + +```bash +export no_proxy=${your_no_proxy} +export http_proxy=${your_http_proxy} +export https_proxy=${your_http_proxy} +export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" +export TGI_LLM_ENDPOINT="http://${your_ip}:8008" +export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token} +export MEGA_SERVICE_HOST_IP=${host_ip} +export LLM_SERVICE_HOST_IP=${host_ip} +export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/faqgen" +``` + +Note: Please replace with `host_ip` with your external IP address, do not use localhost. + +### Start Microservice Docker Containers + +```bash +cd GenAIExamples/FaqGen/docker/gaudi +docker compose -f docker_compose.yaml up -d +``` + +### Validate Microservices + +1. TGI Service + +```bash +curl http://${your_ip}:8008/generate \ + -X POST \ + -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":64, "do_sample": true}}' \ + -H 'Content-Type: application/json' +``` + +2. LLM Microservice + +```bash +curl http://${host_ip}:9000/v1/faqgen \ + -X POST \ + -d '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' \ + -H 'Content-Type: application/json' +``` + +3. MegaService + +```bash +curl http://${host_ip}:8888/v1/faqgen -H "Content-Type: application/json" -d '{ + "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. 
TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." + }' +``` + +## Enable LangSmith to Monitor an Application (Optional) + +LangSmith offers a suite of tools to debug, evaluate, and monitor language models and intelligent agents. It can be used to assess benchmark data for each microservice. Before launching your services with `docker compose -f docker_compose.yaml up -d`, you need to enable LangSmith tracing by setting the `LANGCHAIN_TRACING_V2` environment variable to true and configuring your LangChain API key. + +Here's how you can do it: + +1. Install the latest version of LangSmith: + +```bash +pip install -U langsmith +``` + +2. Set the necessary environment variables: + +```bash +export LANGCHAIN_TRACING_V2=true +export LANGCHAIN_API_KEY=ls_... +``` + +## 🚀 Launch the UI + +Open this URL `http://{host_ip}:5173` in your browser to access the frontend. + +![project-screenshot](../../assets/img/faqgen_ui_text.png) + +## 🚀 Launch the React UI (Optional) + +To access the FAQGen (react based) frontend, modify the UI service in the `docker_compose.yaml` file. Replace `faqgen-xeon-ui-server` service with the `faqgen-xeon-react-ui-server` service as per the config below: + +```bash + faqgen-xeon-react-ui-server: + image: opea/faqgen-react-ui:latest + container_name: faqgen-xeon-react-ui-server + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + ports: + - 5174:80 + depends_on: + - faqgen-xeon-backend-server + ipc: host + restart: always +``` + +Open this URL `http://{host_ip}:5174` in your browser to access the react based frontend. 
+ +- Create FAQs from Text input + ![project-screenshot](../../assets/img/faqgen_react_ui_text.png) + +- Create FAQs from Text Files + ![project-screenshot](../../assets/img/faqgen_react_ui_text_files.png) diff --git a/FaqGen/docker/gaudi/docker_compose.yaml b/FaqGen/docker/gaudi/docker_compose.yaml new file mode 100644 index 000000000..779576e57 --- /dev/null +++ b/FaqGen/docker/gaudi/docker_compose.yaml @@ -0,0 +1,77 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +version: "3.8" + +services: + tgi_service: + image: ghcr.io/huggingface/tgi-gaudi:2.0.1 + container_name: tgi-gaudi-server + ports: + - "8008:80" + volumes: + - "./data:/data" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + HABANA_VISIBLE_DEVICES: all + OMPI_MCA_btl_vader_single_copy_mechanism: none + HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + runtime: habana + cap_add: + - SYS_NICE + ipc: host + command: --model-id ${LLM_MODEL_ID} --max-input-length 1024 --max-total-tokens 2048 + llm_faqgen: + image: opea/llm-faqgen-tgi:latest + container_name: llm-faqgen-server + depends_on: + - tgi_service + ports: + - "9000:9000" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY} + LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2} + LANGCHAIN_PROJECT: "opea-llm-service" + restart: unless-stopped + faqgen-gaudi-backend-server: + image: opea/faqgen:latest + container_name: faqgen-gaudi-backend-server + depends_on: + - tgi_service + - llm_faqgen + ports: + - "8888:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP} + ipc: host + restart: always + faqgen-gaudi-ui-server: + image: opea/faqgen-ui:latest 
+ container_name: faqgen-gaudi-ui-server + depends_on: + - faqgen-gaudi-backend-server + ports: + - "5173:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT} + ipc: host + restart: always + +networks: + default: + driver: bridge diff --git a/FaqGen/docker/ui/docker/Dockerfile b/FaqGen/docker/ui/docker/Dockerfile new file mode 100644 index 000000000..ac2bb7da3 --- /dev/null +++ b/FaqGen/docker/ui/docker/Dockerfile @@ -0,0 +1,26 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# Use node 20.11.1 as the base image +FROM node:20.11.1 + +# Update package manager and install Git +RUN apt-get update -y && apt-get install -y git + +# Copy the front-end code repository +COPY svelte /home/user/svelte + +# Set the working directory +WORKDIR /home/user/svelte + +# Install front-end dependencies +RUN npm install + +# Build the front-end application +RUN npm run build + +# Expose the port of the front-end application +EXPOSE 5173 + +# Run the front-end application in preview mode +CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"] \ No newline at end of file diff --git a/FaqGen/docker/ui/docker/Dockerfile.react b/FaqGen/docker/ui/docker/Dockerfile.react new file mode 100644 index 000000000..4e29136a6 --- /dev/null +++ b/FaqGen/docker/ui/docker/Dockerfile.react @@ -0,0 +1,20 @@ +FROM node as vite-app + +COPY . 
/usr/app +WORKDIR /usr/app/react + +ARG BACKEND_SERVICE_ENDPOINT +ENV VITE_FAQ_GEN_URL=$BACKEND_SERVICE_ENDPOINT + +RUN ["npm", "install"] +RUN ["npm", "run", "build"] + + +FROM nginx:alpine +EXPOSE 80 + + +COPY --from=vite-app /usr/app/react/nginx.conf /etc/nginx/conf.d/default.conf +COPY --from=vite-app /usr/app/react/dist /usr/share/nginx/html + +ENTRYPOINT ["nginx", "-g", "daemon off;"] \ No newline at end of file diff --git a/FaqGen/docker/ui/react/.eslintrc.cjs b/FaqGen/docker/ui/react/.eslintrc.cjs new file mode 100644 index 000000000..78174f683 --- /dev/null +++ b/FaqGen/docker/ui/react/.eslintrc.cjs @@ -0,0 +1,11 @@ +module.exports = { + root: true, + env: { browser: true, es2020: true }, + extends: ["eslint:recommended", "plugin:@typescript-eslint/recommended", "plugin:react-hooks/recommended"], + ignorePatterns: ["dist", ".eslintrc.cjs"], + parser: "@typescript-eslint/parser", + plugins: ["react-refresh"], + rules: { + "react-refresh/only-export-components": ["warn", { allowConstantExport: true }], + }, +}; diff --git a/FaqGen/docker/ui/react/.gitignore b/FaqGen/docker/ui/react/.gitignore new file mode 100644 index 000000000..a547bf36d --- /dev/null +++ b/FaqGen/docker/ui/react/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/FaqGen/docker/ui/react/README.md b/FaqGen/docker/ui/react/README.md new file mode 100644 index 000000000..65edba6d6 --- /dev/null +++ b/FaqGen/docker/ui/react/README.md @@ -0,0 +1,28 @@ +

FAQ Generation React

+ +### 📸 Project Screenshots + +![project-screenshot](../../../assets/img/faqgen_react_ui_text.png) +![project-screenshot](../../../assets/img/faqgen_react_ui_text_file.png) + +

🧐 Features

+ +Here're some of the project's features: + +- Generate FAQs from Text via Pasting: Paste the text to into the text box, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below. + +- Generate FAQs from Text via txt file Upload: Upload the file in the Upload bar, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below. + +

🛠️ Get it Running:

+ +1. Clone the repo. + +2. cd command to the current folder. + +3. Modify the required .env variables. + ``` + VITE_DOC_SUM_URL = '' + ``` +4. Execute `npm install` to install the corresponding dependencies. + +5. Execute `npm run dev` in both environments diff --git a/FaqGen/docker/ui/react/index.html b/FaqGen/docker/ui/react/index.html new file mode 100644 index 000000000..21af14b47 --- /dev/null +++ b/FaqGen/docker/ui/react/index.html @@ -0,0 +1,18 @@ + + + + + + + + + Opea FAQ Gen + + +
+ + + diff --git a/FaqGen/docker/ui/react/nginx.conf b/FaqGen/docker/ui/react/nginx.conf new file mode 100644 index 000000000..00433fcda --- /dev/null +++ b/FaqGen/docker/ui/react/nginx.conf @@ -0,0 +1,20 @@ +server { + listen 80; + + gzip on; + gzip_proxied any; + gzip_comp_level 6; + gzip_buffers 16 8k; + gzip_http_version 1.1; + gzip_types font/woff2 text/css application/javascript application/json application/font-woff application/font-tff image/gif image/png image/svg+xml application/octet-stream; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + try_files $uri $uri/ /index.html =404; + + location ~* \.(gif|jpe?g|png|webp|ico|svg|css|js|mp4|woff2)$ { + expires 1d; + } + } +} \ No newline at end of file diff --git a/FaqGen/docker/ui/react/package.json b/FaqGen/docker/ui/react/package.json new file mode 100644 index 000000000..1d88b1c6b --- /dev/null +++ b/FaqGen/docker/ui/react/package.json @@ -0,0 +1,52 @@ +{ + "name": "ui", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc && vite build", + "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "vite preview", + "test": "vitest" + }, + "dependencies": { + "@mantine/core": "^7.11.1", + "@mantine/dropzone": "^7.11.1", + "@mantine/hooks": "^7.11.1", + "@mantine/notifications": "^7.11.1", + "@microsoft/fetch-event-source": "^2.0.1", + "@reduxjs/toolkit": "^2.2.5", + "@tabler/icons-react": "^3.9.0", + "axios": "^1.7.2", + "luxon": "^3.4.4", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-markdown": "^9.0.1", + "react-syntax-highlighter": "^15.5.0", + "remark-frontmatter": "^5.0.0", + "remark-gfm": "^4.0.0" + }, + "devDependencies": { + "@testing-library/react": "^16.0.0", + "@types/luxon": "^3.4.2", + "@types/node": "^20.12.12", + "@types/react": "^18.2.66", + "@types/react-dom": "^18.2.22", + "@types/react-syntax-highlighter": "^15.5.13", + "@typescript-eslint/eslint-plugin": "^7.2.0", + "@typescript-eslint/parser": "^7.2.0", + "@vitejs/plugin-react": "^4.2.1", + "eslint": "^8.57.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.6", + "jsdom": "^24.1.0", + "postcss": "^8.4.38", + "postcss-preset-mantine": "^1.15.0", + "postcss-simple-vars": "^7.0.1", + "sass": "1.64.2", + "typescript": "^5.2.2", + "vite": "^5.2.13", + "vitest": "^1.6.0" + } +} diff --git a/FaqGen/docker/ui/react/postcss.config.cjs b/FaqGen/docker/ui/react/postcss.config.cjs new file mode 100644 index 000000000..e817f567b --- /dev/null +++ b/FaqGen/docker/ui/react/postcss.config.cjs @@ -0,0 +1,14 @@ +module.exports = { + plugins: { + "postcss-preset-mantine": {}, + "postcss-simple-vars": { + variables: { + "mantine-breakpoint-xs": "36em", + "mantine-breakpoint-sm": "48em", + "mantine-breakpoint-md": "62em", + "mantine-breakpoint-lg": "75em", + "mantine-breakpoint-xl": "88em", + }, + }, + }, +}; diff --git a/FaqGen/docker/ui/react/public/vite.svg b/FaqGen/docker/ui/react/public/vite.svg new file mode 100644 index 000000000..e7b8dfb1b --- /dev/null +++ 
b/FaqGen/docker/ui/react/public/vite.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/FaqGen/docker/ui/react/src/App.scss b/FaqGen/docker/ui/react/src/App.scss new file mode 100644 index 000000000..187764a17 --- /dev/null +++ b/FaqGen/docker/ui/react/src/App.scss @@ -0,0 +1,42 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +@import "./styles/styles"; + +.root { + @include flex(row, nowrap, flex-start, flex-start); +} + +.layout-wrapper { + @include absolutes; + + display: grid; + + width: 100%; + height: 100%; + + grid-template-columns: 80px auto; + grid-template-rows: 1fr; +} + +/* ===== Scrollbar CSS ===== */ +/* Firefox */ +* { + scrollbar-width: thin; + scrollbar-color: #d6d6d6 #ffffff; +} + +/* Chrome, Edge, and Safari */ +*::-webkit-scrollbar { + width: 8px; +} + +*::-webkit-scrollbar-track { + background: #ffffff; +} + +*::-webkit-scrollbar-thumb { + background-color: #d6d6d6; + border-radius: 16px; + border: 4px double #dedede; +} diff --git a/FaqGen/docker/ui/react/src/App.tsx b/FaqGen/docker/ui/react/src/App.tsx new file mode 100644 index 000000000..2f93aee99 --- /dev/null +++ b/FaqGen/docker/ui/react/src/App.tsx @@ -0,0 +1,32 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +import "./App.scss" +import { MantineProvider } from "@mantine/core" +import '@mantine/notifications/styles.css'; +import { SideNavbar, SidebarNavList } from "./components/sidebar/sidebar" +import { IconFileText } from "@tabler/icons-react" +import { Notifications } from '@mantine/notifications'; +import FaqGen from "./components/FaqGen/FaqGen"; + +const title = "Faq Generator" +const navList: SidebarNavList = [ + { icon: IconFileText, label: title } +] + +function App() { + + return ( + + +
+ +
+ +
+
+
+ ) +} + +export default App diff --git a/FaqGen/docker/ui/react/src/__tests__/util.test.ts b/FaqGen/docker/ui/react/src/__tests__/util.test.ts new file mode 100644 index 000000000..e67ba2c86 --- /dev/null +++ b/FaqGen/docker/ui/react/src/__tests__/util.test.ts @@ -0,0 +1,14 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +import { describe, expect, test } from "vitest"; +import { getCurrentTimeStamp, uuidv4 } from "../common/util"; + +describe("unit tests", () => { + test("check UUID is of length 36", () => { + expect(uuidv4()).toHaveLength(36); + }); + test("check TimeStamp generated is of unix", () => { + expect(getCurrentTimeStamp()).toBe(Math.floor(Date.now() / 1000)); + }); +}); diff --git a/FaqGen/docker/ui/react/src/assets/opea-icon-black.svg b/FaqGen/docker/ui/react/src/assets/opea-icon-black.svg new file mode 100644 index 000000000..5c96dc762 --- /dev/null +++ b/FaqGen/docker/ui/react/src/assets/opea-icon-black.svg @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/FaqGen/docker/ui/react/src/assets/opea-icon-color.svg b/FaqGen/docker/ui/react/src/assets/opea-icon-color.svg new file mode 100644 index 000000000..790151171 --- /dev/null +++ b/FaqGen/docker/ui/react/src/assets/opea-icon-color.svg @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/FaqGen/docker/ui/react/src/assets/react.svg b/FaqGen/docker/ui/react/src/assets/react.svg new file mode 100644 index 000000000..6c87de9bb --- /dev/null +++ b/FaqGen/docker/ui/react/src/assets/react.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/FaqGen/docker/ui/react/src/common/client.ts b/FaqGen/docker/ui/react/src/common/client.ts new file mode 100644 index 000000000..7512f73e3 --- /dev/null +++ b/FaqGen/docker/ui/react/src/common/client.ts @@ -0,0 +1,8 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +import axios from "axios"; + +//add iterceptors to add any request 
headers + +export default axios; diff --git a/FaqGen/docker/ui/react/src/common/util.ts b/FaqGen/docker/ui/react/src/common/util.ts new file mode 100644 index 000000000..df65b2d8e --- /dev/null +++ b/FaqGen/docker/ui/react/src/common/util.ts @@ -0,0 +1,12 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +export const getCurrentTimeStamp = () => { + return Math.floor(Date.now() / 1000); +}; + +export const uuidv4 = () => { + return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) => + (+c ^ (crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (+c / 4)))).toString(16), + ); +}; diff --git a/FaqGen/docker/ui/react/src/components/FaqGen/FaqGen.tsx b/FaqGen/docker/ui/react/src/components/FaqGen/FaqGen.tsx new file mode 100644 index 000000000..ca731cbf8 --- /dev/null +++ b/FaqGen/docker/ui/react/src/components/FaqGen/FaqGen.tsx @@ -0,0 +1,167 @@ +import styleClasses from './faqGen.module.scss' +import { Button, Text, Textarea, Title } from '@mantine/core' +import { FileUpload } from './FileUpload' +import { useEffect, useState } from 'react' +import Markdown from '../Shared/Markdown/Markdown' +import { fetchEventSource } from '@microsoft/fetch-event-source' +import { notifications } from '@mantine/notifications' +import { FAQ_GEN_URL } from '../../config' +import { FileWithPath } from '@mantine/dropzone' + + +const FaqGen = () => { + const [isFile, setIsFile] = useState(false); + const [files, setFiles] = useState([]) + const [isGenerating, setIsGenerating] = useState(false); + const [value, setValue] = useState(''); + const [fileContent, setFileContent] = useState(''); + const [response, setResponse] = useState(''); + + let messagesEnd:HTMLDivElement; + + const scrollToView = () => { + if (messagesEnd) { + messagesEnd.scrollTop = messagesEnd.scrollHeight; + } + }; + useEffect(()=>{ + scrollToView() + },[response]) + + useEffect(() => { + if(isFile){ + setValue('') + } + },[isFile]) + + useEffect(()=>{ + if 
(files.length) { + const reader = new FileReader() + reader.onload = async () => { + const text = reader.result?.toString() + setFileContent(text || '') + }; + reader.readAsText(files[0]) + } + },[files]) + + + const handleSubmit = async () => { + setResponse("") + if(!isFile && !value){ + notifications.show({ + color: "red", + id: "input", + message: "Please Upload Content", + }) + return + } + + setIsGenerating(true) + const body = { + messages: isFile ? fileContent : value + } + fetchEventSource(FAQ_GEN_URL, { + method: "POST", + headers: { + "Content-Type": "application/json", + "Accept": "*/*" + }, + body: JSON.stringify(body), + openWhenHidden: true, + async onopen(response) { + if (response.ok) { + return; + } else if (response.status >= 400 && response.status < 500 && response.status !== 429) { + const e = await response.json(); + console.log(e); + throw Error(e.error.message); + } else { + console.log("error", response); + } + }, + onmessage(msg) { + if (msg?.data != "[DONE]") { + try { + const res = JSON.parse(msg.data) + const logs = res.ops; + logs.forEach((log: { op: string; path: string; value: string }) => { + if (log.op === "add") { + if ( + log.value !== "" && log.path.endsWith("/streamed_output/-") && log.path.length > "/streamed_output/-".length + ) { + setResponse(prev=>prev+log.value); + } + } + }); + } catch (e) { + console.log("something wrong in msg", e); + throw e; + } + } + }, + onerror(err) { + console.log("error", err); + setIsGenerating(false) + throw err; + }, + onclose() { + setIsGenerating(false) + }, + }); +} + + + return ( +
+
+
+
+ FAQ Generator +
+
+ Please upload file or paste content for generating Faq's. +
+
+ + + + +
+
+ {isFile ? ( +
+ { setFiles(files) }} /> +
+ ) : ( +
+