jjw0-0 committed on
Commit a6d12f9 · verified · 1 Parent(s): 49c944c

Upload 7 files

Files changed (7)
  1. .gitattributes +35 -35
  2. .streamlit/config.toml +3 -0
  3. Dockerfile +21 -20
  4. README.md +19 -20
  5. requirements.txt +15 -3
  6. src/htmlTemplates.py +44 -0
  7. src/streamlit_app.py +214 -40
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.streamlit/config.toml ADDED
@@ -0,0 +1,3 @@
+ [server]
+ enableXsrfProtection = false
+ enableCORS = true
Dockerfile CHANGED
@@ -1,20 +1,21 @@
- FROM python:3.13.5-slim
-
- WORKDIR /app
-
- RUN apt-get update && apt-get install -y \
-     build-essential \
-     curl \
-     git \
-     && rm -rf /var/lib/apt/lists/*
-
- COPY requirements.txt ./
- COPY src/ ./src/
-
- RUN pip3 install -r requirements.txt
-
- EXPOSE 8501
-
- HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
-
- ENTRYPOINT ["streamlit", "run", "src/streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
+ FROM python:3.11-slim
+
+ WORKDIR /app
+
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     curl \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ COPY requirements.txt ./
+ COPY src/ ./src/
+ COPY .streamlit/config.toml .streamlit/config.toml
+
+ RUN pip3 install -r requirements.txt
+
+ EXPOSE 8501
+
+ HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
+
+ ENTRYPOINT ["streamlit", "run", "src/streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0", "--server.enableXsrfProtection=false", "--server.enableCORS=true"]
README.md CHANGED
@@ -1,20 +1,19 @@
- ---
- title: Machine Learning Assignment 8
- emoji: 🚀
- colorFrom: red
- colorTo: red
- sdk: docker
- app_port: 8501
- tags:
-   - streamlit
- pinned: false
- short_description: Streamlit template space
- license: apache-2.0
- ---
-
- # Welcome to Streamlit!
-
- Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
-
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
+ ---
+ title: Test Llama
+ emoji: 🚀
+ colorFrom: red
+ colorTo: red
+ sdk: docker
+ app_port: 8501
+ tags:
+   - streamlit
+ pinned: false
+ short_description: Streamlit template space
+ ---
+
+ # Welcome to Streamlit!
+
+ Edit `/src/streamlit_app.py` to customize this app to your heart's desire. :heart:
+
+ If you have any questions, check out our [documentation](https://docs.streamlit.io) and [community
+ forums](https://discuss.streamlit.io).
requirements.txt CHANGED
@@ -1,3 +1,15 @@
- altair
- pandas
- streamlit
+ langchain>=0.1.20,<0.2
+ langchain-community>=0.0.38,<0.1
+ langchain-text-splitters
+ langchain-groq>=0.1.5
+ PyPDF2==3.0.1
+ faiss-cpu==1.7.4
+ pypdf==4.2.0
+ chromadb==0.4.24
+ tiktoken==0.7.0
+ streamlit==1.33.0
+ streamlit-extras==0.4.2
+ InstructorEmbedding==1.0.1
+ sentence-transformers==2.5.1
+ huggingface-hub==0.22.2
+ python-dotenv==1.0.1
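
These pins split the old monolithic langchain imports across langchain-community and langchain-text-splitters. As a quick sanity check (a sketch, assuming the pinned versions installed cleanly), the split-package imports used by src/streamlit_app.py should all resolve:

```python
# Confirm the split-package imports used by src/streamlit_app.py resolve.
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.document_loaders.pdf import PyPDFLoader
from langchain_groq import ChatGroq

import langchain
print("langchain", langchain.__version__)  # expect a 0.1.x release per the pin above
```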
src/htmlTemplates.py ADDED
@@ -0,0 +1,44 @@
+ css = '''
+ <style>
+ .chat-message {
+     padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
+ }
+ .chat-message.user {
+     background-color: #2b313e
+ }
+ .chat-message.bot {
+     background-color: #475063
+ }
+ .chat-message .avatar {
+     width: 20%;
+ }
+ .chat-message .avatar img {
+     max-width: 78px;
+     max-height: 78px;
+     border-radius: 50%;
+     object-fit: cover;
+ }
+ .chat-message .message {
+     width: 80%;
+     padding: 0 1.5rem;
+     color: #fff;
+ }
+ </style>
+ '''
+
+ bot_template = '''
+ <div class="chat-message bot">
+     <div class="avatar">
+         <img src="https://i.ibb.co/cN0nmSj/Screenshot-2023-05-28-at-02-37-21.png" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
+     </div>
+     <div class="message">{{MSG}}</div>
+ </div>
+ '''
+
+ user_template = '''
+ <div class="chat-message user">
+     <div class="avatar">
+         <img src="https://i.ibb.co/rdZC7LZ/Photo-logo-1.png">
+     </div>
+     <div class="message">{{MSG}}</div>
+ </div>
+ '''
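
For reference, these templates are consumed exactly as in src/streamlit_app.py: the CSS is injected once per page, then each chat turn is substituted into the {{MSG}} placeholder and rendered as raw HTML. A minimal sketch:

```python
import streamlit as st
from htmlTemplates import css, bot_template, user_template

st.write(css, unsafe_allow_html=True)  # inject the chat styles once per page

# Substitute a message into each template and render it as raw HTML.
st.write(user_template.replace("{{MSG}}", "Hello!"), unsafe_allow_html=True)
st.write(bot_template.replace("{{MSG}}", "Hi! Ask me about your documents."),
         unsafe_allow_html=True)
```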
src/streamlit_app.py CHANGED
@@ -1,40 +1,214 @@
- import altair as alt
- import numpy as np
- import pandas as pd
- import streamlit as st
-
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ import streamlit as st
+ from dotenv import load_dotenv
+ # from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
+ # from langchain.vectorstores import FAISS
+ # from langchain.embeddings import HuggingFaceEmbeddings  # General embeddings from HuggingFace models.
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from htmlTemplates import css, bot_template, user_template
+ # from langchain.llms import LlamaCpp  # For loading transformer models.
+ # from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
+ # Text splitters
+ from langchain_text_splitters import CharacterTextSplitter, RecursiveCharacterTextSplitter
+
+ # Vector store / embeddings / LLM
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+
+ # Loaders (imported from submodules so pebblo/pwd are not pulled in)
+ from langchain_community.document_loaders.pdf import PyPDFLoader
+ from langchain_community.document_loaders.text import TextLoader
+ from langchain_community.document_loaders.csv_loader import CSVLoader
+ from langchain_community.document_loaders.json_loader import JSONLoader
+ import tempfile  # Library for creating temporary files.
+ import os
+ import json
+ from langchain.docstore.document import Document
+ from langchain_groq import ChatGroq
+
+ # Extracts text from a PDF document.
+ def get_pdf_text(pdf_docs):
+     temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
+     temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
+     with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
+         f.write(pdf_docs.getvalue())  # Write the uploaded PDF's contents to the temporary file.
+     pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
+     pdf_doc = pdf_loader.load()  # Extract the text.
+     return pdf_doc  # Return the extracted documents.
+
+
+ def get_text_file(docs):
+     #################### To be implemented (a sketch follows this diff)
+     return text_doc
+
+
+ def get_csv_file(docs):
+     #################### To be implemented (a sketch follows this diff)
+     return csv_doc
+
+ # def get_json_file(docs):
+ #     temp_dir = tempfile.TemporaryDirectory()
+ #     temp_filepath = os.path.join(temp_dir.name, docs.name)
+ #     with open(temp_filepath, "wb") as f:
+ #         f.write(docs.getvalue())
+ #     json_loader = JSONLoader(temp_filepath,
+ #                              jq_schema='.scans[].relationships',
+ #                              text_content=False)
+ #
+ #     json_doc = json_loader.load()
+ #     # print('json_doc = ', json_doc)
+ #     return json_doc
+
+ def get_json_file(file) -> list[Document]:
+     # Streamlit UploadedFile -> str
+     raw = file.getvalue().decode("utf-8", errors="ignore")
+     data = json.loads(raw)
+
+     docs = []
+
+     # If the old jq path was '.scans[].relationships', parse with the same meaning:
+     # extract only those entries when they exist; otherwise document the whole payload.
+     def add_doc(x):
+         docs.append(Document(page_content=json.dumps(x, ensure_ascii=False)))
+
+     if isinstance(data, dict) and "scans" in data and isinstance(data["scans"], list):
+         for s in data["scans"]:
+             rels = s.get("relationships", [])
+             if isinstance(rels, list) and rels:
+                 for r in rels:
+                     add_doc(r)
+         if not docs:  # If nothing could be extracted, fall back to the whole object.
+             add_doc(data)
+     elif isinstance(data, list):
+         for item in data:
+             add_doc(item)
+     else:
+         add_doc(data)
+
+     return docs
+
+ # Splits documents into text chunks.
+ def get_text_chunks(documents):
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,  # Size of each chunk.
+         chunk_overlap=200,  # Overlap between adjacent chunks.
+         length_function=len  # Function used to measure text length.
+     )
+
+     documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
+     return documents  # Return the chunks.
+
+
+ # Builds a vector store from text chunks.
+ def get_vectorstore(text_chunks):
+     # Load the desired embedding model.
+     embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
+                                        model_kwargs={'device': 'cpu'})  # Configure the embedding model.
+     vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create the FAISS vector store.
+     return vectorstore  # Return the vector store.
+
+
+ def get_conversation_chain(vectorstore):
+     # Groq LLM
+     llm = ChatGroq(
+         groq_api_key=os.environ.get("GROQ_API_KEY"),
+         model_name="llama-3.1-8b-instant",
+         temperature=0.75,  # Tune as needed.
+         max_tokens=512  # Guards against exceeding the context window (adjust if needed).
+     )
+
+     memory = ConversationBufferMemory(
+         memory_key="chat_history",
+         return_messages=True
+     )
+     retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
+
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=retriever,
+         memory=memory,
+     )
+     return conversation_chain
+
+ # Handles user input.
+ def handle_userinput(user_question):
+     print('user_question => ', user_question)
+     if st.session_state.conversation is None:  # Guard: no documents processed yet.
+         st.warning("Please upload and process your documents first.")
+         return
+     # Generate a response to the user's question with the conversation chain.
+     response = st.session_state.conversation({'question': user_question})
+     # Store the chat history.
+     st.session_state.chat_history = response['chat_history']
+
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace(
+                 "{{MSG}}", message.content), unsafe_allow_html=True)
+
+
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Basic_RAG_AI_Chatbot_with_Llama",
+                        page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Basic_RAG_AI_Chatbot_with_Llama3 :books:")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
+     with st.sidebar:
+         st.subheader("Your documents")
+         docs = st.file_uploader(
+             "Upload your Files here and click on 'Process'", accept_multiple_files=True)
+         if st.button("Process[PDF]"):
+             with st.spinner("Processing"):
+                 # get pdf text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type in ['application/octet-stream', 'application/pdf']:
+                         # file is .pdf
+                         doc_list.extend(get_pdf_text(file))
+                     else:
+                         st.error("Not a PDF file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+         ################## Implement the TXT and CSV buttons here.
+         # TXT button hint: if file.type == 'text/plain':
+         # CSV button hint: if file.type == 'text/csv':
+         # (a sketch of the matching loaders follows this diff)
+
+         if st.button("Process[JSON]"):
+             with st.spinner("Processing"):
+                 # get json text
+                 doc_list = []
+                 for file in docs:
+                     print('file - type : ', file.type)
+                     if file.type == 'application/json':
+                         # file is .json
+                         doc_list.extend(get_json_file(file))
+                     else:
+                         st.error("Not a JSON file.")
+                 if not doc_list:
+                     st.error("No processable documents were found.")
+                     st.stop()
+
+                 text_chunks = get_text_chunks(doc_list)
+                 vectorstore = get_vectorstore(text_chunks)
+                 st.session_state.conversation = get_conversation_chain(vectorstore)
+
+
+ if __name__ == '__main__':
+     main()
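
The two placeholder loaders, get_text_file and get_csv_file, are left open for the assignment. A minimal sketch of one way to fill them in, mirroring get_pdf_text and using the TextLoader and CSVLoader already imported at the top of the file (the encoding argument is an illustrative assumption, not part of the original):

```python
import os
import tempfile

from langchain_community.document_loaders.text import TextLoader
from langchain_community.document_loaders.csv_loader import CSVLoader


def get_text_file(docs):
    # Mirror get_pdf_text: persist the Streamlit upload to a temp file,
    # then hand the file path to the loader.
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(docs.getvalue())
    text_loader = TextLoader(temp_filepath, encoding="utf-8")  # encoding is an assumption
    return text_loader.load()


def get_csv_file(docs):
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(docs.getvalue())
    csv_loader = CSVLoader(temp_filepath)  # by default, one Document per CSV row
    return csv_loader.load()
```

The matching sidebar buttons would dispatch on file.type == 'text/plain' and file.type == 'text/csv', as the hints in main() indicate.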