diff --git a/presentation.tex b/presentation.tex index ce4f378..b90196f 100644 --- a/presentation.tex +++ b/presentation.tex @@ -1,4 +1,4 @@ -\documentclass[compress,mathserif,fleqn,10pt]{beamer} +\documentclass[compress, mathserif, fleqn, 10pt]{beamer} \useoutertheme{split} \useoutertheme[subsection=false]{smoothbars} \useinnertheme[shadow=true]{rounded} @@ -15,10 +15,10 @@ \def\beamer@writeslidentry@miniframesoff{% \expandafter\beamer@ifempty\expandafter{\beamer@framestartpage}{}% does not happen normally {%else + % removed \addtocontents commands \clearpage\beamer@notesactions% - } -} +} } \newcommand*{\miniframeson}{\let\beamer@writeslidentry=\beamer@writeslidentry@miniframeson} \newcommand*{\miniframesoff}{\let\beamer@writeslidentry=\beamer@writeslidentry@miniframesoff} \makeatother @@ -37,27 +37,34 @@ \usepackage{tikz} \title{Transformers and Multi-features Time2Vec for Financial Prediction} -\author[Bui Nguyen Kim Hai, Nguyen Duy Chien]{Bui Nguyen Kim Hai, Nguyen Duy Chien} +\author[ +Bui +Nguyen +Kim +Hai, +Nguyen +Duy +Chien]{Bui Nguyen Kim Hai, Nguyen Duy Chien} %\institute{Department of Numerical Analysis, Faculty of Informatics\\ ELTE Eötvös Loránd University, Budapest, Hungary} -\date{\scriptsize \emph{TDK CONFERENCE – IT SCIENCE SECTION, 2024 SPRING}\\\bigskip Budapest, Hungary\\ May 29, 2024} +\date{\scriptsize \emph{TDK CONFERENCE – IT SCIENCE SECTION, 2024 SPRING}\\ + \bigskip + Budapest, Hungary\\ May 29, 2024} \begin{document} - - \abovedisplayskip=1pt - \belowdisplayskip=2pt - \abovedisplayshortskip=1pt - \belowdisplayshortskip=2pt + \abovedisplayskip=1pt \belowdisplayskip=2pt \abovedisplayshortskip=1pt \belowdisplayshortskip=2pt \begin{frame} \titlepage \end{frame} - \begin{frame}\frametitle{Outline} + \begin{frame} + \frametitle{Outline} \tableofcontents \end{frame} \section{Introduction} - \begin{frame}\frametitle{Outline} + \begin{frame} + \frametitle{Outline} \tableofcontents[currentsection] \end{frame} @@ -65,14 +72,18 @@ 
\begin{frame}{Motivation} \begin{block}{By other works} \begin{itemize} - \item Researchers try to combine Time2Vec with CNN, RNN, LSTM, and Attention mechanism + \item Researchers try to combine Time2Vec with CNN, RNN, LSTM, and + Attention mechanism + \item For instances: - \begin{itemize} - \item Aeroengine Risk Assessment - \item Predicting Production in Shale and Sandstone Gas Reservoirs - \item Stock Price Forecasting - \end{itemize} + \begin{itemize} + \item Aeroengine Risk Assessment + + \item Predicting Production in Shale and Sandstone Gas Reservoirs + + \item Stock Price Forecasting + \end{itemize} \end{itemize} \end{block} \smallskip @@ -85,7 +96,9 @@ \begin{block}{By observing trends} \begin{itemize} \item Stock's trend is a Markov process + \item Historical data offers limited foresight + \item Stocks having similar trend is more promising \end{itemize} \end{block} @@ -94,7 +107,7 @@ \begin{frame}{Motivation: Cross-correlation to NASDAQ} \centerline{\includegraphics[width=0.85\textwidth]{images/nas_base.eps}} \end{frame} - + \begin{frame}{Motivation: Cross-correlation to Exxon Mobil} \centerline{\includegraphics[width=0.85\textwidth]{images/exx_base.eps}} \end{frame} @@ -108,10 +121,11 @@ Handling temporal problems in sequential data and time-series analysis. \end{block} \begin{block}{LSTM} - Using gates, LSTM enables network to learn long-term dependencies and prevent the vanishing gradient problem. + Using gates, LSTM enables network to learn long-term dependencies and + prevent the vanishing gradient problem. 
\end{block} \begin{block}{Transformer} - The SOTA architecture that works well in many area such as NLP, and time-series + The SOTA architecture that works well in many areas such as NLP, and time-series \end{block} \begin{block}{Time2Vec} Use to embed the time-series data to vector @@ -119,13 +133,14 @@ \end{frame} \section{Proposed model and techniques} - \begin{frame}\frametitle{Outline} + \begin{frame} + \frametitle{Outline} \tableofcontents[currentsection] \end{frame} %\subsection{Behavioral similarity of stocks} %\begin{frame}{Behavioral similarity of stocks} - + %\end{frame} \subsection{Data collection} @@ -142,6 +157,7 @@ \begin{block}{Collected datasets} \begin{itemize} \item \structure{Group1}: NASDAQ, S\&P500, DJI, DAX + \item \structure{Group2}: Exxon Mobil, Chervon \end{itemize} \end{block} @@ -154,9 +170,13 @@ \begin{block}{Techniques} \begin{itemize} \item \structure{Fill-forward}: Filling missing data in dataset + \item \structure{Moving Average}: Smoothing dataset by averaging data + \item \structure{Percentage Change}: Compute the difference in the data + \item \structure{Min-Max Normalization}: Normalizing dataset + \item \structure{Geometry Mean Not NaN (GMNN)}: Combining multiple datasets \end{itemize} \end{block} @@ -172,12 +192,12 @@ \begin{itemize} \item \structure{Union}: Handling length difference when combining datasets \smallskip + \item \structure{Invariant}: Keeping the data stays normalized \smallskip \item \structure{Representation}: The output reflects the whole datasets \smallskip - \end{itemize} \end{block} \vspace*{1cm} @@ -208,16 +228,23 @@ \begin{block}{Roles} \begin{itemize} \item \structure{Time2Vec} - \begin{itemize} - \item \structure{Linear}: Capturing linear trends \smallskip - \item \structure{Sine, Cosine}: Encoding positions and capturing periodic behaviors \smallskip - \item \structure{Concat}: Concatenating above three layers - \end{itemize} + \begin{itemize} + \item \structure{Linear}: Capturing linear trends + 
\smallskip + + \item \structure{Sine, Cosine}: Encoding positions and capturing + periodic behaviors + \smallskip + + \item \structure{Concat}: Concatenating above three layers + \end{itemize} \bigskip + \item \structure{Attention Layers} - \begin{itemize} - \item To study the trend from different aspects, positions \smallskip - \end{itemize} + \begin{itemize} + \item To study the trend from different aspects, positions + \smallskip + \end{itemize} \end{itemize} \end{block} \end{column} @@ -230,10 +257,15 @@ \begin{block}{Roles} \begin{itemize} \item \structure{Time2Vec}: Catch continuous attribute of time + \item \structure{Concat}: Apply Residual Connection + \item \structure{Attention}: Deep understanding trend movements + \item \structure{Pooling}: Reducing dimension + \item \structure{Dropout}: Prevent over-fitting + \item \structure{Dense}: Apply activation functions (ReLU) \end{itemize} \end{block} @@ -254,7 +286,9 @@ \begin{minipage}[t][2cm][t]{\textwidth} \begin{itemize} \item De-normalized + \item De-percentage change + \item De-moving average \end{itemize} \end{minipage} @@ -265,6 +299,7 @@ \begin{minipage}[t][2cm][t]{\textwidth} \begin{itemize} \item Output is \textbf{normalized} (Invariant) + \item Target is \textbf{one} dataset, output only reflects that one \end{itemize} \end{minipage} @@ -292,29 +327,35 @@ \centerline{Comparing 6 metrics with respect to Exxon (Left), NASDAQ (Right)} \end{frame} - \begin{frame}\frametitle{Conclusion} + \begin{frame} + \frametitle{Conclusion} \begin{block}{Conclusion} \smallskip By leveraging multiple criteria to evaluate the proposed model such as \begin{itemize} \item MAE, MAPE, RMSE, MSE, R2-score (price prediction task) \smallskip + \item Accuracy (trend forecasting task) \end{itemize} - \bigskip - - We can proudly say that, the multi-feature model - \begin{itemize} - \item \textbf{Outperforms} the single-feature one in most cases and they are \textbf{extremely close} to each other in other scenarios. 
- \smallskip - \item Usually yields \textbf{better} result than the SOTA in almost every contexts. - \end{itemize} - \smallskip + \bigskip + + We can proudly say that the multi-feature model + \begin{itemize} + \item \textbf{Outperforms} the single-feature one in most cases and they + are \textbf{extremely close} to each other in other scenarios. + \smallskip + + \item Usually yields \textbf{better} result than the SOTA in almost + every context. + \end{itemize} + \smallskip \end{block} \end{frame} \section{Summary} - \begin{frame}\frametitle{Outline} + \begin{frame} + \frametitle{Outline} \tableofcontents[currentsection] \end{frame} @@ -322,17 +363,23 @@ \begin{exampleblock}{Summary} \begin{itemize} \item We explore deep learning for challenging stock price prediction - \item Paving the way for new feature studies and applications in various deep learning models - \item Demonstrates correlation-based features and innovative neural networks improve stock - price prediction + + \item Paving the way for new feature studies and applications in various + deep learning models + + \item Demonstrates correlation-based features and innovative neural networks + improve stock price prediction \end{itemize} \end{exampleblock} \begin{block}{Further Research} \begin{itemize} \item Fine-tuning the architecture + \item Continuing improving processing methods + \item Comparing to other SOTA neural networks like KAN + \item Applying the architecture to other areas \end{itemize} \end{block} @@ -340,8 +387,9 @@ \miniframesoff \section*{} \begin{frame} - \begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title} - \usebeamerfont{title}Thank you for your attention!\par% + \begin{beamercolorbox} + [sep=8pt,center,shadow=true,rounded=true]{title} \usebeamerfont{title}Thank + you for your attention!\par% \end{beamercolorbox} \end{frame} \end{document} \ No newline at end of file