{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Loading WordVectors as word level features\n", "\n", "This notebook will show how to import word vectors into the `WordLevelFeatures` class." ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os\n", "from pyeeg.io import WordLevelFeatures" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "story_id = 0\n", "\n", "# Path to data containing onsets of words and duration of speech segments\n", "env_path = '/media/hw2512/SeagateExpansionDrive/EEG_data/Katerina_experiment/story_parts/alignement_data/'\n", "wordfreq_path = '/media/hw2512/SeagateExpansionDrive/EEG_data/Katerina_experiment/story_parts/word_frequencies/'\n", "list_wordfreq_files = [item for item in os.listdir(wordfreq_path) if item.endswith('timed.csv')]\n", "# NOTE(review): str.strip() removes any of the given *characters* from both ends, not the literal\n", "# suffix '_word_freq_timed.csv' -- a story name starting/ending with one of those characters would be\n", "# truncated; confirm story names, or use a suffix-removal slice instead.\n", "list_stories = [item.strip('_word_freq_timed.csv') for item in list_wordfreq_files]\n", "list_env_files = [os.path.join(env_path, s, s + '_125Hz.Env') for s in list_stories]\n", "\n", "# Path to WordVectors\n", "wv_path = '/home/hw2512/MachineLearning/Word2Vec/GloVe-1.2/vectors_correct_header.txt'\n", "\n", "# Loading word onset and duration for one story:\n", "wo_path = os.path.join(wordfreq_path, list_wordfreq_files[story_id])\n", "duration_path = os.path.join(env_path, list_env_files[story_id])" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "INFO:pyeeg.io:Word flushington not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word bashfulest not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word adulation not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word blush not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word blush not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word paucity not in word embedding 
model; will use rdm instead\n", "INFO:pyeeg.io:Word fellow-men not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word fastened not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word flushington not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word scraggy not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word timid not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word deprecating not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word uninteresting not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word impervious not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word aimless not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word superstitiously not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word monosyllabic not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word laboriously not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word flushington not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word gyp not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word bedmaker not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word unaccountably not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word sluices not in word embedding model; will use rdm instead\n", "INFO:pyeeg.io:Word flushington not in word embedding model; will use rdm instead\n" ] } ], "source": [ "wf = WordLevelFeatures(path_praat_env=duration_path, path_wordonsets=wo_path, path_wordvectors=wv_path)" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "First few samples of first 4 dimensions:\n", "\n", "[[ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. 
]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 1. 0.07879453 0.09271022 -0.1146835 ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 1. -0.74636799 0.196638 0.490275 ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 
0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]\n", " [ 0. 0. 0. 0. ]]\n" ] } ], "source": [ "feat_matrix = wf.align_word_features(features=['wordvectors'], srate=100, wordonset_feature=True)\n", "print(\"First few samples of first 4 dimensions:\\n\")\n", "print(feat_matrix[250:350, :4])" ] } ], "metadata": { "hide_input": false, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.8" }, "latex_envs": { "LaTeX_envs_menu_present": true, "autoclose": false, "autocomplete": true, "bibliofile": "biblio.bib", "cite_by": "apalike", "current_citInitial": 1, "eqLabelWithNumbers": true, "eqNumInitial": 1, "hotkeys": { "equation": "Ctrl-E", "itemize": "Ctrl-I" }, "labels_anchors": false, "latex_user_defs": false, "report_style_numbering": false, "user_envs_cfg": false }, "toc": { "base_numbering": 1, "nav_menu": {}, "number_sections": true, "sideBar": true, "skip_h1_title": false, "title_cell": "Table of Contents", "title_sidebar": "Contents", "toc_cell": false, "toc_position": {}, "toc_section_display": true, "toc_window_display": false }, "varInspector": { "cols": { "lenName": 16, "lenType": 16, "lenVar": 40 }, "kernels_config": { "python": { "delete_cmd_postfix": "", "delete_cmd_prefix": "del ", "library": "var_list.py", "varRefreshCmd": "print(var_dic_list())" }, "r": { "delete_cmd_postfix": ") ", "delete_cmd_prefix": "rm(", "library": "var_list.r", "varRefreshCmd": "cat(var_dic_list()) " } }, "types_to_exclude": [ "module", "function", "builtin_function_or_method", "instance", "_Feature" ], "window_display": false } }, "nbformat": 4, "nbformat_minor": 2 }