sudo add-apt-repository ppa:gnome-terminator
sudo apt-get update
sudo apt-get install terminator
Terminator should now be set up as the default terminal. Open a new terminal to use it (shortcut: "Ctrl+Alt+T").
# Imports for a graphviz-based autograd-graph visualizer (make_dot).
from graphviz import Digraph
import re
import torch
import torch.nn.functional as F
# NOTE(review): this line was imported twice in the original; duplicate removed.
# Variable is deprecated in modern PyTorch (tensors carry autograd state directly).
from torch.autograd import Variable
import torchvision.models as models
def make_dot(var): |
Prereq:
apt-get install zsh
apt-get install git-core
Getting zsh configured on Ubuntu is awkward, since plain sh
does not understand the `source` command. So, after installing zsh,
run the following to install oh-my-zsh:
wget https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh -O - | zsh
1. Create a desktop config file:
sudo nano /usr/share/applications/smartsvn.desktop
2. Put the text below into it:
[Desktop Entry]
Name=SmartSVN
Name[en]=SmartSVN
Name[ru]=SmartSVN
from __future__ import division | |
import string | |
import math | |
tokenize = lambda doc: doc.lower().split(" ") | |
document_0 = "China has a strong economy that is growing at a rapid pace. However politically it differs greatly from the US Economy." | |
document_1 = "At last, China seems serious about confronting an endemic problem: domestic violence and corruption." | |
document_2 = "Japan's prime minister, Shinzo Abe, is working towards healing the economic turmoil in his own country for his view on the future of his people." | |
document_3 = "Vladimir Putin is working hard to fix the economy in Russia as the Ruble has tumbled." |
SELECT
distinct pro.name,
(SELECT count(*) FROM details det2 WHERE det2.detail = 'good' AND det2.product_id = det.product_id) AS good,
(SELECT count(*) FROM details det2 WHERE det2.detail = 'ok' AND det2.product_id = det.product_id) AS ok,
(SELECT count(*) FROM details det2 WHERE det2.detail = 'bad' AND det2.product_id = det.product_id) AS bad
"""Sequence-to-sequence model with an attention mechanism.""" | |
# see https://www.tensorflow.org/versions/r0.10/tutorials/seq2seq/index.html | |
# compare https://github.com/tflearn/tflearn/blob/master/examples/nlp/seq2seq_example.py | |
from __future__ import print_function | |
import numpy as np | |
import tensorflow as tf | |
vocab_size=256 # We are lazy, so we avoid fency mapping and just use one *class* per character/byte | |
target_vocab_size=vocab_size | |
learning_rate=0.1 |
#!/bin/bash
# Install build and image/video-codec dependencies (typical OpenCV-from-source prereqs).
sudo apt-get update
sudo apt-get install -y build-essential
sudo apt-get install -y cmake
sudo apt-get install -y libgtk2.0-dev
sudo apt-get install -y pkg-config
sudo apt-get install -y python-numpy python-dev
sudo apt-get install -y libavcodec-dev libavformat-dev libswscale-dev
# NOTE(review): libpng12-dev and libjasper-dev are absent from newer Ubuntu
# releases — confirm target distro before running.
sudo apt-get install -y libjpeg-dev libpng12-dev libtiff5-dev libjasper-dev
""" | |
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy) | |
BSD License | |
""" | |
import numpy as np | |
# data I/O | |
data = open('input.txt', 'r').read() # should be simple plain text file | |
chars = list(set(data)) | |
data_size, vocab_size = len(data), len(chars) |