Dhruv Arya (@silent-vim)
sudo apt-get -y install software-properties-common
sudo add-apt-repository -y ppa:certbot/certbot
sudo apt-get -y update
sudo apt-get -y install certbot
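# certbot's standalone authenticator runs its own temporary web server,
# so the OpenVPN Access Server is stopped first to free the port it listens on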
sudo service openvpnas stop
sudo certbot certonly \
--standalone \
--non-interactive \
@silent-vim
silent-vim / elasticsearch_cluster.yml
Created September 9, 2018 02:53 — forked from patrickleet/elasticsearch_cluster.yml
docker stack for elasticsearch cluster
version: "3"
networks:
  default:
  proxy:
    external: true
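  # the 'proxy' network is external: it must already exist before the stack is deployed
  # (e.g. created once with `docker network create --driver overlay proxy`)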
volumes:
@silent-vim
silent-vim / attention_layer_pytorch.py
Created March 19, 2018 21:10 — forked from thomwolf/attention_layer_pytorch.py
A pyTorch attention layer for torchMoji model
class Attention(Module):
    """
    Computes a weighted average of channels across timesteps (1 parameter per channel).
    """
    def __init__(self, attention_size, return_attention=False):
        """ Initialize the attention layer
        # Arguments:
            attention_size: Size of the attention vector.
            return_attention: If true, output will include the weight for each input token
                              used for the prediction
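The preview stops inside the docstring. As a rough, self-contained sketch of the idea it describes (a single learned vector scores each timestep, the scores are softmaxed over time, and the channels are averaged with those weights), here is a minimal version; it is not the gist's code and omits any sequence-length masking a complete layer would need:

import torch
from torch import nn

class SimpleAttention(nn.Module):
    """Weighted average over timesteps, one learned weight per channel (sketch)."""
    def __init__(self, attention_size, return_attention=False):
        super().__init__()
        self.attention_vector = nn.Parameter(torch.empty(attention_size))
        nn.init.normal_(self.attention_vector, std=0.05)
        self.return_attention = return_attention

    def forward(self, inputs):
        # inputs: (batch, timesteps, attention_size), e.g. LSTM outputs
        logits = inputs.matmul(self.attention_vector)          # (batch, timesteps)
        weights = torch.softmax(logits, dim=-1)                # one weight per timestep
        pooled = (inputs * weights.unsqueeze(-1)).sum(dim=1)   # (batch, attention_size)
        return (pooled, weights) if self.return_attention else pooled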
@silent-vim
silent-vim / kafka-stack-docker-compose.yml
Last active February 15, 2018 02:48 — forked from everpeace/docker-compose.yml
kafka cluster in docker-compose.
# WARNING: This docker-compose.yml is only for testing purposes.
# Parameters:
# - name: CONFLUENT_PLATFORM_VERSION
# default: 3.0.0
# reference: https://hub.docker.com/u/confluentinc/
# Ports:
# - description: Major ports are exposed to host computer
# - zookeeper: 2181
# kafka1: 9091
# kafka2: 9092
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, n_layers=1):
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
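The class is cut off after its first attributes. One common way a character-level RNN like this is filled out (an assumption about intent, not necessarily what this gist does) is an embedding encoder, a GRU, and a linear decoder:

import torch
from torch import nn

class CharRNNSketch(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, n_layers=1):
        super().__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.encoder = nn.Embedding(input_size, hidden_size)    # token id -> dense vector
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, batch_first=True)
        self.decoder = nn.Linear(hidden_size, output_size)      # hidden state -> logits

    def forward(self, tokens, hidden):
        # tokens: (batch, seq_len) integer ids; hidden: (n_layers, batch, hidden_size)
        output, hidden = self.gru(self.encoder(tokens), hidden)
        return self.decoder(output), hidden

    def init_hidden(self, batch_size):
        return torch.zeros(self.n_layers, batch_size, self.hidden_size)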
@silent-vim
silent-vim / dl_wontreadlist.md
Created September 23, 2017 23:14 — forked from garibarba/dl_wontreadlist.md
Deep Learning Won't-Read List
@silent-vim
silent-vim / attentional-LSTMCell.py
Created September 23, 2017 23:13
pytorch attentional LSTM cell
class attentionalLSTMCell(nn.Module):
    def __init__(self, input_size, hidden_size, num_variants):
        super(attentionalLSTMCell, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.num_variants = num_variants
        self.ih = nn.Linear(self.input_size, 4 * self.hidden_size * self.num_variants)
        self.hh = nn.Linear(self.hidden_size, 4 * self.hidden_size * self.num_variants)
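Only the constructor is shown; each linear layer produces 4 * hidden_size gate pre-activations for every one of num_variants parameter sets. The step below is a sketch of how such a cell could run, with a uniform average over the variants standing in, purely as an assumption, for whatever attention the gist actually learns:

import torch
from torch import nn

class AttentionalLSTMCellSketch(nn.Module):
    def __init__(self, input_size, hidden_size, num_variants):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_variants = num_variants
        self.ih = nn.Linear(input_size, 4 * hidden_size * num_variants)
        self.hh = nn.Linear(hidden_size, 4 * hidden_size * num_variants)

    def forward(self, x, state):
        h, c = state
        # one set of LSTM gate pre-activations per variant: (batch, num_variants, 4 * hidden)
        gates = (self.ih(x) + self.hh(h)).view(-1, self.num_variants, 4 * self.hidden_size)
        gates = gates.mean(dim=1)   # placeholder for a learned attention over the variants
        i, f, g, o = gates.chunk(4, dim=-1)
        c_next = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
        h_next = torch.sigmoid(o) * torch.tanh(c_next)
        return h_next, c_next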
package com.learndirect.tutor;
import static com.google.common.base.Charsets.UTF_8;
import static org.apache.commons.codec.binary.Base64.decodeBase64;
import static org.apache.commons.codec.binary.Base64.encodeBase64;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Map;