// Copy this code into your browser's console to print out the text content of all tweets in
// a thread that you are currently composing on Twitter desktop web. This will print out
// all the text of your tweets to the console, which you can then copy to a Word/text doc,
// presumably for tweeting at a later time. Unfortunately, this does not work with images.
(function() {
    // All your tweet inputs have a div with the attribute 'aria-label' set to 'Tweet text'
    var tweetInputs = document.querySelectorAll('div[aria-label="Tweet text"]');
    var tweetTexts = [];
    // Collect the text of each tweet input and print it all to the console
    for (var i = 0; i < tweetInputs.length; i++) {
        tweetTexts.push(tweetInputs[i].textContent);
    }
    console.log(tweetTexts.join("\n\n"));
})();
import re
import pandas as pd

def colfind(self, query, ignore_case=True):
    # Return the column names of a DataFrame that contain the (literal) query string
    if type(self) != pd.DataFrame:
        raise Exception(
            'colfind method only applicable to pandas.DataFrame objects'
        )
    re_query = re.escape(query)
    re_search = re.compile(re_query, (re.IGNORECASE if ignore_case else 0))
    return [col for col in self.columns if re_search.search(str(col))]
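# Usage sketch (not part of the gist): the type check on `self` suggests colfind is
# meant to be attached to pandas.DataFrame as a method; the monkey-patch line and the
# example column names below are illustrative assumptions.
pd.DataFrame.colfind = colfind
df = pd.DataFrame(columns=["UnitPrice", "unit_cost", "Quantity"])
df.colfind("unit")   # -> ['UnitPrice', 'unit_cost']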
# Visualize the HTML structure of a BeautifulSoup object with:
#    - vertical connecting lines
#    - option to remove attributes
# Forked from Dan Mattera's: https://gist.github.com/danmattera/ef11cb37c31d732f9e5d2347eea876c2
# By Alex Miller https://alex.miller.im
from bs4 import BeautifulSoup as BS

def BeautifulSoup(X):
    # This just sets the default parser for BeautifulSoup
    # to "html.parser" so it doesn't always add <html><body>
    # tags around parsed fragments
    return BS(X, "html.parser")
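# Quick illustration (not part of the gist): with the wrapper above, a parsed fragment
# keeps its own top-level tag instead of being wrapped in <html><body>.
soup = BeautifulSoup("<div><p>Hello</p></div>")
print(soup)   # prints: <div><p>Hello</p></div>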
def handler(e):
    # Google the current exception (restricted to stackoverflow.com) in a new browser tab
    import sys, urllib.parse, webbrowser
    et = sys.exc_info()[0]
    q = urllib.parse.quote_plus('"{}: {}" site:stackoverflow.com'.format(str(et)[8:-2], e))
    u = "https://www.google.com/search?q={}".format(q)
    webbrowser.open_new_tab(u)

try:
    1/0
except Exception as e:
    handler(e)
Verifying my Blockstack ID is secured with the address 1Jju5B2v6Zskx3aeB1NnAk3MV5JBg3Mh8r https://explorer.blockstack.org/address/1Jju5B2v6Zskx3aeB1NnAk3MV5JBg3Mh8r
from base64 import b64encode
import requests

# Request an OAuth2 access token from the Audiosear.ch API using client credentials
as_api_key = "###########################################################"
as_api_secret = "###########################################################"
params = {'grant_type':'client_credentials'}
unencoded_sig = "{}:{}".format(as_api_key, as_api_secret).encode('utf-8')
signature = b64encode(unencoded_sig).decode('utf-8')
headers = {'Authorization': "Basic {}".format(signature),
           'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post('https://www.audiosear.ch/oauth/token', params=params, headers=headers)
result = response.json()
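# Not part of the gist: with a standard OAuth2 client-credentials response, the token
# would typically be pulled out of `result` and sent as a Bearer header on later calls;
# the 'access_token' field name and the endpoint placeholder are assumptions.
access_token = result.get('access_token')
auth_headers = {'Authorization': 'Bearer {}'.format(access_token)}
# e.g. requests.get('https://www.audiosear.ch/api/...', headers=auth_headers)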
import numpy as np
import base64
import urllib.request
from PIL import Image

# Convert an image URL to a base64-encoded thumbnail
def img2base64(img_link):
    # Download the image to a temporary file, then load it as a numpy array
    with open("/tmp/img_file.jpg", "wb") as f:
        f.write(urllib.request.urlopen(img_link).read())
    tmp_img = np.asarray(Image.open("/tmp/img_file.jpg"))
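    # The gist preview cuts off above. A minimal sketch of one way the function could
    # finish, given its name and the "Convert image to base64 thumbnail" comment; the
    # 128x128 size, the io.BytesIO buffer, and the return value are assumptions, not
    # the original code.
    import io
    thumb = Image.fromarray(tmp_img)
    thumb.thumbnail((128, 128))
    buffer = io.BytesIO()
    thumb.save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")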
#!/usr/local/bin/python3.3
# Gist available at: https://gist.github.com/alexmill/cd6ec9ebf1e5ee5fd314
# From article hosted at: http://alex.miller.im/posts/bing-azure-api-authentication-python-requests/
from urllib.parse import quote_plus
import json
import requests

def bing_search(query):
    # Your base API URL
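    # The preview ends here. A rough sketch of how the request is likely built, based on
    # the linked article's topic (the Azure Marketplace Bing Search API); the exact URL,
    # the api_key placeholder, and the empty-username basic-auth pattern are assumptions,
    # not copied from the gist.
    api_key = "YOUR-AZURE-MARKETPLACE-ACCOUNT-KEY"
    url = "https://api.datamarket.azure.com/Bing/Search/Web?Query=%27{}%27&$format=json".format(quote_plus(query))
    # The Marketplace API accepted the account key as the basic-auth password
    response = requests.get(url, auth=("", api_key))
    return json.loads(response.text)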
/* Track outbound links */
/* Requires jQuery */
/* Paste this immediately below your Google Analytics snippet */
var outbounds = $("a").filter(function(index){
    try{
        var h = $(this).attr("href");
        return h.indexOf("http") > -1 && h.indexOf(window.location.host) == -1;
    } catch(err){
        return false;
    }
});
// Report each outbound click as a Google Analytics event
// (the category/action/label names here are illustrative)
outbounds.on("click", function(){
    ga("send", "event", "outbound", "click", $(this).attr("href"));
});
#!/usr/local/bin/python3.3
from bs4 import BeautifulSoup as bs4
import requests
import feedparser
import urllib.parse

# Find RSS/Atom feed URLs for a given website
def findfeed(site):
    raw = requests.get(site).text
    result = []
    possible_feeds = []
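    # The preview stops here. A rough sketch of how feed discovery typically continues,
    # given the imports above (BeautifulSoup to scan the page, feedparser to validate
    # candidates); the tag and attribute choices below are assumptions, not the gist's
    # actual code.
    parsed = bs4(raw, "html.parser")
    for link in parsed.find_all("link", rel="alternate"):
        if "rss" in link.get("type", "") or "xml" in link.get("type", ""):
            href = link.get("href")
            if href:
                possible_feeds.append(urllib.parse.urljoin(site, href))
    # Keep only candidates that feedparser recognizes as real feeds
    for url in possible_feeds:
        if feedparser.parse(url).entries and url not in result:
            result.append(url)
    return result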