This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import React from 'react' | |
import {Route} from 'react-router-dom'; | |
import TransitionGroup from 'react-transition-group/TransitionGroup'; | |
import _ from 'lodash'; | |
import HomePage from 'components/Pages/Home/home.page'; | |
import ParallaxPage from 'components/Pages/Home/parallax.page'; | |
import NavigationPage from 'components/Pages/Navigation/navigation.page'; | |
import BlogListPage from 'components/Pages/Blog/blogList.page'; |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import React from "react"; | |
import { Switch, Route } from "react-router-dom"; | |
import asyncComponent from "../../helpers/AsyncFunc"; | |
class AppRouter extends React.Component { | |
render() { | |
const { url } = this.props; | |
return ( | |
<Switch> |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# -*- coding: utf-8 -*- | |
import scrapy | |
import re | |
from scrapy import Request | |
import csv | |
import os | |
# TODO: use scrapy-proxies to ensure we don't get throttled or banned by Craigslist when sifting through URLs and extracting data
class JobsSpider(scrapy.Spider): |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# -- OVERVIEW -- | |
# This script handles the execution of the CraigsList Jobs Scrapy crawler,
# which gathers job listing contents. We then upload the results to an S3 bucket.
# Build a timestamp-prefixed output filename so repeated runs never overwrite
# earlier results (e.g. 2024-01-31_12-00-00_results.json).
timestamp=$(date +%Y-%m-%d_%H-%M-%S)
filename="${timestamp}_results.json"
# Run the 'jobs' spider and store the scraped items as JSON.
# Quote the expansion to prevent word splitting / globbing if the
# filename format ever gains spaces or glob characters.
scrapy crawl jobs -o "$filename"
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash | |
# OVERVIEW | |
# This bash script handles the organization and compression of blog media items within the app.
# When a directory is supplied to the script, it loops through the directory's items and handles the
# renaming and compression accordingly. Currently, only one type of media item is handled by the script:
# -- image items - .JPG, .jpg | |
targetedFolder=$1 | |
# Ensure argument is a valid directory |