How to add tests during development, layer by layer (a minimal sketch follows the list):
- Model
- Repository
- Controller
- Auth
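
The notes do not name a test framework, so the block below is only an illustrative pytest-style sketch of the Model and Repository layers; `User`, `InMemoryUserRepository`, and every method on them are hypothetical names invented for this example. Controller and Auth tests would typically go through the framework's HTTP test client instead.

```python
# Hypothetical model/repository pair, used only to illustrate the test layers listed above.
class User:
    def __init__(self, name: str, email: str):
        self.name = name
        self.email = email


class InMemoryUserRepository:
    def __init__(self):
        self._users = {}

    def save(self, user: User) -> None:
        self._users[user.email] = user

    def find_by_email(self, email: str):
        return self._users.get(email)


# Model test: the object keeps the data it was constructed with.
def test_user_model_keeps_its_fields():
    user = User(name="Alice", email="alice@example.com")
    assert user.name == "Alice"
    assert user.email == "alice@example.com"


# Repository test: whatever is saved can be read back.
def test_repository_roundtrip():
    repo = InMemoryUserRepository()
    repo.save(User(name="Bob", email="bob@example.com"))
    assert repo.find_by_email("bob@example.com").name == "Bob"
```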
```sh
# delete local tag '12345'
git tag -d 12345

# delete remote tag '12345' (eg, GitHub version too)
git push origin :refs/tags/12345

# alternative approach
git push --delete origin tagName
git tag -d tagName
```
```python
from math import (
    pi,
    cos,
    sin,
    log,
    atan,
    exp
)
```
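
Continuing the import above, a quick sanity check that the names are bound directly in the module namespace; the expected values in the comments are standard identities, not part of the original snippet:

```python
print(cos(pi))        # -1.0
print(sin(0.0))       # 0.0
print(log(exp(1.0)))  # 1.0
print(4 * atan(1.0))  # 3.141592653589793, i.e. pi
```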
```sh
#!/bin/sh
#
# chkconfig: 35 99 99
# description: Node.js /home/nodejs/sample/app.js
#
. /etc/rc.d/init.d/functions

USER="nodejs"
```
```python
import tensorflow as tf
import numpy as np
import time

N = 10000         # number of data points
K = 4             # number of clusters
MAX_ITERS = 1000  # upper bound on k-means iterations

start = time.time()
```
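
The snippet stops right after the setup, so the loop below is only a guess at what follows: a plain NumPy sketch of Lloyd's k-means algorithm that reuses `N`, `K`, `MAX_ITERS`, and `start` from the block above. The random 2-D data, the convergence test, and all variable names are assumptions; the original presumably expresses the same loop with TensorFlow ops.

```python
points = np.random.randn(N, 2).astype(np.float32)           # assumed 2-D sample data
centroids = points[np.random.choice(N, K, replace=False)]   # random initial centers

for step in range(MAX_ITERS):
    # Assignment step: label each point with its nearest centroid.
    distances = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
    assignments = distances.argmin(axis=1)

    # Update step: move each centroid to the mean of its assigned points.
    new_centroids = np.array([
        points[assignments == k].mean(axis=0) if np.any(assignments == k) else centroids[k]
        for k in range(K)
    ])

    # Stop once the centroids no longer move.
    if np.allclose(new_centroids, centroids):
        break
    centroids = new_centroids

print("k-means finished in %.2f s after %d iterations" % (time.time() - start, step + 1))
```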
```sh
# source: http://d.stavrovski.net/blog/post/how-to-install-and-setup-oracle-java-jdk-in-centos-6

# rpm
wget --no-cookies \
     --no-check-certificate \
     --header "Cookie: oraclelicense=accept-securebackup-cookie" \
     "http://download.oracle.com/otn-pub/java/jdk/7u55-b13/jdk-7u55-linux-x64.rpm" \
     -O jdk-7-linux-x64.rpm

# ubuntu
```
core-site.xml:

```xml
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://ec2-54-148-213-237.us-west-2.compute.amazonaws.com</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/home/hadoop/local/var/hadoop/tmp/hadoop-${user.name}</value>
</property>
```
```python
# coding=utf-8
# Goal: parse house information for each district from websites.
# For each district, extract 「土地區段位置或建物區門牌」 (land section / building street address),
# 「建物型態」 (building type), 「建物現況格局」 (current layout), 「坪數」 (floor area in ping),
# 「屋齡」 (building age), 「總價元」 (total price in NTD), and 「資料來源」 (data source) into a CSV file.
# Procedure:
# 1. Get the number of pages for each district by parsing the first HTML page.
# 2. For each district, fetch all HTML pages, parse their content with an HTML parser, and save the data to a file.
import sys
import math
```
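
The file is cut off after its first two imports, so the following Python 3 sketch only illustrates the two-step procedure the comments describe. The URL pattern, the page-count regex, the district list, and the empty `parse_rows` body are all hypothetical placeholders; the real parsing rules of the original script are not reproduced here.

```python
import csv
import re
import urllib.request

# Hypothetical listing URL; the real site and query parameters are not in the snippet.
BASE_URL = "http://example.com/houses?district={district}&page={page}"

def fetch(url):
    """Download one page and return its HTML as text."""
    with urllib.request.urlopen(url) as resp:
        return resp.read().decode("utf-8")

def page_count(first_page_html):
    """Step 1: read the number of result pages from the first page (pattern is assumed)."""
    match = re.search(r"total pages:\s*(\d+)", first_page_html)
    return int(match.group(1)) if match else 1

def parse_rows(html):
    """Step 2: extract one record per listing; the original script's parser goes here."""
    return []  # placeholder

def crawl_district(district, writer):
    first = fetch(BASE_URL.format(district=district, page=1))
    for page in range(1, page_count(first) + 1):
        html = first if page == 1 else fetch(BASE_URL.format(district=district, page=page))
        for row in parse_rows(html):
            writer.writerow(row)

with open("houses.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    for district in ["District A", "District B"]:  # hypothetical district names
        crawl_district(district, writer)
```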
```scala
package thunder.streaming

import org.apache.spark.{SparkConf, Logging}
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.mllib.clustering.KMeansModel

import scala.util.Random.nextDouble
```