A Tour of Go Exercise: Web Crawler
package main

import (
    "fmt"
    "sync"
)

/*
This solution uses channels to force each goroutine to wait for its child goroutines to exit.
*/

type SafeCounter struct {
    v   map[string]bool
    mux sync.Mutex
}

type Fetcher interface {
    // Fetch returns the body of URL and
    // a slice of URLs found on that page.
    Fetch(url string) (body string, urls []string, err error)
}

var cnt SafeCounter = SafeCounter{v: make(map[string]bool)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, exit chan bool) {
    // Fetch URLs in parallel.
    // Don't fetch the same URL twice.
    if depth <= 0 {
        exit <- true
        return
    }
    cnt.mux.Lock()
    _, ok := cnt.v[url]
    if !ok {
        cnt.v[url] = true
        cnt.mux.Unlock()
    } else {
        // unlock before the blocking channel send so the lock is never held across it
        cnt.mux.Unlock()
        exit <- true
        return
    }
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        exit <- true
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    e := make(chan bool)
    for _, u := range urls {
        go Crawl(u, depth-1, fetcher, e)
    }
    // wait for all child goroutines to exit
    for i := 0; i < len(urls); i++ {
        <-e
    }
    exit <- true
}

func main() {
    exit := make(chan bool)
    go Crawl("https://golang.org/", 4, fetcher, exit)
    <-exit
}

// fakeFetcher is a Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    if res, ok := f[url]; ok {
        return res.body, res.urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
    "https://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{
            "https://golang.org/pkg/",
            "https://golang.org/cmd/",
        },
    },
    "https://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{
            "https://golang.org/",
            "https://golang.org/cmd/",
            "https://golang.org/pkg/fmt/",
            "https://golang.org/pkg/os/",
        },
    },
    "https://golang.org/pkg/fmt/": &fakeResult{
        "Package fmt",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
    "https://golang.org/pkg/os/": &fakeResult{
        "Package os",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
}
package main

import (
    "fmt"
    "sync"
)

/*
This solution uses a WaitGroup to force each goroutine to wait for its child goroutines to exit.
*/

type SafeCounter struct {
    v   map[string]bool
    mux sync.Mutex
}

type Fetcher interface {
    // Fetch returns the body of URL and
    // a slice of URLs found on that page.
    Fetch(url string) (body string, urls []string, err error)
}

var cnt SafeCounter = SafeCounter{v: make(map[string]bool)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup) {
    // Fetch URLs in parallel.
    // Don't fetch the same URL twice.
    if depth <= 0 {
        wg.Done()
        return
    }
    cnt.mux.Lock()
    _, ok := cnt.v[url]
    if !ok {
        cnt.v[url] = true
        cnt.mux.Unlock()
    } else {
        cnt.mux.Unlock()
        wg.Done()
        return
    }
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        wg.Done()
        return
    }
    fmt.Printf("found: %s %q\n", url, body)
    // childWg tracks the goroutines spawned for the URLs found on this page
    var childWg sync.WaitGroup
    for _, u := range urls {
        childWg.Add(1)
        go Crawl(u, depth-1, fetcher, &childWg)
    }
    childWg.Wait()
    wg.Done()
}

func main() {
    var wg sync.WaitGroup
    wg.Add(1) // Add before launching the goroutine so Done can never fire on a zero counter
    go Crawl("https://golang.org/", 4, fetcher, &wg)
    wg.Wait()
}

// fakeFetcher is a Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
    body string
    urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
    if res, ok := f[url]; ok {
        return res.body, res.urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
    "https://golang.org/": &fakeResult{
        "The Go Programming Language",
        []string{
            "https://golang.org/pkg/",
            "https://golang.org/cmd/",
        },
    },
    "https://golang.org/pkg/": &fakeResult{
        "Packages",
        []string{
            "https://golang.org/",
            "https://golang.org/cmd/",
            "https://golang.org/pkg/fmt/",
            "https://golang.org/pkg/os/",
        },
    },
    "https://golang.org/pkg/fmt/": &fakeResult{
        "Package fmt",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
    "https://golang.org/pkg/os/": &fakeResult{
        "Package os",
        []string{
            "https://golang.org/",
            "https://golang.org/pkg/",
        },
    },
}
curbol commented Jun 14, 2023

Using a WaitGroup, and I am not a big fan of global variables:
package main
import (
"fmt"
"sync"
)
type Fetcher interface {
// Fetch returns the body of URL and
// a slice of URLs found on that page.
Fetch(url string) (body string, urls []string, err error)
}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, checker urlChecker, wg *sync.WaitGroup) {
defer wg.Done()
if depth <= 0 {
return
}
checker.Set(url)
body, urls, err := fetcher.Fetch(url)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("found: %s %q\n", url, body)
for _, u := range urls {
isExists := checker.Get(u)
if !isExists {
wg.Add(1)
go Crawl(u, depth-1, fetcher, checker, wg)
}
}
return
}
func main() {
var wg sync.WaitGroup
wg.Add(1)
checker := urlChecker {visited: make(map[string]bool)}
Crawl("https://golang.org/", 4, fetcher, checker, &wg)
wg.Wait()
}
type urlChecker struct {
mut sync.Mutex
visited map[string]bool
}
func (checker *urlChecker) Set(url string) {
checker.mut.Lock()
checker.visited[url] = true
checker.mut.Unlock()
}
func (checker *urlChecker) Get(url string) bool {
checker.mut.Lock()
defer checker.mut.Unlock()
_, ok := checker.visited[url]
return ok
}
type fakeResult struct {
body string
urls []string
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
if res, ok := f[url]; ok {
return res.body, res.urls, nil
}
return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
"https://golang.org/": &fakeResult{
"The Go Programming Language",
[]string{
"https://golang.org/pkg/",
"https://golang.org/cmd/",
},
},
"https://golang.org/pkg/": &fakeResult{
"Packages",
[]string{
"https://golang.org/",
"https://golang.org/cmd/",
"https://golang.org/pkg/fmt/",
"https://golang.org/pkg/os/",
},
},
"https://golang.org/pkg/fmt/": &fakeResult{
"Package fmt",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
"https://golang.org/pkg/os/": &fakeResult{
"Package os",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
}
Using only what has been covered in previous lessons (select, <-exit, Mutex), I didn't change the main function, but refactored the Crawl function to make the code more readable.
package main
import (
"fmt"
"sync"
)
// ADDED (3): SET UP MAP STRUCT FOR FETCHED URLS (mutual exclusion)
type FetchedUrls struct {
mu sync.Mutex
crawled map[string]bool
}
func (f *FetchedUrls) Add(key string) {
f.mu.Lock()
f.crawled[key] = true
f.mu.Unlock()
}
func (f *FetchedUrls) Exists(key string) bool {
f.mu.Lock()
defer f.mu.Unlock()
return f.crawled[key]
}
// ADDED (1): STRUCT FOR MAIN CHANNEL
type FakeResponse struct {
body string
url string
err error
}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
fetchedUrls := FetchedUrls{crawled: map[string]bool{}}
ch := make(chan FakeResponse)
exit := make(chan bool)
go crawlHelper(url, depth, fetcher, fetchedUrls, ch, exit)
for { // infinite loop ...
select { // ... with a listener
case fakeResponse := <-ch:
if fakeResponse.err != nil {
fmt.Println(fakeResponse.err)
} else {
fmt.Printf("found: %s %q\n", fakeResponse.url, fakeResponse.body)
}
case <-exit:
return // terminate listening to all goroutines
}
}
}
// ADDED (1): RECURSIVE LOOP
func crawlHelper(url string, depth int,
fetcher Fetcher, fetchedUrls FetchedUrls,
ch chan FakeResponse, exit chan bool) {
fetchedUrls.Add(url)
body, urls, err := fetcher.Fetch(url)
ch <- FakeResponse{body, url, err}
if depth > 1 {
children := 0
for _, u := range urls {
if !fetchedUrls.Exists(u) { // only create a new goroutine if url hasn't been fetched
children++
go crawlHelper(u, depth-1, fetcher, fetchedUrls, ch, exit)
}
}
for i := 0; i < children; i++ {
<-exit // collect exit calls to prevent calling parent directly
}
}
exit <- true // call parent when this goroutine and its children are done
}
func main() {
Crawl("https://golang.org/", 4, fetcher)
}
type Fetcher interface {
// Fetch returns the body of URL and
// a slice of URLs found on that page.
Fetch(url string) (body string, urls []string, err error)
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult
type fakeResult struct {
body string
urls []string
}
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
if res, ok := f[url]; ok {
return res.body, res.urls, nil
}
return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
"https://golang.org/": &fakeResult{
"The Go Programming Language",
[]string{
"https://golang.org/pkg/",
"https://golang.org/cmd/",
},
},
"https://golang.org/pkg/": &fakeResult{
"Packages",
[]string{
"https://golang.org/",
"https://golang.org/cmd/",
"https://golang.org/pkg/fmt/",
"https://golang.org/pkg/os/",
},
},
"https://golang.org/pkg/fmt/": &fakeResult{
"Package fmt",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
"https://golang.org/pkg/os/": &fakeResult{
"Package os",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
}
Just a struct with a map + mutex, populating a channel:
package main
import (
"fmt"
"sync"
)
type Fetcher interface {
// Fetch returns the body of URL and
// a slice of URLs found on that page.
Fetch(url string) (body string, urls []string, err error)
}
type SafeCrawl struct {
crawled map[string]int
mu sync.Mutex
}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func CrawlRecursive(url string, depth int, fetcher Fetcher, ch chan string, sc SafeCrawl) {
sc.mu.Lock()
if depth <= 0 || sc.crawled[url] != 0 {
return
}
sc.crawled[url] = 1
sc.mu.Unlock()
body, urls, err := fetcher.Fetch(url)
if err != nil {
fmt.Println(err)
return
}
ch <- fmt.Sprintf("found: %s %q", url, body)
for _, u := range urls {
CrawlRecursive(u, depth-1, fetcher, ch, sc)
}
}
func Crawl(url string, depth int, fetcher Fetcher, ch chan string) {
sc := SafeCrawl{crawled : make(map[string]int)}
CrawlRecursive(url, depth, fetcher, ch, sc)
close(ch)
}
func main() {
ch := make(chan string)
go Crawl("https://golang.org/", 4, fetcher, ch)
for crawled := range ch {
fmt.Println(crawled)
}
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult
type fakeResult struct {
body string
urls []string
}
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
if res, ok := f[url]; ok {
return res.body, res.urls, nil
}
return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
"https://golang.org/": &fakeResult{
"The Go Programming Language",
[]string{
"https://golang.org/pkg/",
"https://golang.org/cmd/",
},
},
"https://golang.org/pkg/": &fakeResult{
"Packages",
[]string{
"https://golang.org/",
"https://golang.org/cmd/",
"https://golang.org/pkg/fmt/",
"https://golang.org/pkg/os/",
},
},
"https://golang.org/pkg/fmt/": &fakeResult{
"Package fmt",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
"https://golang.org/pkg/os/": &fakeResult{
"Package os",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
}
@Fleyderer I'd say you need to call CrawlRecursive with go every time in order to properly make it concurrent (a rough sketch of that change follows).
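For illustration, here is a minimal sketch of what that change to @Fleyderer's solution might look like. The *SafeCrawl pointer, the extra *sync.WaitGroup parameter, and the stubFetcher are my additions for illustration, not part of the original comment; the WaitGroup is only there so Crawl still knows when it is safe to close the channel once every goroutine has finished.

package main

import (
    "fmt"
    "sync"
)

type Fetcher interface {
    Fetch(url string) (body string, urls []string, err error)
}

type SafeCrawl struct {
    mu      sync.Mutex
    crawled map[string]int
}

// CrawlRecursive now runs each recursive call in its own goroutine and
// reports completion through wg.
func CrawlRecursive(url string, depth int, fetcher Fetcher, ch chan string, sc *SafeCrawl, wg *sync.WaitGroup) {
    defer wg.Done()
    sc.mu.Lock()
    if depth <= 0 || sc.crawled[url] != 0 {
        sc.mu.Unlock() // unlock on the early-return path too
        return
    }
    sc.crawled[url] = 1
    sc.mu.Unlock()
    body, urls, err := fetcher.Fetch(url)
    if err != nil {
        fmt.Println(err)
        return
    }
    ch <- fmt.Sprintf("found: %s %q", url, body)
    for _, u := range urls {
        wg.Add(1)
        go CrawlRecursive(u, depth-1, fetcher, ch, sc, wg) // concurrent recursion
    }
}

func Crawl(url string, depth int, fetcher Fetcher, ch chan string) {
    sc := &SafeCrawl{crawled: make(map[string]int)}
    var wg sync.WaitGroup
    wg.Add(1)
    go CrawlRecursive(url, depth, fetcher, ch, sc, &wg)
    wg.Wait() // all goroutines are done, so no more sends on ch
    close(ch)
}

// stubFetcher is a tiny stand-in for the exercise's fakeFetcher.
type stubFetcher map[string][]string

func (f stubFetcher) Fetch(url string) (string, []string, error) {
    if urls, ok := f[url]; ok {
        return "body of " + url, urls, nil
    }
    return "", nil, fmt.Errorf("not found: %s", url)
}

func main() {
    fetcher := stubFetcher{
        "https://golang.org/":     {"https://golang.org/pkg/", "https://golang.org/cmd/"},
        "https://golang.org/pkg/": {"https://golang.org/"},
    }
    ch := make(chan string)
    go Crawl("https://golang.org/", 4, fetcher, ch)
    for line := range ch {
        fmt.Println(line)
    }
}

With this shape, main keeps ranging over ch exactly as in the original comment, and the range loop ends when Crawl closes the channel.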
package main
import (
"fmt"
"sync"
)
type Fetcher interface {
// Fetch returns the body of URL and
// a slice of URLs found on that page.
Fetch(url string) (body string, urls []string, err error)
}
type urlCache struct {
sync.RWMutex
m map[string]bool
}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup, cache *urlCache) {
if depth <= 0 {
return
}
body, urls, err := fetcher.Fetch(url)
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("found: %s %q\n", url, body)
for _, u := range urls {
cache.RLock()
_, ok := cache.m[u]
cache.RUnlock()
if ok {
continue
}
cache.Lock()
cache.m[u] = true
cache.Unlock()
wg.Add(1)
go func(u string) { // pass u as a parameter so each goroutine gets its own copy of the loop variable
defer wg.Done()
Crawl(u, depth-1, fetcher, wg, cache)
}(u)
}
return
}
func main() {
wg := sync.WaitGroup{}
cache := urlCache{m: make(map[string]bool)}
wg.Add(1)
go func() {
defer wg.Done()
Crawl("https://golang.org/", 4, fetcher, &wg, &cache)
}()
wg.Wait()
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult
type fakeResult struct {
body string
urls []string
}
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
if res, ok := f[url]; ok {
return res.body, res.urls, nil
}
return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
"https://golang.org/": &fakeResult{
"The Go Programming Language",
[]string{
"https://golang.org/pkg/",
"https://golang.org/cmd/",
},
},
"https://golang.org/pkg/": &fakeResult{
"Packages",
[]string{
"https://golang.org/",
"https://golang.org/cmd/",
"https://golang.org/pkg/fmt/",
"https://golang.org/pkg/os/",
},
},
"https://golang.org/pkg/fmt/": &fakeResult{
"Package fmt",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
"https://golang.org/pkg/os/": &fakeResult{
"Package os",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
}
re: https://gist.github.com/SaoYan/32cf28b4689d3d9b077cc96d105a31df?permalink_comment_id=4667391#gistcomment-4667391
there is a race condition in Get/Set; think about it. One way to close that window is sketched below.
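The race is that the Get check and the later Set are two separate critical sections, so two goroutines can both see a URL as unvisited and both crawl it. One way to close that window is to make the check and the insert a single operation under the lock. A minimal sketch, assuming a hypothetical SetIfAbsent method on a pointer to the urlChecker type from that comment (the original passed urlChecker by value, which also copies the mutex):

package main

import (
    "fmt"
    "sync"
)

type urlChecker struct {
    mut     sync.Mutex
    visited map[string]bool
}

// SetIfAbsent records url and reports whether it was newly added.
// Holding the lock across both the lookup and the write removes the
// window in which two goroutines can both decide to crawl the same URL.
func (c *urlChecker) SetIfAbsent(url string) bool {
    c.mut.Lock()
    defer c.mut.Unlock()
    if c.visited[url] {
        return false
    }
    c.visited[url] = true
    return true
}

func main() {
    checker := &urlChecker{visited: make(map[string]bool)}
    var wg sync.WaitGroup
    urls := []string{"https://golang.org/", "https://golang.org/", "https://golang.org/pkg/"}
    for _, u := range urls {
        wg.Add(1)
        go func(u string) {
            defer wg.Done()
            if checker.SetIfAbsent(u) {
                fmt.Println("would crawl:", u) // only the first goroutine to claim a URL crawls it
            }
        }(u)
    }
    wg.Wait()
}

A caller would then replace the separate Get/Set pair with a single SetIfAbsent call and only descend into a URL when it returns true.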
This is what I have done
package main
import (
"fmt"
"sync"
)
type Fetcher interface {
// Fetch returns the body of URL and
// a slice of URLs found on that page.
Fetch(url string) (body string, urls []string, err error)
}
type Cache struct {
mu sync.Mutex
fetchedUrls map[string]bool
}
var end = make(chan int)
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, cache *Cache) {
// TODO: Fetch URLs in parallel.
// TODO: Don't fetch the same URL twice.
// This implementation doesn't do either:
if depth <= 0 {
return
}
body, urls, err := fetcher.Fetch(url)
(*cache).mu.Lock()
(*cache).fetchedUrls[url] = true
(*cache).mu.Unlock()
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("found: %s %q\n", url, body)
for _, url := range urls {
if _, ok := (*cache).fetchedUrls[url]; ok {
continue
}
go func(url string) { // pass url as a parameter to avoid capturing the loop variable
Crawl(url, depth-1, fetcher, cache)
end <- 1 // signal only after this branch of the crawl has finished
}(url)
<-end
}
return
}
func main() {
cache := Cache{
fetchedUrls: make(map[string]bool),
}
Crawl("https://golang.org/", 4, fetcher, &cache)
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult
type fakeResult struct {
body string
urls []string
}
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
if res, ok := f[url]; ok {
return res.body, res.urls, nil
}
return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
"https://golang.org/": &fakeResult{
"The Go Programming Language",
[]string{
"https://golang.org/pkg/",
"https://golang.org/cmd/",
},
},
"https://golang.org/pkg/": &fakeResult{
"Packages",
[]string{
"https://golang.org/",
"https://golang.org/cmd/",
"https://golang.org/pkg/fmt/",
"https://golang.org/pkg/os/",
},
},
"https://golang.org/pkg/fmt/": &fakeResult{
"Package fmt",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
"https://golang.org/pkg/os/": &fakeResult{
"Package os",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
}
You just need to understand the semantics of defer. No need for a WaitGroup. No need for a channel.
package main
import (
"fmt"
"sync"
"time"
)
type Fetcher interface {
// Fetch returns the body of URL and
// a slice of URLs found on that page.
Fetch(url string) (body string, urls []string, err error)
}
type UrlCache struct {
mu sync.Mutex
cache map[string]string
}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, urlCache *UrlCache) {
// TODO: Fetch URLs in parallel.
// TODO: Don't fetch the same URL twice.
// This implementation doesn't do either:
if depth <= 0 {
return
}
urlCache.mu.Lock()
_, ok := urlCache.cache[url]
defer urlCache.mu.Unlock()
if ok {
return
}
body, urls, err := fetcher.Fetch(url)
urlCache.cache[url] = url
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("found: %s %q\n", url, body)
for _, u := range urls {
go Crawl(u, depth-1, fetcher, urlCache)
}
return
}
func main() {
cache := UrlCache{cache: make(map[string]string)}
go Crawl("https://golang.org/", 4, fetcher, &cache)
time.Sleep(time.Second)
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult
type fakeResult struct {
body string
urls []string
}
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
if res, ok := f[url]; ok {
return res.body, res.urls, nil
}
return "", nil, fmt.Errorf("not found: %s", url)
}
// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
"https://golang.org/": &fakeResult{
"The Go Programming Language",
[]string{
"https://golang.org/pkg/",
"https://golang.org/cmd/",
},
},
"https://golang.org/pkg/": &fakeResult{
"Packages",
[]string{
"https://golang.org/",
"https://golang.org/cmd/",
"https://golang.org/pkg/fmt/",
"https://golang.org/pkg/os/",
},
},
"https://golang.org/pkg/fmt/": &fakeResult{
"Package fmt",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
"https://golang.org/pkg/os/": &fakeResult{
"Package os",
[]string{
"https://golang.org/",
"https://golang.org/pkg/",
},
},
}