Last active
April 9, 2026 03:47
-
-
Save djma/273d0e7f2359a7752939f19eb61bb77a to your computer and use it in GitHub Desktop.
Turn your Spotify Liked Songs into a CSV
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/**
 * Spotify Liked Songs Scraper
 *
 * How to use:
 *   1. Go to your Liked Songs: https://open.spotify.com/collection/tracks
 *   2. View as Compact Mode
 *   3. Paste the code below into your browser's console
 *   4. It will automatically start capturing songs as you scroll - no need to run anything manually
 *   5. You'll see ✓ Added X new song(s) messages as you scroll
 *   6. Once you're done scrolling, run exportCSV() to get the CSV
 *   7. Run stopScraping() if you want to stop watching for new rows
 *
 * Note: this script will break when Spotify changes its layout.
 */

// Accumulated song records, in the order they were first captured.
const songs = [];
// aria-rowindex values already captured, so re-rendered rows aren't duplicated.
const scrapedIds = new Set();
/**
 * Extract a song record from one Spotify tracklist row element.
 *
 * @param {Element} row - a DOM element with role="row" from the tracklist grid.
 * @returns {?{number: string, title: string, artist: string, album: string,
 *            dateAdded: string, isSpotifyGreyedOut: boolean}}
 *          the parsed song, or null for rows that aren't parseable track rows
 *          (header rows, partially rendered rows, layout changes).
 */
function extractSongData(row) {
  const cells = row.querySelectorAll('[role="gridcell"]');
  // Header/placeholder rows have fewer cells than a real track row.
  if (cells.length < 6) return null;
  try {
    const number = cells[0].querySelector('span[data-encore-id="text"]')?.textContent.trim();
    const title = cells[1].querySelector('[data-testid="internal-track-link"] div')?.textContent.trim();
    const artist = cells[2].querySelector('a')?.textContent.trim();
    // Album/date may be absent; default to '' so downstream string handling
    // (CSV quoting in exportCSV) never receives undefined.
    const album = cells[3].querySelector('a')?.textContent.trim() ?? '';
    const date = cells[4].querySelector('span[data-encore-id="text"]')?.textContent.trim() ?? '';
    // Check for the greyed out (unavailable) class.
    // NOTE(review): this class name is a build artifact and will change
    // whenever Spotify redeploys — expected to need updating.
    const tracklistRow = row.querySelector('[data-testid="tracklist-row"]');
    const isGreyedOut = tracklistRow?.classList.contains('UmAgXoqvtCcFGNtZ') ?? false;
    if (number && title && artist) {
      return { number, title, artist, album, dateAdded: date, isSpotifyGreyedOut: isGreyedOut };
    }
  } catch (e) {
    // Best-effort scraping: skip rows whose structure we don't recognize,
    // but leave a breadcrumb so layout changes are noticeable in the console.
    console.debug('extractSongData: skipping unparseable row', e);
  }
  return null;
}
/**
 * Scan the document for tracklist rows and record any not seen before,
 * keyed by their aria-rowindex attribute.
 *
 * @returns {number} how many new songs were added on this pass.
 */
function processNewRows() {
  let added = 0;
  for (const row of document.querySelectorAll('[role="row"]')) {
    const rowId = row.getAttribute('aria-rowindex');
    if (!rowId || scrapedIds.has(rowId)) continue;
    const song = extractSongData(row);
    if (!song) continue;
    songs.push(song);
    scrapedIds.add(rowId);
    added++;
  }
  if (added > 0) {
    console.log(`✓ Added ${added} new song(s). Total: ${songs.length}`);
  }
  return added;
}
// Spotify's tracklist is virtualized: rows are inserted/removed as you
// scroll, so re-scan on every DOM mutation under <body>.
const observer = new MutationObserver(() => processNewRows());
observer.observe(document.body, { childList: true, subtree: true });
/**
 * Build a CSV of all scraped songs, print it to the console, and copy it
 * to the clipboard.
 *
 * Fixes vs. the original: every data field is RFC 4180-quoted — the
 * "Date Added" column regularly contains commas (e.g. "Apr 9, 2026"),
 * which previously shifted columns — and clipboard failures (page not
 * focused / permission denied) no longer cause an unhandled rejection.
 *
 * @returns {string|undefined} the CSV text, or undefined when nothing
 *          has been scraped yet.
 */
function exportCSV() {
  if (songs.length === 0) {
    console.log('No songs scraped yet. Keep scrolling!');
    return;
  }
  // Quote one CSV field, doubling embedded quotes (RFC 4180).
  const quote = (value) => `"${String(value ?? '').replace(/"/g, '""')}"`;
  const headers = ['Number', 'Title', 'Artist', 'Album', 'Date Added', 'Is Greyed Out'];
  const rows = songs.map((song) => [
    quote(song.number),
    quote(song.title),
    quote(song.artist),
    quote(song.album),
    quote(song.dateAdded),
    song.isSpotifyGreyedOut ? 'TRUE' : 'FALSE',
  ]);
  const csv = [headers, ...rows].map((row) => row.join(',')).join('\n');
  console.log(csv);
  navigator.clipboard
    .writeText(csv)
    .then(() => {
      console.log(`✓ CSV copied to clipboard (${songs.length} songs)`);
    })
    .catch((err) => {
      // Clipboard access requires focus/permission; the CSV was already
      // printed above, so warn instead of rejecting unhandled.
      console.warn('Could not copy CSV to clipboard:', err);
    });
  return csv;
}
/**
 * Detach the MutationObserver so new rows are no longer captured.
 * Songs scraped so far remain available via exportCSV() and `songs`.
 */
function stopScraping() {
  observer.disconnect();
  console.log('✓ Scraping stopped');
}
// Capture whatever rows are already rendered; the observer handles the rest
// as the user scrolls.
processNewRows();
console.log('🎵 Spotify scraper active - scroll to load and capture songs automatically');
console.log('Commands: exportCSV() | stopScraping() | songs');
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment