author     Max Ogden <max@maxogden.com>  2016-08-05 12:36:08 -0700
committer  Max Ogden <max@maxogden.com>  2016-08-05 12:36:08 -0700
commit     416108575f8207d687c52ca079117bcc5cfdb515 (patch)
tree       a6b6a2d5624b8edcbb6e4542714785f91e581649
parent     6879daedad2d53e32d1d9d7bf8b1c74574bc28ef (diff)
parent     ffb465bb3bcebb62c5e63cc3ed80d70274848deb (diff)
download   dat-docs-416108575f8207d687c52ca079117bcc5cfdb515.tar.gz
           dat-docs-416108575f8207d687c52ca079117bcc5cfdb515.zip
Merge branch 'master' of https://github.com/datproject/docs
-rw-r--r--  README.md                                                37
-rw-r--r--  assets/arch.png (renamed from arch.png)                  bin  345879 -> 345879 bytes
-rw-r--r--  assets/cdc.png (renamed from meta/cdc.png)               bin  91078 -> 91078 bytes
-rw-r--r--  assets/styles.css                                        10
-rw-r--r--  build.js                                                  6
-rw-r--r--  contents.json                                            17
-rw-r--r--  dat-data.png                                             bin  19900 -> 0 bytes
-rw-r--r--  diy-dat.md                                               73
-rw-r--r--  docs/api.md (renamed from api.md)                         0
-rw-r--r--  docs/contents.json                                       26
-rw-r--r--  docs/diy-dat.md                                          41
-rw-r--r--  docs/ecosystem.md (renamed from ecosystem.md)             0
-rw-r--r--  docs/how-dat-works.md (renamed from how-dat-works.md)     4
-rw-r--r--  docs/hyperdrive_spec.md (renamed from hyperdrive.md)      0
-rw-r--r--  docs/readme.md                                            6
-rw-r--r--  docs/sleep.md (renamed from sleep.md)                     0
-rw-r--r--  docs/welcome.md (renamed from welcome.md)                 0
-rw-r--r--  meta/changelog.md                                         5
-rw-r--r--  package.json                                             23
-rw-r--r--  styles.css                                                4
20 files changed, 136 insertions, 116 deletions
diff --git a/README.md b/README.md
index fdc011c..07459c1 100644
--- a/README.md
+++ b/README.md
@@ -8,16 +8,45 @@ Repository for the documentation of the Dat Project ecosystem. View the docs at
## Writing & Editing Docs
+[See the docs folder](docs/readme.md) for information on editing and adding docs. Once you finish editing, follow the Updating & Deploying Docs instructions below.
+
+## Installation & Usage
+
This documentation uses [minidocs](https://www.npmjs.com/package/minidocs).
-The table of contents is in `content.json`.
+### Viewing Docs Locally
+
+1. Clone Repository
+2. `npm install`
+3. `npm run build:local` to build the docs for local viewing
+4. `npm run start` to view the docs in the browser
-### Installation & Usage
+### Updating & Deploying Docs
1. Clone Repository
2. `npm install`
-3. `npm run build` to build the docs.
-4. `npm run deploy` to deploy docs to GitHub pages.
+3. Make documentation edits
+4. `npm run deploy` to build the docs & deploy to GitHub Pages
+
+### Updating External Module Docs
+
+We use [ecosystem-docs](https://github.com/hughsk/ecosystem-docs) to get documentation from other modules.
+
+1. `npm run update` will update the list of repositories and download the latest docs.
+2. Deploy!
+
+## NPM Commands
+
+All of the npm commands:
+
+* `npm run deploy`: build the docs & deploy to GitHub Pages
+* `npm run build:deploy`: build full HTML pages for deployment
+* `npm run build:local`: build the app for local viewing
+* `npm start`: start a budo server for local viewing
+* `npm run update`: run both update commands
+* `npm run update:list`: update the repository list only
+* `npm run update:build`: download the latest readmes to the docs folder
+* `npm run paper`: create the paper with pandoc
## License
diff --git a/arch.png b/assets/arch.png
index 9be39b9..9be39b9 100644
--- a/arch.png
+++ b/assets/arch.png
Binary files differ
diff --git a/meta/cdc.png b/assets/cdc.png
index ce859fc..ce859fc 100644
--- a/meta/cdc.png
+++ b/assets/cdc.png
Binary files differ
diff --git a/assets/styles.css b/assets/styles.css
new file mode 100644
index 0000000..a6888f6
--- /dev/null
+++ b/assets/styles.css
@@ -0,0 +1,10 @@
+.minidocs-logo {
+ max-height: 100px;
+ width: inherit !important;
+ margin: 0 auto;
+ display: block;
+}
+
+.minidocs-content {
+ padding-bottom: 50px;
+}
\ No newline at end of file
diff --git a/build.js b/build.js
new file mode 100644
index 0000000..33d1afe
--- /dev/null
+++ b/build.js
@@ -0,0 +1,6 @@
+var fs = require('fs')
+var ndjson = require('ndjson')
+
+process.stdin.pipe(ndjson.parse()).on('data', function (obj) {
+ fs.writeFileSync('docs/' + obj.name + '.md', obj.readme)
+})
\ No newline at end of file
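
For context, `build.js` above is what the `update:build` script later in this diff pipes the `ecosystem-docs read` output into. A hypothetical way to exercise it by hand (not part of this commit; it assumes the `docs/` folder exists and the `ndjson` dependency is installed) is to feed it a single fake record:

```js
// Hypothetical smoke test for build.js: spawn it and pipe in one fake
// newline-delimited JSON record shaped the way build.js expects
// ({name, readme}); it should then write docs/example-module.md.
var spawn = require('child_process').spawn

var child = spawn('node', ['build.js'], { stdio: ['pipe', 'inherit', 'inherit'] })
child.stdin.write(JSON.stringify({ name: 'example-module', readme: '# example-module\n' }) + '\n')
child.stdin.end()
```
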
diff --git a/contents.json b/contents.json
deleted file mode 100644
index 76ec577..0000000
--- a/contents.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "Introduction": {
- "Welcome to Dat": "welcome.md",
- "How Dat Works": "how-dat-works.md"
- },
- "Ecosystem": {
- "Overview": "ecosystem.md"
- },
- "Specification": {
- "hyperdrive": "hyperdrive.md",
- "sleep": "sleep.md"
- },
- "References": {
- "API": "api.md",
- "DIY Dat": "diy-dat.md"
- }
-}
diff --git a/dat-data.png b/dat-data.png
deleted file mode 100644
index c8d647b..0000000
--- a/dat-data.png
+++ /dev/null
Binary files differ
diff --git a/diy-dat.md b/diy-dat.md
deleted file mode 100644
index f95eb11..0000000
--- a/diy-dat.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# DIY Dat
-
-This document shows how to write your own compatible `dat` client using node modules.
-
-The three essential node modules are called [hyperdrive](https://npmjs.org/hyperdrive), [discovery-swarm](https://npmjs.org/discovery-swarm) and [level](https://npmjs.org/level). Hyperdrive does file synchronization and versioning, discovery-swarm does peer discovery over local networks and the Internet, and level provides a local LevelDB for storing metadata. More details are available in [How Dat Works](how-dat-works.md). The [dat](https://npmjs.org/dat) module itself is just some code that combines these modules and wraps them in a command-line API.
-
-Here's the minimal code needed to download data from a dat:
-
-```js
-var Swarm = require('discovery-swarm')
-var Hyperdrive = require('hyperdrive')
-var Level = require('level')
-
-// run this like: node thisfile.js 4c325f7874b4070blahblahetc
-// the dat link someone sent us, we want to download the data from it
-var link = new Buffer(process.argv[2], 'hex')
-
-// here is the default config dat uses
-// used for MDNS and also as the dns 'app name', you prob shouldnt change this
-var DAT_DOMAIN = 'dat.local'
-// dat will try this first and pick the first open port if its taken
-var DEFAULT_LOCAL_PORT = 3282
-// we run the servers below you can use if you want or run your own
-var DEFAULT_DISCOVERY = [
- 'discovery1.publicbits.org',
- 'discovery2.publicbits.org'
-]
-var DEFAULT_BOOTSTRAP = [
- 'bootstrap1.publicbits.org:6881',
- 'bootstrap2.publicbits.org:6881',
- 'bootstrap3.publicbits.org:6881',
- 'bootstrap4.publicbits.org:6881'
-]
-
-var db = Level('./dat.db')
-var drive = Hyperdrive(db)
-var swarm = Swarm({
- id: drive.core.id,
- dns: {server: DEFAULT_DISCOVERY, domain: DAT_DOMAIN},
- dht: {bootstrap: DEFAULT_BOOTSTRAP},
- stream: function () {
- // this is how the swarm and hyperdrive interface
- console.log('new peer stream')
- return drive.createPeerStream()
- }
-})
-
-swarm.once('listening', function () {
- console.log('joining', link)
- // join the swarm
- swarm.join(new Buffer(link, 'hex'))
- // tell hyperdrive to start downloading/uploading in ./data
- var archive = drive.get(link, process.cwd() + '/data')
- archive.ready(function (err) {
- console.log('archive ready')
- // a stream of all metadata. after retrieving each entry metadata will be cached locally
- // but the first time it has to fetch it from the swarm
- var metadata = archive.createEntryStream()
- // start downloading all entries, or choose your own filter logic to download specific entries
- // entries will be either files or directories
- metadata.on('data', function (entry) {
- var dl = archive.download(entry)
- console.log('downloading', entry.name, dl)
-
- dl.on('ready', function () {
- console.log('download started', entry.name, dl)
- })
- })
- })
-})
-
-swarm.listen(DEFAULT_LOCAL_PORT)
-```
diff --git a/api.md b/docs/api.md
index 68a7c3f..68a7c3f 100644
--- a/api.md
+++ b/docs/api.md
diff --git a/docs/contents.json b/docs/contents.json
new file mode 100644
index 0000000..83f5d73
--- /dev/null
+++ b/docs/contents.json
@@ -0,0 +1,26 @@
+{
+ "Introduction": {
+ "Welcome to Dat": "welcome.md",
+ "How Dat Works": "how-dat-works.md"
+ },
+ "Specification": {
+ "hyperdrive spec": "hyperdrive_spec.md",
+ "sleep": "sleep.md"
+ },
+ "References": {
+ "API": "api.md",
+ "DIY Dat": "diy-dat.md"
+ },
+ "Modules": {
+ "Overview": "ecosystem.md",
+ "Interface": {
+ "Dat Command Line": "dat.md",
+ "dat.land": "dat.land.md",
+ "Dat Desktop": "dat-desktop.md"
+ },
+ "Core": {
+ "Hyperdrive": "hyperdrive.md",
+ "Hypercore": "hypercore.md"
+ }
+ }
+}
diff --git a/docs/diy-dat.md b/docs/diy-dat.md
new file mode 100644
index 0000000..fb16fc1
--- /dev/null
+++ b/docs/diy-dat.md
@@ -0,0 +1,41 @@
+# DIY Dat
+
+This document shows how to write your own compatible `dat` client using node modules.
+
+The three essential node modules are called [hyperdrive](https://npmjs.org/hyperdrive), [hyperdrive-archive-swarm](https://npmjs.org/hyperdrive-archive-swarm) and [level](https://npmjs.org/level). Hyperdrive does file synchronization and versioning, hyperdrive-archive-swarm does peer discovery over local networks and the Internet, and level provides a local LevelDB for storing metadata. More details are available in [How Dat Works](how-dat-works.md). The [dat](https://npmjs.org/dat) module itself is just some code that combines these modules and wraps them in a command-line API.
+
+Here's the minimal code needed to download data from a dat:
+
+```js
+// run this like: node thisfile.js 4c325f7874b4070blahblahetc
+// the dat link someone sent us, we want to download the data from it
+var link = new Buffer(process.argv[2], 'hex')
+
+var Hyperdrive = require('hyperdrive')
+var Swarm = require('hyperdrive-archive-swarm')
+var level = require('level')
+var raf = require('random-access-file')
+var each = require('stream-each')
+var path = require('path')
+
+var db = level('./dat.db')
+var drive = Hyperdrive(db)
+var archive = drive.createArchive(link, {
+  file: function (name) {
+    return raf(path.join(process.cwd(), name))
+  }
+})
+var swarm = Swarm(archive)
+
+archive.open(function (err) {
+  if (err) return console.error(err)
+  each(archive.list({live: archive.live}), function (data, next) {
+    archive.download(data, function (err) {
+      if (err) return console.error(err)
+      console.log('file downloaded', data.relname)
+      next()
+    })
+  }, function () { console.log('done downloading') })
+})
+
+```
diff --git a/ecosystem.md b/docs/ecosystem.md
index 673c513..673c513 100644
--- a/ecosystem.md
+++ b/docs/ecosystem.md
diff --git a/how-dat-works.md b/docs/how-dat-works.md
index 520174e..c4899af 100644
--- a/how-dat-works.md
+++ b/docs/how-dat-works.md
@@ -36,7 +36,7 @@ Here's the long version:
Hyperdrive shares and synchronizes a set of files, similar to rsync or Dropbox. For each file in the drive we use a technique called Rabin fingerprinting to break the file up into pieces. Rabin fingerprints are a specific strategy for what is called Content Defined Chunking. Here's an example:
-![cdc diagram](https://raw.githubusercontent.com/datproject/docs/master/meta/cdc.png)
+![cdc diagram](https://raw.githubusercontent.com/datproject/docs/master/assets/cdc.png)
We have configured our Rabin chunker to produce chunks that are around 16KB on average. So if you share a folder containing a single 1MB JPG you will get around 64 chunks.
@@ -68,4 +68,4 @@ Because Dat is built on a foundation of strong cryptographic data integrity and
## Implementations
-This covered a lot of ground. If you want to go deeper and see the implementations we are using in the [Dat command-line tool](https://github.com/maxogden/dat), go to the [Dependencies](ecosystem.md) page
+This covered a lot of ground. If you want to go deeper and see the implementations we are using in the [Dat command-line tool](https://github.com/maxogden/dat), go to the [Dependencies](ecosystem) page
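
The context above describes Dat's content-defined chunking with a ~16KB average chunk size, so a 1MB file breaks into roughly 64 chunks. As a rough illustration of the cut-on-hash idea only, and not Dat's actual Rabin chunker, a toy version might look like:

```js
// Toy content-defined chunker (illustration only, not Dat's Rabin fingerprinting):
// cut wherever the low BITS bits of a crude hash are zero, which gives chunks
// of roughly 2^BITS bytes on average (~16KB here, matching Dat's target).
var BITS = 14
var MASK = (1 << BITS) - 1

function chunk (buf) {
  var chunks = []
  var start = 0
  var hash = 0
  for (var i = 0; i < buf.length; i++) {
    hash = (hash * 31 + buf[i]) & 0x7fffffff // crude hash, kept to 31 bits
    if ((hash & MASK) === 0) {
      chunks.push(buf.slice(start, i + 1))
      start = i + 1
    }
  }
  if (start < buf.length) chunks.push(buf.slice(start))
  return chunks
}

// a 1MB buffer of random bytes should split into roughly 64 chunks
console.log(chunk(require('crypto').randomBytes(1024 * 1024)).length)
```
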
diff --git a/hyperdrive.md b/docs/hyperdrive_spec.md
index 64298cd..64298cd 100644
--- a/hyperdrive.md
+++ b/docs/hyperdrive_spec.md
diff --git a/docs/readme.md b/docs/readme.md
new file mode 100644
index 0000000..6597fc3
--- /dev/null
+++ b/docs/readme.md
@@ -0,0 +1,6 @@
+## Writing Documentation
+
+1. Add file to this folder
+2. Write docs
+3. Add to table of contents in `contents.json`
+4. Build & Deploy (see main readme)
diff --git a/sleep.md b/docs/sleep.md
index 08811e9..08811e9 100644
--- a/sleep.md
+++ b/docs/sleep.md
diff --git a/welcome.md b/docs/welcome.md
index 51d7e5e..51d7e5e 100644
--- a/welcome.md
+++ b/docs/welcome.md
diff --git a/meta/changelog.md b/meta/changelog.md
deleted file mode 100644
index bbdd4b1..0000000
--- a/meta/changelog.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## Changelog
-
-### 7.1.1 and earlier
-
-These refer to the pre-1.0 versions of dat and are omitted.
diff --git a/package.json b/package.json
index 53f033b..d889e21 100644
--- a/package.json
+++ b/package.json
@@ -4,9 +4,13 @@
"description": "Documentation for Dat and the surrounding ecosystem.",
"main": "index.js",
"scripts": {
- "update": "curl -Ssl https://raw.githubusercontent.com/clkao/awesome-dat/master/readme.md | grep '📔' | egrep -o 'github.com/[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+' | cut -f 2- -d / | uniq > repos.txt",
- "build": "minidocs . -c contents.json -p -i welcome -o dist -l dat-data.png -t 'The Dat Project' -s styles.css",
- "deploy": "echo docs.dat-data.com > dist/CNAME && gh-pages -d dist",
+ "update": "npm run update:list && npm run update:docs",
+ "update:list": "curl -Ssl https://raw.githubusercontent.com/clkao/awesome-dat/master/readme.md | grep '📔' | egrep -o 'github.com/[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+' | cut -f 2- -d / | uniq > repos.txt",
+ "update:build": "cat repos.txt | ecosystem-docs sync && cat repos.txt | ecosystem-docs read | node build.js",
+ "build:local": "minidocs docs -c docs/contents.json -i welcome -o dist -l ./node_modules/dat-design/downloads/dat-data-logo.png -t 'Dat Data' -s assets/styles.css --pushstate",
+ "build:deploy": "minidocs docs -c docs/contents.json -i welcome -o dist -l ./node_modules/dat-design/downloads/dat-data-logo.png -t 'Dat Data' -s assets/styles.css --full-html",
+ "deploy": "npm run build:deploy && echo docs.dat-data.com > dist/CNAME && gh-pages -d dist",
+ "start": "budo --dir dist --pushstate",
"paper": "pandoc --variable author=\"Maxwell Ogden, Karissa McKelvey, Mathias Buus\" --variable title=\"Dat - Distributed Dataset Synchronization And Versioning\" --variable date=\"Version 1.0.0, May 2016\" --variable classoption=twocolumn --variable papersize=a4paper -s dat-paper.md -o dat-paper.pdf"
},
"repository": {
@@ -21,16 +25,13 @@
},
"homepage": "https://github.com/datproject/docs#readme",
"dependencies": {
+ "dat-design": "^1.2.11",
"gh-pages": "^0.11.0",
- "minidocs": "^2.0.3"
+ "minidocs": "^4.0.5"
},
"devDependencies": {
- "ecosystem-docs": "^1.1.1"
- },
- "browserify": {
- "transform": [
- "sheetify/transform",
- "read-directory/transform"
- ]
+ "budo": "^8.3.0",
+ "ecosystem-docs": "^1.1.1",
+ "ndjson": "^1.4.3"
}
}
diff --git a/styles.css b/styles.css
deleted file mode 100644
index b529cec..0000000
--- a/styles.css
+++ /dev/null
@@ -1,4 +0,0 @@
-.minidocs-logo {
- max-height: 100px;
- width: inherit !important;
-}
-}