付智勇

no message

Showing 55 changed files with 4,280 additions and 3 deletions

Too many changes to show.

For performance reasons, only 55 of 55+ files are displayed.

... ... @@ -28,9 +28,9 @@ var tokenUtil = require('./util/tokenUtil');
const _ = require('lodash');
var status = require('./util/resTemplate')
// error handler
onerror(app)
app.use(koaBody({ multipart: true }));
// middlewares
... ... @@ -50,7 +50,10 @@ app.use(views(__dirname + '/views', {
app.use(async (ctx, next) => {
try{
const start = new Date();
if(filterUrl.indexOf(ctx.request.url) != -1){
const url = ctx.request.url.split('?');
console.log(url);
if(filterUrl.indexOf(url[0]) != -1){
await next();
}else if(!ctx.header.token){
status.catchError(ctx,400,'请登录');
... ...
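The hunk above changes the whitelist check to match on the path alone instead of the full URL, so a request carrying a query string still skips the token check when its path is whitelisted. A minimal sketch of the resulting middleware, assuming `filterUrl` is an array of whitelisted paths and `status.catchError` writes the error response (both defined elsewhere in app.js); the rest of the middleware is elided in the diff:

```js
app.use(async (ctx, next) => {
  // Drop the query string so "/login?from=home" still matches "/login".
  const path = ctx.request.url.split('?')[0];
  if (filterUrl.indexOf(path) !== -1) {
    await next();                               // whitelisted route: no token required
  } else if (!ctx.header.token) {
    status.catchError(ctx, 400, '请登录');       // missing token: reject
  }
  // ... token verification for the remaining routes is not shown in this hunk
});
```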
All packages installed at Tue Aug 22 2017 18:52:38 GMT+0800 (CST)
\ No newline at end of file
All packages installed at Wed Sep 06 2017 18:02:21 GMT+0800 (CST)
\ No newline at end of file
... ...
Wed Sep 06 2017 17:57:14 GMT+0800 (CST)
\ No newline at end of file
... ...
# download-file
## Install
```shell
npm install download-file --save
```
## Usage
```js
var download = require('download-file')
var url = "http://i.imgur.com/G9bDaPH.jpg"
var options = {
  directory: "./images/cats/",
  filename: "cat.gif"
}
download(url, options, function(err){
  if (err) throw err
  console.log("meow")
})
```
## API
### download(url, [options], callback(err))
- __url__ string of the file URL to download
- __options__ object with options
  - __directory__ string with the path to the directory where the file is saved (default: current working directory)
  - __filename__ string with the name to save the file as (default: filename in the url)
  - __timeout__ integer of how long in ms to wait while downloading (default: 20000)
- __callback__ function to run after the download finishes or fails
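For example, the timeout option can be combined with directory and filename (a minimal sketch; the URL and paths are placeholders, and the second callback argument is the saved path as passed by the index.js shown later in this diff):
```js
var download = require('download-file')

var options = {
  directory: "./downloads/",   // created if it does not exist
  filename: "report.pdf",      // overrides the name taken from the url
  timeout: 5000                // give up after 5 seconds
}

download("http://example.com/files/report.pdf", options, function (err, path) {
  if (err) return console.error("download failed:", err)
  console.log("saved to", path)
})
```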
... ...
var fs = require('fs')
var url = require('url')
var http = require('http')
var https = require('https')
var mkdirp = require('mkdirp')
module.exports = function download(file, options, callback) {
  if (!file) throw("Need a file url to download")

  if (!callback && typeof options === 'function') {
    callback = options
  }

  options = typeof options === 'object' ? options : {}
  options.timeout = options.timeout || 20000
  options.directory = options.directory ? options.directory : '.'

  var uri = file.split('/')
  options.filename = options.filename || uri[uri.length - 1]

  var path = options.directory + "/" + options.filename

  // Pick the transport based on the URL's protocol; default to http.
  var req
  if (url.parse(file).protocol === null) {
    file = 'http://' + file
    req = http
  } else if (url.parse(file).protocol === 'https:') {
    req = https
  } else {
    req = http
  }

  var request = req.get(file, function(response) {
    if (response.statusCode === 200) {
      mkdirp(options.directory, function(err) {
        if (err) throw err
        var file = fs.createWriteStream(path)
        response.pipe(file)
      })
    } else {
      if (callback) callback(response.statusCode)
    }

    response.on("end", function(){
      if (callback) callback(false, path)
    })

    request.setTimeout(options.timeout, function () {
      request.abort()
      callback("Timeout")
    })

  }).on('error', function(e) {
    if (callback) callback(e)
  })
}
... ...
../../../_mkdirp@0.5.1@mkdirp/bin/cmd.js
\ No newline at end of file
... ...
../../_mkdirp@0.5.1@mkdirp
\ No newline at end of file
... ...
{
  "name": "download-file",
  "version": "0.1.5",
  "description": "Generic file download utility",
  "main": "index.js",
  "scripts": {
    "test": "node test.js"
  },
  "author": "Montana Flynn",
  "license": "ISC",
  "dependencies": {
    "mkdirp": "^0.5.0"
  },
  "devDependencies": {},
  "keywords": [
    "download",
    "file",
    "url",
    "get",
    "http",
    "https"
  ],
  "_from": "download-file@0.1.5",
  "_resolved": "http://registry.npm.taobao.org/download-file/download/download-file-0.1.5.tgz"
}
\ No newline at end of file
... ...
var dl = require('./index.js')

var url = "i.imgur.com/G9bDaPH.jpg"
var opts = {
  directory: "./images/cats/",
  filename: "cat.gif"
}
dl(url, opts, function(err){
  if (err) hiss(err)
  meow("no protocol")
})

var url = "http://i.imgur.com/G9bDaPH.jpg"
var opts = {
  directory: "./images/cats/",
  filename: "cat.gif"
}
dl(url, opts, function(err){
  if (err) hiss(err)
  meow("http protocol")
})

var url = "https://i.imgur.com/G9bDaPH.jpg"
var opts = {
  directory: "./images/cats/",
  filename: "cat.gif"
}
dl(url, opts, function(err){
  if (err) hiss(err)
  meow("https protocol")
})

function meow(msg) {
  console.log("\033[32m", "meow: " + msg, "\033[91m")
}

function hiss(err) {
  console.log("\033[31m", "hiss", "\033[91m")
  throw err
}
... ...
Wed Sep 06 2017 17:57:15 GMT+0800 (CST)
\ No newline at end of file
... ...
sudo: false
language: node_js
node_js:
- "4"
- "6"
cache:
directories:
- node_modules
install:
- npm install
script:
- npm run test
# Necessary to compile native modules for io.js v3 or Node.js v4
env:
- CXX=g++-4.8
# Necessary to compile native modules for io.js v3 or Node.js v4
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- g++-4.8
notifications:
email: false
... ...
Copyright 2011 Marcel Laverdet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
... ...
fibers(1) -- Fiber support for v8 and Node
==========================================
INSTALLING
----------
### via npm
* `npm install fibers`
* You're done! (see "supported platforms" below if you run into errors)
### from source
* `git clone git://github.com/laverdet/node-fibers.git`
* `cd node-fibers`
* `npm install`
Note: node-fibers uses [node-gyp](https://github.com/TooTallNate/node-gyp) for
building. To manually invoke the build process, you can use `node-gyp rebuild`.
This will put the compiled extension in `build/Release/fibers.node`. However,
when you do `require('fibers')`, it will expect the module to be in, for
example, `bin/linux-x64-v8-3.11/fibers.node`. You can manually put the module
here every time you build, or you can use the included build script. Either
`npm install` or `node build -f` will do this for you. If you are going to be
hacking on node-fibers, it may be worthwhile to first do `node-gyp configure`
and then for subsequent rebuilds you can just do `node-gyp build` which will
be faster than a full `npm install` or `node-gyp rebuild`.
### meteor users please read this
If you're trying to get meteor running and you ended up at this page you're
probably doing something wrong. Please uninstall all versions of NodeJS and
Meteor, then start over. See
[meteor#5124](https://github.com/meteor/meteor/issues/5124) for more
information.
### supported platforms
If you are running NodeJS version 4.x, 5.x, or 6.x on Linux, OS X, or Windows
(7 or later) then you should be able to install fibers from npm just fine. If
you are running an older (or newer) version of node or some other operating
system you will have to compile fibers on your system.
(special thanks to [Jeroen Janssen](https://github.com/japj) for his work on
fibers in Windows)
If you do end up needing to compile fibers, first make sure you have node-gyp
installed as a global dependency (`npm install -g node-gyp`), and that you have
set up your build environment by following the instructions at
[node-gyp](https://github.com/TooTallNate/node-gyp). Ubuntu-flavored Linux users
may need to run `sudo apt-get install g++` as well.
EXAMPLES
--------
The examples below describe basic use of `Fiber`, but note that it is **not
recommended** to use `Fiber` without an abstraction in between your code and
fibers. See "FUTURES" below for additional information.
### Sleep
This is a quick example of how you can write sleep() with fibers. Note that
while the sleep() call is blocking inside the fiber, node is able to handle
other events.
$ cat sleep.js
```javascript
var Fiber = require('fibers');

function sleep(ms) {
  var fiber = Fiber.current;
  setTimeout(function() {
    fiber.run();
  }, ms);
  Fiber.yield();
}

Fiber(function() {
  console.log('wait... ' + new Date);
  sleep(1000);
  console.log('ok... ' + new Date);
}).run();
console.log('back in main');
```
$ node sleep.js
wait... Fri Jan 21 2011 22:42:04 GMT+0900 (JST)
back in main
ok... Fri Jan 21 2011 22:42:05 GMT+0900 (JST)
### Incremental Generator
Yielding execution will resume back in the fiber right where you left off. You
can also pass values back and forth through yield() and run(). Again, the node
event loop is never blocked while this script is running.
$ cat generator.js
```javascript
var Fiber = require('fibers');

var inc = Fiber(function(start) {
  var total = start;
  while (true) {
    total += Fiber.yield(total);
  }
});

for (var ii = inc.run(1); ii <= 10; ii = inc.run(1)) {
  console.log(ii);
}
```
$ node generator.js
1
2
3
4
5
6
7
8
9
10
### Fibonacci Generator
Expanding on the incremental generator above, we can create a generator which
returns a new Fibonacci number with each invocation. You can compare this with
the [ECMAScript Harmony
Generator](http://wiki.ecmascript.org/doku.php?id=harmony:generators) Fibonacci
example.
$ cat fibonacci.js
```javascript
var Fiber = require('fibers');

// Generator function. Returns a function which returns incrementing
// Fibonacci numbers with each call.
function Fibonacci() {
  // Create a new fiber which yields sequential Fibonacci numbers
  var fiber = Fiber(function() {
    Fiber.yield(0); // F(0) -> 0
    var prev = 0, curr = 1;
    while (true) {
      Fiber.yield(curr);
      var tmp = prev + curr;
      prev = curr;
      curr = tmp;
    }
  });

  // Return a bound handle to `run` on this fiber
  return fiber.run.bind(fiber);
}

// Initialize a new Fibonacci sequence and iterate up to 1597
var seq = Fibonacci();
for (var ii = seq(); ii <= 1597; ii = seq()) {
  console.log(ii);
}
```
$ node fibonacci.js
0
1
1
2
3
5
8
13
21
34
55
89
144
233
377
610
987
1597
### Basic Exceptions
Fibers are exception-safe; exceptions will continue travelling through fiber
boundaries:
$ cat error.js
```javascript
var Fiber = require('fibers');

var fn = Fiber(function() {
  console.log('async work here...');
  Fiber.yield();
  console.log('still working...');
  Fiber.yield();
  console.log('just a little bit more...');
  Fiber.yield();
  throw new Error('oh crap!');
});

try {
  while (true) {
    fn.run();
  }
} catch(e) {
  console.log('safely caught that error!');
  console.log(e.stack);
}
console.log('done!');
```
$ node error.js
async work here...
still working...
just a little bit more...
safely caught that error!
Error: oh crap!
at error.js:11:9
done!
FUTURES
-------
Using the `Fiber` class without an abstraction in between your code and the raw
API is **not recommended**. `Fiber` is meant to implement the smallest amount of
functionality in order to make many different programming patterns possible. This
makes the `Fiber` class relatively lousy to work with directly, but extremely
powerful when coupled with a decent abstraction. There is no right answer for
which abstraction is right for you and your project. Included with `node-fibers`
is an implementation of "futures" which is fiber-aware. Usage of this library
is documented below. There are several other externally-maintained options
which can be found on the [wiki](https://github.com/laverdet/node-fibers/wiki).
You **should** feel encouraged to be creative with fibers and build a solution
which works well with your project. For instance, `Future` is not a good
abstraction to use if you want to build a generator function (see Fibonacci
example above).
Using `Future` to wrap existing node functions. At no point is the node event
loop blocked:
$ cat ls.js
```javascript
var Future = require('fibers/future');
var fs = Future.wrap(require('fs'));

Future.task(function() {
  // Get a list of files in the directory
  var fileNames = fs.readdirFuture('.').wait();
  console.log('Found '+ fileNames.length+ ' files');

  // Stat each file
  var stats = [];
  for (var ii = 0; ii < fileNames.length; ++ii) {
    stats.push(fs.statFuture(fileNames[ii]));
  }
  stats.map(function(f) {
    f.wait()
  });

  // Print file size
  for (var ii = 0; ii < fileNames.length; ++ii) {
    console.log(fileNames[ii]+ ': '+ stats[ii].get().size);
  }
}).detach();
```
$ node ls.js
Found 11 files
bin: 4096
fibers.js: 1708
.gitignore: 37
README.md: 8664
future.js: 5833
.git: 4096
LICENSE: 1054
src: 4096
ls.js: 860
Makefile: 436
package.json: 684
The future API is designed to make it easy to move between classic
callback-style code and fiber-aware waiting code:
$ cat sleep.js
```javascript
var Future = require('fibers/future'), wait = Future.wait;

// This function returns a future which resolves after a timeout. This
// demonstrates manually resolving futures.
function sleep(ms) {
  var future = new Future;
  setTimeout(function() {
    future.return();
  }, ms);
  return future;
}

// You can create functions which automatically run in their own fiber and
// return futures that resolve when the fiber returns (this probably sounds
// confusing.. just play with it to understand).
var calcTimerDelta = function(ms) {
  var start = new Date;
  sleep(ms).wait();
  return new Date - start;
}.future(); // <-- important!

// And futures also include node-friendly callbacks if you don't want to use
// wait()
calcTimerDelta(2000).resolve(function(err, val) {
  console.log('Set timer for 2000ms, waited '+ val+ 'ms');
});
```
$ node sleep.js
Set timer for 2000ms, waited 2009ms
API DOCUMENTATION
-----------------
Fiber's definition looks something like this:
```javascript
/**
* Instantiate a new Fiber. You may invoke this either as a function or as
* a constructor; the behavior is the same.
*
* When run() is called on this fiber for the first time, `fn` will be
* invoked as the first frame on a new stack. Execution will continue on
* this new stack until `fn` returns, or Fiber.yield() is called.
*
* After the function returns the fiber is reset to original state and
* may be restarted with another call to run().
*/
function Fiber(fn) {
[native code]
}
/**
* `Fiber.current` will contain the currently-running Fiber. It will be
* `undefined` if there is no fiber (i.e. the main stack of execution).
*
* See "Garbage Collection" for more information on responsible use of
* `Fiber.current`.
*/
Fiber.current = undefined;
/**
* `Fiber.yield()` will halt execution of the current fiber and return control
* back to original caller of run(). If an argument is supplied to yield(),
* run() will return that value.
*
* When run() is called again, yield() will return.
*
* Note that this function is a global to allow for correct garbage
* collection. This results in no loss of functionality because it is only
* valid to yield from the currently running fiber anyway.
*
* Note also that `yield` is a reserved word in Javascript. This is normally
* not an issue, however some code linters may complain. Rest assured that it
* will run fine now and in future versions of Javascript.
*/
Fiber.yield = function(param) {
[native code]
}
/**
* run() will start execution of this Fiber, or if it is currently yielding,
* it will resume execution. If an argument is supplied, this argument will
* be passed to the fiber, either as the first parameter to the main
* function [if the fiber has not been started] or as the return value of
* yield() [if the fiber is currently yielding].
*
* This function will return either the parameter passed to yield(), or the
* returned value from the fiber's main function.
*/
Fiber.prototype.run = function(param) {
[native code]
}
/**
* reset() will terminate a running Fiber and restore it to its original
* state, as if it had returned execution.
*
* This is accomplished by causing yield() to throw an exception, and any
* further calls to yield() will also throw an exception. This continues
* until the fiber has completely unwound and returns.
*
* If the fiber returns a value it will be returned by reset().
*
* If the fiber is not running, reset() will have no effect.
*/
Fiber.prototype.reset = function() {
[native code]
}
/**
* throwInto() will cause a currently yielding fiber's yield() call to
* throw instead of return gracefully. This can be useful for notifying a
* fiber that you are no longer interested in its task, and that it should
* give up.
*
* Note that if the fiber does not handle the exception it will continue to
* bubble up and throwInto() will throw the exception right back at you.
*/
Fiber.prototype.throwInto = function(exception) {
[native code]
}
```
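For example, `throwInto()` can be used to cancel a fiber that is parked in `yield()` (a minimal sketch; the `worker` name and error message are arbitrary):
```javascript
var Fiber = require('fibers');

var worker = Fiber(function() {
  try {
    Fiber.yield();                        // paused, waiting to be resumed
  } catch (err) {
    console.log('cancelled: ' + err.message);
  }
});

worker.run();                             // starts the fiber; it yields
worker.throwInto(new Error('stop'));      // yield() throws inside the fiber
```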
Future's definition looks something like this:
```javascript
/**
* Returns a future-function which, when run, starts running the target
* function and returns a future for the result.
*
* Example usage:
* var funcy = function(arg) {
* return arg+1;
* }.future();
*
* funcy(1).wait(); // returns 2
*/
Function.prototype.future = function() { ... }
/**
* Future object, instantiated with the new operator.
*/
function Future() {}
/**
* Wrap a node-style async function to return a future in place of using a callback.
*
* fn - the function or object to wrap
* array - indicates that this callback will return more than 1 argument after `err`. For example,
* `child_process.exec()` returns [err, stdout, stderr]
* suffix - appends a string to every method that was overridden, if you passed an object
*
* Example usage: Future.wrap(asyncFunction)(arg1).wait()
*/
Future.wrap = function(fn, multi, suffix) { ... }
/**
* Invoke a function that will be run in its own fiber context and return a future to its return
* value.
*
* Example:
* Future.task(function() {
* // You can safely `wait` on stuff here
* }).detach();
*/
Future.task = function(fn) { ... }
/**
* Wait on a series of futures and then return. If the futures throw an exception this function
* /won't/ throw it back. You can get the value of the future by calling get() on it directly. If
* you want to wait on a single future you're better off calling future.wait() on the instance.
*
* Example usage: Future.wait(aFuture, anotherFuture)
*/
Future.wait = function(/* ... */) { ... }
/**
* Return the value of this future. If the future hasn't resolved yet this will throw an error.
*/
Future.prototype.get = function() { ... }
/**
* Mark this future as returned. All pending callbacks will be invoked immediately.
*
* value - the value to return when get() or wait() is called.
*
* Example usage: aFuture.return(value)
*/
Future.prototype.return = function(value) { ... }
/**
* Mark this future as thrown; the error will be rethrown from get() or wait(). All pending callbacks will be invoked immediately.
* Note that execution will continue normally after running this method,
* so make sure you exit appropriately after running throw()
*
* error - the error to throw when get() or wait() is called.
*
* Example usage: aFuture.throw(new Error("Something borked"))
*/
Future.prototype.throw = function(error) { ... }
/**
* "detach" this future. Basically this is useful if you want to run a task in a future, you
* aren't interested in its return value, but if it throws you don't want the exception to be
* lost. If this fiber throws, an exception will be thrown to the event loop and node will
* probably fall down.
*/
Future.prototype.detach = function() { ... }
/**
* Returns whether or not this future has resolved yet.
*/
Future.prototype.isResolved = function() { ... }
/**
* Returns a node-style function which will mark this future as resolved when called.
*
* Example usage:
* var errback = aFuture.resolver();
* asyncFunction(arg1, arg2, etc, errback)
* var result = aFuture.wait();
*/
Future.prototype.resolver = function() { ... }
/**
* Waits for this future to resolve and then invokes a callback.
*
* If only one argument is passed it is a standard function(err, val){} errback.
*
* If two arguments are passed, the first argument is a future which will be thrown to in the case
* of error, and the second is a function(val){} callback.
*/
Future.prototype.resolve = function(/* errback or future, callback */) { ... }
/**
* Propagate results to another future.
*
* Example usage: future1.proxy(future2) // future2 gets automatically resolved with however future1 resolves
*/
Future.prototype.proxy = function(future) { ... }
/**
* Differs from its functional counterpart in that it actually resolves the future. Thus if the
* future threw, future.wait() will throw.
*/
Future.prototype.wait = function() { ... }
/**
* Support for converting a Future to and from ES6 Promises.
*/
Future.fromPromise = function(promise) { ... }
Future.prototype.promise = function() { ... }
```
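The Promise helpers at the end of that definition can be exercised like this (a minimal sketch, assuming a runtime with native ES6 Promises):
```javascript
var Future = require('fibers/future');

// Wrap an ES6 Promise so fiber code can wait() on it.
Future.task(function() {
  var f = Future.fromPromise(Promise.resolve(42));
  console.log('got ' + f.wait());          // got 42
}).detach();

// Expose a Future to Promise-based callers.
function sleep(ms) {
  var future = new Future;
  setTimeout(function() { future.return(ms); }, ms);
  return future;
}
sleep(100).promise().then(function(ms) {
  console.log('slept ' + ms + 'ms');
});
```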
GARBAGE COLLECTION
------------------
If you intend to build generators, iterators, or "lazy lists", you should be
aware that all fibers must eventually unwind. This is implemented by causing
yield() to throw unconditionally when the library is trying to unwind your
fiber-- either because reset() was called, or all handles to the fiber were lost
and v8 wants to delete it.
Something like this will, at some point, cause an infinite loop in your
application:
```javascript
var fiber = Fiber(function() {
  while (true) {
    try {
      Fiber.yield();
    } catch(e) {}
  }
});
fiber.run();
```
If you either call reset() on this fiber, or the v8 garbage collector decides it
is no longer in use, the fiber library will attempt to unwind the fiber by
causing all calls to yield() to throw. However, if you catch these exceptions
and continue anyway, an infinite loop will occur.
There are other garbage collection issues that occur with misuse of fiber
handles. If you grab a handle to a fiber from within itself, you should make
sure that the fiber eventually unwinds. This application will leak memory:
```javascript
var fiber = Fiber(function() {
  var that = Fiber.current;
  Fiber.yield();
});
fiber.run();
fiber = undefined;
```
There is no way to get back into the fiber that was started; however, it's
impossible for v8's garbage collector to detect this. With a handle to the fiber
still outstanding, v8 will never garbage collect it and the stack will remain in
memory until the application exits.
Thus, you should take care when grabbing references to `Fiber.current`.
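A minimal sketch of the safe pattern, assuming you keep the outer handle around: unwind the fiber explicitly with `reset()` before dropping the last reference, so the stack holding the self-reference can be freed:
```javascript
var Fiber = require('fibers');

var fiber = Fiber(function() {
  var self = Fiber.current;   // self-reference keeps the stack alive
  Fiber.yield();
  // ... work that would run if the fiber were resumed ...
});

fiber.run();       // start the fiber; it yields back to us
fiber.reset();     // force the fiber to unwind so its stack can be freed
fiber = undefined; // now v8 can collect it
```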
... ...
#!/usr/bin/env node
"use strict";
var fs = require('fs');
global.Fiber = require('../fibers');
global.Future = require('../future');
// Start the repl
var vm = require('vm');
var domain = require('domain');
var repl = require('repl').start('node> ', null, fiberEval, true, true);
function fiberEval(code, context, file, cb) {
if (/^\([ \r\n\t+]\)$/.test(code)) {
return cb(false, undefined);
}
// Parses?
try {
new Function(code);
} catch (err) {
return cb(err, false);
}
// Run in fiber
Future.task(function() {
// Save history
var last;
repl.rli.history = repl.rli.history.slice(0, 50).filter(function(item) {
try {
return item !== last;
} finally {
last = item;
}
});
fs.writeFile(process.env.HOME+ '/.node-history', JSON.stringify(repl.rli.history), function(){});
// Run user code
var d = domain.create();
d.run(function() {
cb(null, vm.runInThisContext(code, file));
});
d.on('error', function(err) {
console.error('\nUnhandled error: '+ err.stack);
});
}).resolve(cb);
}
// Load history
try {
repl.rli.history = JSON.parse(fs.readFileSync(process.env.HOME+ '/.node-history', 'utf-8'));
} catch (err) {}
... ...
{
'target_defaults': {
'default_configuration': 'Release',
'configurations': {
'Release': {
'cflags': [ '-O3' ],
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '3',
'GCC_GENERATE_DEBUGGING_SYMBOLS': 'NO',
},
'msvs_settings': {
'VCCLCompilerTool': {
'Optimization': 3,
'FavorSizeOrSpeed': 1,
},
},
}
},
},
'targets': [
{
'target_name': 'fibers',
'sources': [
'src/fibers.cc',
'src/coroutine.cc',
'src/libcoro/coro.c',
# Rebuild on header changes
'src/coroutine.h',
'src/libcoro/coro.h',
],
'cflags!': ['-ansi'],
'conditions': [
['OS == "win"',
{'defines': ['CORO_FIBER', 'WINDOWS']},
# else
{
'defines': ['USE_CORO', 'CORO_GUARDPAGES=1'],
'ldflags': ['-pthread'],
}
],
['OS == "linux"',
{
'variables': {
'USE_MUSL': '<!(ldd --version 2>&1 | head -n1 | grep "musl" | wc -l)',
},
'conditions': [
['<(USE_MUSL) == 1',
{'defines': ['CORO_ASM', 'USE_V8_SYMBOLS']},
{'defines': ['CORO_UCONTEXT']}
],
],
},
],
['OS == "solaris" or OS == "sunos" or OS == "freebsd" or OS == "aix"', {'defines': ['CORO_UCONTEXT']}],
['OS == "mac"', {'defines': ['CORO_SJLJ']}],
['OS == "openbsd"', {'defines': ['CORO_ASM']}],
['target_arch == "arm" or target_arch == "arm64"',
{
# There's been problems getting real fibers working on arm
'defines': ['CORO_PTHREAD'],
'defines!': ['CORO_UCONTEXT', 'CORO_SJLJ', 'CORO_ASM'],
},
],
],
},
],
}
... ...
#!/usr/bin/env node
var cp = require('child_process'),
fs = require('fs'),
path = require('path');
// Parse args
var force = false, debug = false;
var
arch = process.arch,
platform = process.platform;
var args = process.argv.slice(2).filter(function(arg) {
if (arg === '-f') {
force = true;
return false;
} else if (arg.substring(0, 13) === '--target_arch') {
arch = arg.substring(14);
} else if (arg === '--debug') {
debug = true;
}
return true;
});
if (!debug) {
args.push('--release');
}
if (!{ia32: true, x64: true, arm: true, arm64: true, ppc: true, ppc64: true, s390: true, s390x: true}.hasOwnProperty(arch)) {
console.error('Unsupported (?) architecture: `'+ arch+ '`');
process.exit(1);
}
// Test for pre-built library
var modPath = platform+ '-'+ arch+ '-'+ process.versions.modules;
if (!force) {
try {
fs.statSync(path.join(__dirname, 'bin', modPath, 'fibers.node'));
console.log('`'+ modPath+ '` exists; testing');
cp.execFile(process.execPath, ['quick-test'], function(err, stdout, stderr) {
if (err || stdout !== 'pass' || stderr) {
console.log('Problem with the binary; manual build incoming');
build();
} else {
console.log('Binary is fine; exiting');
}
});
} catch (ex) {
// Stat failed
build();
}
} else {
build();
}
// Build it
function build() {
if (process.versions.electron) {
args.push('--target='+ process.versions.electron, '--dist-url=https://atom.io/download/atom-shell');
}
cp.spawn(
process.platform === 'win32' ? 'node-gyp.cmd' : 'node-gyp',
['rebuild'].concat(args),
{stdio: [process.stdin, process.stdout, process.stderr]})
.on('exit', function(err) {
if (err) {
console.error(
'node-gyp exited with code: '+ err+ '\n'+
'Please make sure you are using a supported platform and node version. If you\n'+
'would like to compile fibers on this machine please make sure you have setup your\n'+
'build environment--\n'+
'Windows + OS X instructions here: https://github.com/nodejs/node-gyp\n'+
'Ubuntu users please run: `sudo apt-get install g++ build-essential`\n'+
'Alpine users please run: `sudo apk add python make g++`'
);
return process.exit(err);
}
afterBuild();
})
.on('error', function(err) {
console.error(
'node-gyp not found! Please ensure node-gyp is in your PATH--\n'+
'Try running: `sudo npm install -g node-gyp`'
);
console.log(err.message);
process.exit(1);
});
}
// Move it to expected location
function afterBuild() {
var targetPath = path.join(__dirname, 'build', debug ? 'Debug' : 'Release', 'fibers.node');
var installPath = path.join(__dirname, 'bin', modPath, 'fibers.node');
try {
fs.mkdirSync(path.join(__dirname, 'bin', modPath));
} catch (ex) {}
try {
fs.statSync(targetPath);
} catch (ex) {
console.error('Build succeeded but target not found');
process.exit(1);
}
fs.renameSync(targetPath, installPath);
console.log('Installed in `'+ installPath+ '`');
if (process.versions.electron) {
process.nextTick(function() {
require('electron').app.quit();
});
}
}
... ...
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := ..
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= .
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= Release
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
CC.target ?= $(CC)
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= $(CXX)
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= $(LINK)
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= gcc
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= g++
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= $(CXX.host)
LDFLAGS.host ?=
AR.host ?= ar
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),?,$1)
unreplace_spaces = $(subst ?,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters.
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp -af "$<" "$@"
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%s\n' '$(call escape_quotes,$(1))'
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain ? instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\
for p in $(POSTBUILDS); do\
eval $$p;\
E=$$?;\
if [ $$E -ne 0 ]; then\
break;\
fi;\
done;\
if [ $$E -ne 0 ]; then\
rm -rf "$@";\
exit $$E;\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains ? for
# spaces already and dirx strips the ? characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word 2,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "all" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: all
all:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
TOOLSET := target
# Suffix rules, putting all outputs into $(obj).
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.m FORCE_DO_CMD
@$(call do_cmd,objc,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.mm FORCE_DO_CMD
@$(call do_cmd,objcxx,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.S FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(srcdir)/%.s FORCE_DO_CMD
@$(call do_cmd,cc,1)
# Try building from generated source, too.
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.m FORCE_DO_CMD
@$(call do_cmd,objc,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.mm FORCE_DO_CMD
@$(call do_cmd,objcxx,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.S FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj).$(TOOLSET)/%.s FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cpp FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.cxx FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.m FORCE_DO_CMD
@$(call do_cmd,objc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.mm FORCE_DO_CMD
@$(call do_cmd,objcxx,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.S FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/%.o: $(obj)/%.s FORCE_DO_CMD
@$(call do_cmd,cc,1)
ifeq ($(strip $(foreach prefix,$(NO_LOAD),\
$(findstring $(join ^,$(prefix)),\
$(join ^,fibers.target.mk)))),)
include fibers.target.mk
endif
quiet_cmd_regen_makefile = ACTION Regenerating $@
cmd_regen_makefile = cd $(srcdir); /usr/local/lib/node_modules/cnpm/node_modules/npminstall/node_modules/node-gyp/gyp/gyp_main.py -fmake --ignore-environment "--toplevel-dir=." -I/Users/fzy/project/koa2_Sequelize_project/node_modules/_fibers@1.0.15@fibers/build/config.gypi -I/usr/local/lib/node_modules/cnpm/node_modules/npminstall/node_modules/node-gyp/addon.gypi -I/Users/fzy/.node-gyp/8.2.1/include/node/common.gypi "--depth=." "-Goutput_dir=." "--generator-output=build" "-Dlibrary=shared_library" "-Dvisibility=default" "-Dnode_root_dir=/Users/fzy/.node-gyp/8.2.1" "-Dnode_gyp_dir=/usr/local/lib/node_modules/cnpm/node_modules/npminstall/node_modules/node-gyp" "-Dnode_lib_file=node.lib" "-Dmodule_root_dir=/Users/fzy/project/koa2_Sequelize_project/node_modules/_fibers@1.0.15@fibers" "-Dnode_engine=v8" binding.gyp
Makefile: $(srcdir)/../../../../.node-gyp/8.2.1/include/node/common.gypi $(srcdir)/../../../../../../usr/local/lib/node_modules/cnpm/node_modules/npminstall/node_modules/node-gyp/addon.gypi $(srcdir)/build/config.gypi $(srcdir)/binding.gyp
$(call do_cmd,regen_makefile)
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
... ...
cmd_Release/fibers.node := c++ -bundle -undefined dynamic_lookup -Wl,-no_pie -Wl,-search_paths_first -mmacosx-version-min=10.7 -arch x86_64 -L./Release -stdlib=libc++ -o Release/fibers.node Release/obj.target/fibers/src/fibers.o Release/obj.target/fibers/src/coroutine.o Release/obj.target/fibers/src/libcoro/coro.o
... ...
cmd_Release/obj.target/fibers/src/coroutine.o := c++ '-DNODE_GYP_MODULE_NAME=fibers' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-D_DARWIN_USE_64_BIT_INODE=1' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-DUSE_CORO' '-DCORO_GUARDPAGES=1' '-DCORO_SJLJ' '-DBUILDING_NODE_EXTENSION' -I/Users/fzy/.node-gyp/8.2.1/include/node -I/Users/fzy/.node-gyp/8.2.1/src -I/Users/fzy/.node-gyp/8.2.1/deps/uv/include -I/Users/fzy/.node-gyp/8.2.1/deps/v8/include -O3 -mmacosx-version-min=10.7 -arch x86_64 -Wall -Wendif-labels -W -Wno-unused-parameter -std=gnu++0x -stdlib=libc++ -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-strict-aliasing -MMD -MF ./Release/.deps/Release/obj.target/fibers/src/coroutine.o.d.raw -c -o Release/obj.target/fibers/src/coroutine.o ../src/coroutine.cc
Release/obj.target/fibers/src/coroutine.o: ../src/coroutine.cc \
../src/coroutine.h /Users/fzy/.node-gyp/8.2.1/include/node/node.h \
/Users/fzy/.node-gyp/8.2.1/include/node/v8.h \
/Users/fzy/.node-gyp/8.2.1/include/node/v8-version.h \
/Users/fzy/.node-gyp/8.2.1/include/node/v8config.h \
/Users/fzy/.node-gyp/8.2.1/include/node/node_version.h \
../src/libcoro/coro.h ../src/v8-version.h
../src/coroutine.cc:
../src/coroutine.h:
/Users/fzy/.node-gyp/8.2.1/include/node/node.h:
/Users/fzy/.node-gyp/8.2.1/include/node/v8.h:
/Users/fzy/.node-gyp/8.2.1/include/node/v8-version.h:
/Users/fzy/.node-gyp/8.2.1/include/node/v8config.h:
/Users/fzy/.node-gyp/8.2.1/include/node/node_version.h:
../src/libcoro/coro.h:
../src/v8-version.h:
... ...
cmd_Release/obj.target/fibers/src/fibers.o := c++ '-DNODE_GYP_MODULE_NAME=fibers' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-D_DARWIN_USE_64_BIT_INODE=1' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-DUSE_CORO' '-DCORO_GUARDPAGES=1' '-DCORO_SJLJ' '-DBUILDING_NODE_EXTENSION' -I/Users/fzy/.node-gyp/8.2.1/include/node -I/Users/fzy/.node-gyp/8.2.1/src -I/Users/fzy/.node-gyp/8.2.1/deps/uv/include -I/Users/fzy/.node-gyp/8.2.1/deps/v8/include -O3 -mmacosx-version-min=10.7 -arch x86_64 -Wall -Wendif-labels -W -Wno-unused-parameter -std=gnu++0x -stdlib=libc++ -fno-rtti -fno-exceptions -fno-threadsafe-statics -fno-strict-aliasing -MMD -MF ./Release/.deps/Release/obj.target/fibers/src/fibers.o.d.raw -c -o Release/obj.target/fibers/src/fibers.o ../src/fibers.cc
Release/obj.target/fibers/src/fibers.o: ../src/fibers.cc \
../src/coroutine.h /Users/fzy/.node-gyp/8.2.1/include/node/node.h \
/Users/fzy/.node-gyp/8.2.1/include/node/v8.h \
/Users/fzy/.node-gyp/8.2.1/include/node/v8-version.h \
/Users/fzy/.node-gyp/8.2.1/include/node/v8config.h \
/Users/fzy/.node-gyp/8.2.1/include/node/node_version.h \
../src/libcoro/coro.h ../src/v8-version.h
../src/fibers.cc:
../src/coroutine.h:
/Users/fzy/.node-gyp/8.2.1/include/node/node.h:
/Users/fzy/.node-gyp/8.2.1/include/node/v8.h:
/Users/fzy/.node-gyp/8.2.1/include/node/v8-version.h:
/Users/fzy/.node-gyp/8.2.1/include/node/v8config.h:
/Users/fzy/.node-gyp/8.2.1/include/node/node_version.h:
../src/libcoro/coro.h:
../src/v8-version.h:
... ...
cmd_Release/obj.target/fibers/src/libcoro/coro.o := cc '-DNODE_GYP_MODULE_NAME=fibers' '-DUSING_UV_SHARED=1' '-DUSING_V8_SHARED=1' '-DV8_DEPRECATION_WARNINGS=1' '-D_DARWIN_USE_64_BIT_INODE=1' '-D_LARGEFILE_SOURCE' '-D_FILE_OFFSET_BITS=64' '-DUSE_CORO' '-DCORO_GUARDPAGES=1' '-DCORO_SJLJ' '-DBUILDING_NODE_EXTENSION' -I/Users/fzy/.node-gyp/8.2.1/include/node -I/Users/fzy/.node-gyp/8.2.1/src -I/Users/fzy/.node-gyp/8.2.1/deps/uv/include -I/Users/fzy/.node-gyp/8.2.1/deps/v8/include -O3 -mmacosx-version-min=10.7 -arch x86_64 -Wall -Wendif-labels -W -Wno-unused-parameter -fno-strict-aliasing -MMD -MF ./Release/.deps/Release/obj.target/fibers/src/libcoro/coro.o.d.raw -c -o Release/obj.target/fibers/src/libcoro/coro.o ../src/libcoro/coro.c
Release/obj.target/fibers/src/libcoro/coro.o: ../src/libcoro/coro.c \
../src/libcoro/coro.h
../src/libcoro/coro.c:
../src/libcoro/coro.h:
... ...
# This file is generated by gyp; do not edit.
export builddir_name ?= ./build/.
.PHONY: all
all:
$(MAKE) fibers
... ...
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt59l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt59l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "59",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"xcode_version": "7.0",
"nodedir": "/Users/fzy/.node-gyp/8.2.1",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"registry": "https://registry.npm.taobao.org",
"userconfig": "/Users/fzy/.cnpmrc",
"r": "https://registry.npm.taobao.org",
"disturl": "https://npm.taobao.org/mirrors/node",
"cache": "/Users/fzy/.npminstall_tarball"
}
}
... ...
# This file is generated by gyp; do not edit.
TOOLSET := target
TARGET := fibers
DEFS_Debug := \
'-DNODE_GYP_MODULE_NAME=fibers' \
'-DUSING_UV_SHARED=1' \
'-DUSING_V8_SHARED=1' \
'-DV8_DEPRECATION_WARNINGS=1' \
'-D_DARWIN_USE_64_BIT_INODE=1' \
'-D_LARGEFILE_SOURCE' \
'-D_FILE_OFFSET_BITS=64' \
'-DUSE_CORO' \
'-DCORO_GUARDPAGES=1' \
'-DCORO_SJLJ' \
'-DBUILDING_NODE_EXTENSION' \
'-DDEBUG' \
'-D_DEBUG' \
'-DV8_ENABLE_CHECKS'
# Flags passed to all source files.
CFLAGS_Debug := \
-O0 \
-gdwarf-2 \
-mmacosx-version-min=10.7 \
-arch x86_64 \
-Wall \
-Wendif-labels \
-W \
-Wno-unused-parameter
# Flags passed to only C files.
CFLAGS_C_Debug := \
-fno-strict-aliasing
# Flags passed to only C++ files.
CFLAGS_CC_Debug := \
-std=gnu++0x \
-stdlib=libc++ \
-fno-rtti \
-fno-exceptions \
-fno-threadsafe-statics \
-fno-strict-aliasing
# Flags passed to only ObjC files.
CFLAGS_OBJC_Debug :=
# Flags passed to only ObjC++ files.
CFLAGS_OBJCC_Debug :=
INCS_Debug := \
-I/Users/fzy/.node-gyp/8.2.1/include/node \
-I/Users/fzy/.node-gyp/8.2.1/src \
-I/Users/fzy/.node-gyp/8.2.1/deps/uv/include \
-I/Users/fzy/.node-gyp/8.2.1/deps/v8/include
DEFS_Release := \
'-DNODE_GYP_MODULE_NAME=fibers' \
'-DUSING_UV_SHARED=1' \
'-DUSING_V8_SHARED=1' \
'-DV8_DEPRECATION_WARNINGS=1' \
'-D_DARWIN_USE_64_BIT_INODE=1' \
'-D_LARGEFILE_SOURCE' \
'-D_FILE_OFFSET_BITS=64' \
'-DUSE_CORO' \
'-DCORO_GUARDPAGES=1' \
'-DCORO_SJLJ' \
'-DBUILDING_NODE_EXTENSION'
# Flags passed to all source files.
CFLAGS_Release := \
-O3 \
-mmacosx-version-min=10.7 \
-arch x86_64 \
-Wall \
-Wendif-labels \
-W \
-Wno-unused-parameter
# Flags passed to only C files.
CFLAGS_C_Release := \
-fno-strict-aliasing
# Flags passed to only C++ files.
CFLAGS_CC_Release := \
-std=gnu++0x \
-stdlib=libc++ \
-fno-rtti \
-fno-exceptions \
-fno-threadsafe-statics \
-fno-strict-aliasing
# Flags passed to only ObjC files.
CFLAGS_OBJC_Release :=
# Flags passed to only ObjC++ files.
CFLAGS_OBJCC_Release :=
INCS_Release := \
-I/Users/fzy/.node-gyp/8.2.1/include/node \
-I/Users/fzy/.node-gyp/8.2.1/src \
-I/Users/fzy/.node-gyp/8.2.1/deps/uv/include \
-I/Users/fzy/.node-gyp/8.2.1/deps/v8/include
OBJS := \
$(obj).target/$(TARGET)/src/fibers.o \
$(obj).target/$(TARGET)/src/coroutine.o \
$(obj).target/$(TARGET)/src/libcoro/coro.o
# Add to the list of files we specially track dependencies for.
all_deps += $(OBJS)
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.
$(OBJS): TOOLSET := $(TOOLSET)
$(OBJS): GYP_CFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE))
$(OBJS): GYP_CXXFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_CC_$(BUILDTYPE))
$(OBJS): GYP_OBJCFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))
$(OBJS): GYP_OBJCXXFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))
# Suffix rules, putting all outputs into $(obj).
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
# Try building from generated source, too.
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.c FORCE_DO_CMD
@$(call do_cmd,cc,1)
# End of this set of suffix rules
### Rules for final target.
LDFLAGS_Debug := \
-undefined dynamic_lookup \
-Wl,-no_pie \
-Wl,-search_paths_first \
-mmacosx-version-min=10.7 \
-arch x86_64 \
-L$(builddir) \
-stdlib=libc++
LIBTOOLFLAGS_Debug := \
-undefined dynamic_lookup \
-Wl,-no_pie \
-Wl,-search_paths_first
LDFLAGS_Release := \
-undefined dynamic_lookup \
-Wl,-no_pie \
-Wl,-search_paths_first \
-mmacosx-version-min=10.7 \
-arch x86_64 \
-L$(builddir) \
-stdlib=libc++
LIBTOOLFLAGS_Release := \
-undefined dynamic_lookup \
-Wl,-no_pie \
-Wl,-search_paths_first
LIBS :=
$(builddir)/fibers.node: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))
$(builddir)/fibers.node: LIBS := $(LIBS)
$(builddir)/fibers.node: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))
$(builddir)/fibers.node: TOOLSET := $(TOOLSET)
$(builddir)/fibers.node: $(OBJS) FORCE_DO_CMD
$(call do_cmd,solink_module)
all_deps += $(builddir)/fibers.node
# Add target alias
.PHONY: fibers
fibers: $(builddir)/fibers.node
# Short alias for building this executable.
.PHONY: fibers.node
fibers.node: $(builddir)/fibers.node
# Add executable to "all" target.
.PHONY: all
all: $(builddir)/fibers.node
... ...
#!/usr/bin/env python
# Generated by gyp. Do not edit.
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffixes on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out the PkgInfo file, since the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
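# Illustrative note (not part of the original tool): for an Info.plist whose
# CFBundlePackageType is 'APPL' and whose CFBundleSignature is missing or not four
# characters long, the PkgInfo written above contains exactly the eight bytes 'APPL????'.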
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
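# Illustrative note (not part of the original tool): for a hypothetical Foo.framework
# packaged with version 'A', ExecPackageFramework above produces this layout
# (assuming Versions/A/Foo exists):
#   Foo.framework/Versions/Current -> A
#   Foo.framework/Foo              -> Versions/Current/Foo
#   Foo.framework/Resources        -> Versions/Current/Resources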
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| argument is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contain an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs do not contain any imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if input paths are relative, so use os.path.abspath
# to get absolute path names for the inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best matches the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from the user or the SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, defaults to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionaries: variable substitutions and values
to override when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or, if the user specified
the PROVISIONING_PROFILE variable, only that one) and selects the most
specific one that corresponds to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (i.e. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each reference to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
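# Illustrative note (not part of the original tool; values are hypothetical): with
# substitutions {'CFBundleIdentifier': 'com.example.app', 'AppIdentifierPrefix': 'ABCDE12345.'}
# the string '$(AppIdentifierPrefix)$(CFBundleIdentifier)' expands to
# 'ABCDE12345.com.example.app'; lists and dictionaries are expanded recursively.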
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
... ...
if (process.fiberLib) {
return module.exports = process.fiberLib;
}
var fs = require('fs'), path = require('path');
// Seed random numbers [gh-82]
Math.random();
// Look for binary for this platform
var modPath = path.join(__dirname, 'bin', process.platform+ '-'+ process.arch+ '-'+ process.versions.modules, 'fibers');
try {
fs.statSync(modPath+ '.node');
} catch (ex) {
// No binary!
console.error(
'## There is an issue with `node-fibers` ##\n'+
'`'+ modPath+ '.node` is missing.\n\n'+
'Try running this to fix the issue: '+ process.execPath+ ' '+ __dirname.replace(' ', '\\ ')+ '/build'
);
throw new Error('Missing binary. See message above.');
}
// Pull in fibers implementation
process.fiberLib = module.exports = require(modPath).Fiber;
... ...
"use strict";
var Fiber = require('./fibers');
var util = require('util');
module.exports = Future;
Function.prototype.future = function(detach) {
var fn = this;
var ret = function() {
var future = new FiberFuture(fn, this, arguments);
if (detach) {
future.detach();
}
return future;
};
ret.toString = function() {
return '<<Future '+ fn+ '.future()>>';
};
return ret;
};
function Future() {}
/**
* Run one or more functions in a future context, and return a future for their return value. This is
* useful for instances where you want a closure to be able to `.wait()`. This also lets you wait for
* multiple parallel operations to run.
*/
Future.task = function(fn) {
if (arguments.length === 1) {
return fn.future()();
} else {
var future = new Future, pending = arguments.length, error, values = new Array(arguments.length);
for (var ii = 0; ii < arguments.length; ++ii) {
arguments[ii].future()().resolve(function(ii, err, val) {
if (err) {
error = err;
}
values[ii] = val;
if (--pending === 0) {
if (error) {
future.throw(error);
} else {
future.return(values);
}
}
}.bind(null, ii));
}
return future;
}
};
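// Illustrative usage only (not part of the library); assumes `fs` is require('fs')
// and the file name is hypothetical:
//
//   Future.task(function() {
//     // runs in its own fiber, so .wait() is allowed here
//     var data = Future.wrap(fs.readFile)('example.txt').wait();
//     console.log(data.length);
//   }).detach();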
/**
* Wrap node-style async functions to instead return futures. This assumes that the last parameter
* of the function is a callback.
*
* If a single function is passed, a future-returning function is created. If an object is passed, a
* new object is returned with all functions wrapped.
*
* The value that is returned from the invocation of the underlying function is assigned to the
* property `_` on the future. This is useful for functions like `execFile` which take a callback,
* but also return meaningful information.
*
* `multi` indicates that this callback will return more than 1 argument after `err`. For example,
* `child_process.exec()`
*
* `suffix` will append a string to every method that was overridden, if you pass an object to
* `Future.wrap()`. Default is 'Future'.
*
* var readFileFuture = Future.wrap(require('fs').readFile);
* var fs = Future.wrap(require('fs'));
* fs.readFileFuture('example.txt').wait();
*/
Future.wrap = function(fnOrObject, multi, suffix, stop) {
if (typeof fnOrObject === 'object') {
var wrapped = Object.create(fnOrObject);
for (var ii in fnOrObject) {
if (wrapped[ii] instanceof Function) {
wrapped[suffix === undefined ? ii+ 'Future' : ii+ suffix] = Future.wrap(wrapped[ii], multi, suffix, stop);
}
}
return wrapped;
} else if (typeof fnOrObject === 'function') {
var fn = function() {
var future = new Future;
var args = Array.prototype.slice.call(arguments);
if (multi) {
var cb = future.resolver();
args.push(function(err) {
cb(err, Array.prototype.slice.call(arguments, 1));
});
} else {
args.push(future.resolver());
}
future._ = fnOrObject.apply(this, args);
return future;
}
// Modules like `request` return a function that has more functions as properties. Handle this
// in some kind of reasonable way.
if (!stop) {
var proto = Object.create(fnOrObject);
for (var ii in fnOrObject) {
if (fnOrObject.hasOwnProperty(ii) && fnOrObject[ii] instanceof Function) {
proto[ii] = proto[ii]; // not a no-op: copies the inherited function onto `proto` as an own property
}
}
fn.__proto__ = Future.wrap(proto, multi, suffix, true);
}
return fn;
}
};
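// Illustrative usage only (not part of the library): with `multi` the future resolves
// to an array of the callback arguments that follow `err`, e.g. [stdout, stderr] for
// child_process.exec (the command shown is hypothetical):
//
//   var execFuture = Future.wrap(require('child_process').exec, true);
//   var out = execFuture('echo hi').wait();   // inside a fiber: out = [stdout, stderr]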
/**
* Wait on a series of futures and then return. If the futures throw an exception this function
* /won't/ throw it back. You can get the value of the future by calling get() on it directly. If
* you want to wait on a single future you're better off calling future.wait() on the instance.
*/
Future.wait = function wait(/* ... */) {
// Normalize arguments + pull out a FiberFuture for reuse if possible
var futures = [], singleFiberFuture;
for (var ii = 0; ii < arguments.length; ++ii) {
var arg = arguments[ii];
if (arg instanceof Future) {
// Ignore already resolved futures
if (arg.isResolved()) {
continue;
}
// Look for fiber reuse
if (!singleFiberFuture && arg instanceof FiberFuture && !arg.started) {
singleFiberFuture = arg;
continue;
}
futures.push(arg);
} else if (arg instanceof Array) {
for (var jj = 0; jj < arg.length; ++jj) {
var aarg = arg[jj];
if (aarg instanceof Future) {
// Ignore already resolved futures
if (aarg.isResolved()) {
continue;
}
// Look for fiber reuse
if (!singleFiberFuture && aarg instanceof FiberFuture && !aarg.started) {
singleFiberFuture = aarg;
continue;
}
futures.push(aarg);
} else {
throw new Error(aarg+ ' is not a future');
}
}
} else {
throw new Error(arg+ ' is not a future');
}
}
// Resumes current fiber
var fiber = Fiber.current;
if (!fiber) {
throw new Error('Can\'t wait without a fiber');
}
// Resolve all futures
var pending = futures.length + (singleFiberFuture ? 1 : 0);
function cb() {
if (!--pending) {
fiber.run();
}
}
for (var ii = 0; ii < futures.length; ++ii) {
futures[ii].resolve(cb);
}
// Reusing a fiber?
if (singleFiberFuture) {
singleFiberFuture.started = true;
try {
singleFiberFuture.return(
singleFiberFuture.fn.apply(singleFiberFuture.context, singleFiberFuture.args));
} catch(e) {
singleFiberFuture.throw(e);
}
--pending;
}
// Yield this fiber
if (pending) {
Fiber.yield();
}
};
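// Illustrative usage only (not part of the library), inside a fiber: wait on several
// futures at once, then read each result with get(); Future.wait itself does not rethrow.
//
//   Future.wait(f1, f2, f3);            // or Future.wait([f1, f2, f3])
//   var values = [f1.get(), f2.get(), f3.get()];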
/**
* Return a Future that waits on an ES6 Promise.
*/
Future.fromPromise = function(promise) {
var future = new Future;
promise.then(function(val) {
future.return(val);
}, function(err) {
future.throw(err);
});
return future;
};
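// Illustrative usage only (not part of the library), inside a fiber, assuming
// `somePromise` is an ES6 Promise:
//
//   var value = Future.fromPromise(somePromise).wait();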
Future.prototype = {
/**
* Return the value of this future. If the future hasn't resolved yet this will throw an error.
*/
get: function() {
if (!this.resolved) {
throw new Error('Future must resolve before value is ready');
} else if (this.error) {
// Link the stack traces up
var error = this.error;
var localStack = {};
Error.captureStackTrace(localStack, Future.prototype.get);
var futureStack = Object.getOwnPropertyDescriptor(error, 'futureStack');
if (!futureStack) {
futureStack = Object.getOwnPropertyDescriptor(error, 'stack');
if (futureStack) {
Object.defineProperty(error, 'futureStack', futureStack);
}
}
if (futureStack && futureStack.get) {
Object.defineProperty(error, 'stack', {
get: function() {
var stack = futureStack.get.apply(error);
if (stack) {
stack = stack.split('\n');
return [stack[0]]
.concat(localStack.stack.split('\n').slice(1))
.concat(' - - - - -')
.concat(stack.slice(1))
.join('\n');
} else {
return localStack.stack;
}
},
set: function(stack) {
Object.defineProperty(error, 'stack', {
value: stack,
configurable: true,
enumerable: false,
writable: true,
});
},
configurable: true,
enumerable: false,
});
}
throw error;
} else {
return this.value;
}
},
/**
* Mark this future as returned. All pending callbacks will be invoked immediately.
*/
"return": function(value) {
if (this.resolved) {
throw new Error('Future resolved more than once');
}
this.value = value;
this.resolved = true;
var callbacks = this.callbacks;
if (callbacks) {
delete this.callbacks;
for (var ii = 0; ii < callbacks.length; ++ii) {
try {
var ref = callbacks[ii];
if (ref[1]) {
ref[1](value);
} else {
ref[0](undefined, value);
}
} catch(ex) {
// console.log('Resolve cb threw', String(ex.stack || ex.message || ex));
process.nextTick(function() {
throw(ex);
});
}
}
}
},
/**
* Mark this future as thrown with the given error. All pending callbacks will be invoked immediately.
*/
"throw": function(error) {
if (this.resolved) {
throw new Error('Future resolved more than once');
} else if (!error) {
throw new Error('Must throw non-empty error');
}
this.error = error;
this.resolved = true;
var callbacks = this.callbacks;
if (callbacks) {
delete this.callbacks;
for (var ii = 0; ii < callbacks.length; ++ii) {
try {
var ref = callbacks[ii];
if (ref[1]) {
ref[0].throw(error);
} else {
ref[0](error);
}
} catch(ex) {
// console.log('Resolve cb threw', String(ex.stack || ex.message || ex));
process.nextTick(function() {
throw(ex);
});
}
}
}
},
/**
* "detach" this future. Basically this is useful if you want to run a task in a future, you
* aren't interested in its return value, but if it throws you don't want the exception to be
* lost. If this fiber throws, an exception will be thrown to the event loop and node will
* probably fall down.
*/
detach: function() {
this.resolve(function(err) {
if (err) {
throw err;
}
});
},
/**
* Returns whether or not this future has resolved yet.
*/
isResolved: function() {
return this.resolved === true;
},
/**
* Returns a node-style function which will mark this future as resolved when called.
*/
resolver: function() {
return function(err, val) {
if (err) {
this.throw(err);
} else {
this.return(val);
}
}.bind(this);
},
/**
* Waits for this future to resolve and then invokes a callback.
*
* If two arguments are passed, the first argument is a future which will be thrown to in the case
* of error, and the second is a function(val){} callback.
*
* If only one argument is passed it is a standard function(err, val){} callback.
*/
resolve: function(arg1, arg2) {
if (this.resolved) {
if (arg2) {
if (this.error) {
arg1.throw(this.error);
} else {
arg2(this.value);
}
} else {
arg1(this.error, this.value);
}
} else {
(this.callbacks = this.callbacks || []).push([arg1, arg2]);
}
return this;
},
/**
* Resolve only in the case of success
*/
resolveSuccess: function(cb) {
this.resolve(function(err, val) {
if (err) {
return;
}
cb(val);
});
return this;
},
/**
* Propagate results to another future.
*/
proxy: function(future) {
this.resolve(function(err, val) {
if (err) {
future.throw(err);
} else {
future.return(val);
}
});
},
/**
* Propagate only errors to another future or an array of futures.
*/
proxyErrors: function(futures) {
this.resolve(function(err) {
if (!err) {
return;
}
if (futures instanceof Array) {
for (var ii = 0; ii < futures.length; ++ii) {
futures[ii].throw(err);
}
} else {
futures.throw(err);
}
});
return this;
},
/**
* Returns an ES6 Promise
*/
promise: function() {
var that = this;
return new Promise(function(resolve, reject) {
that.resolve(function(err, val) {
if (err) {
reject(err);
} else {
resolve(val);
}
});
});
},
/**
* Differs from its functional counterpart in that it actually resolves the future. Thus if the
* future threw, future.wait() will throw.
*/
wait: function() {
if (this.isResolved()) {
return this.get();
}
Future.wait(this);
return this.get();
},
};
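// Illustrative usage only (not part of the library): bridge a node-style callback into
// a future via resolver(), then hand it out as an ES6 Promise (no fiber required on
// this path; the file name is hypothetical):
//
//   var future = new Future();
//   require('fs').readFile('example.txt', future.resolver());
//   future.promise().then(function(data) { /* use data */ });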
/**
* A function call which runs inside a fiber automatically and returns a future.
*/
function FiberFuture(fn, context, args) {
this.fn = fn;
this.context = context;
this.args = args;
this.started = false;
var that = this;
process.nextTick(function() {
if (!that.started) {
that.started = true;
Fiber(function() {
try {
that.return(fn.apply(context, args));
} catch(e) {
that.throw(e);
}
}).run();
}
});
}
util.inherits(FiberFuture, Future);
... ...
{
"name": "fibers",
"version": "1.0.15",
"description": "Cooperative multi-tasking for Javascript",
"keywords": [
"fiber",
"fibers",
"coroutine",
"thread",
"async",
"parallel",
"worker",
"future",
"promise"
],
"homepage": "https://github.com/laverdet/node-fibers",
"author": "Marcel Laverdet <marcel@laverdet.com> (https://github.com/laverdet/)",
"main": "fibers",
"scripts": {
"install": "node build.js || nodejs build.js",
"test": "node test.js || nodejs test.js"
},
"repository": {
"type": "git",
"url": "git://github.com/laverdet/node-fibers.git"
},
"license": "MIT",
"engines": {
"node": ">=0.5.2"
},
"_from": "fibers@1.0.15",
"_resolved": "http://registry.npm.taobao.org/fibers/download/fibers-1.0.15.tgz"
}
\ No newline at end of file
... ...
"use strict"
var Fiber = require('./fibers');
var fiber = Fiber(function() {
process.stdout.write(Fiber.yield());
});
fiber.run();
fiber.run('pass');
... ...
#include "coroutine.h"
#include "v8-version.h"
#include <assert.h>
#ifndef WINDOWS
#include <pthread.h>
#else
#include <windows.h>
#include <intrin.h>
// Stub pthreads into Windows approximations
#define pthread_t HANDLE
#define pthread_create(thread, attr, fn, arg) !((*thread)=CreateThread(NULL, 0, &(fn), arg, 0, NULL))
#define pthread_join(thread, arg) WaitForSingleObject((thread), INFINITE)
#define pthread_key_t DWORD
#define pthread_key_create(key, dtor) (*key)=TlsAlloc()
#define pthread_setspecific(key, val) TlsSetValue((key), (val))
#define pthread_getspecific(key) TlsGetValue((key))
#endif
#include <stdexcept>
#include <stack>
#include <vector>
using namespace std;
const size_t v8_tls_keys = 3;
static std::vector<void*> fls_data_pool;
static pthread_key_t coro_thread_key = 0;
static pthread_key_t isolate_key = 0x7777;
static pthread_key_t thread_id_key = 0x7777;
static pthread_key_t thread_data_key = 0x7777;
static size_t stack_size = 0;
static size_t coroutines_created_ = 0;
static vector<Coroutine*> fiber_pool;
static Coroutine* delete_me = NULL;
size_t Coroutine::pool_size = 120;
static bool can_poke(void* addr) {
#ifdef WINDOWS
MEMORY_BASIC_INFORMATION mbi;
if (!VirtualQueryEx(GetCurrentProcess(), addr, &mbi, sizeof(mbi))) {
return false;
}
if (!(mbi.State & MEM_COMMIT)) {
return false;
}
return true;
#else
// TODO: Check pointer on other OS's? Windows is the only case I've seen so far that has
// spooky gaps in the TLS key space
return true;
#endif
}
#ifdef USE_V8_SYMBOLS
// Some distributions of node, most notably Ubuntu, strip the v8 internal symbols and so we don't
// have access to this stuff. In most cases we will use the more complicated `find_thread_id_key`
// below, since it tends to work on more platforms.
namespace v8 {
namespace base {
class Thread {
public: typedef int32_t LocalStorageKey;
};
}
namespace internal {
class Isolate {
public:
static base::Thread::LocalStorageKey isolate_key_;
static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
static base::Thread::LocalStorageKey thread_id_key_;
};
}
}
#endif
#ifndef WINDOWS
static void* find_thread_id_key(void* arg)
#else
static DWORD __stdcall find_thread_id_key(LPVOID arg)
#endif
{
v8::Isolate* isolate = static_cast<v8::Isolate*>(arg);
assert(isolate != NULL);
v8::Locker locker(isolate);
isolate->Enter();
// First pass-- find isolate thread key
for (pthread_key_t ii = coro_thread_key; ii > 0; --ii) {
void* tls = pthread_getspecific(ii - 1);
if (tls == isolate) {
isolate_key = ii - 1;
break;
}
}
assert(isolate_key != 0x7777);
// Second pass-- find data key
int thread_id = 0;
for (pthread_key_t ii = isolate_key + 2; ii < coro_thread_key; ++ii) {
void* tls = pthread_getspecific(ii);
if (can_poke(tls) && *(void**)tls == isolate) {
// First member of per-thread data is the isolate
thread_data_key = ii;
// Second member is the thread id
thread_id = *(int*)((void**)tls + 1);
break;
}
}
assert(thread_data_key != 0x7777);
// Third pass-- find thread id key
for (pthread_key_t ii = isolate_key + 1; ii < thread_data_key; ++ii) {
int tls = static_cast<int>(reinterpret_cast<intptr_t>(pthread_getspecific(ii)));
if (tls == thread_id) {
thread_id_key = ii;
break;
}
}
assert(thread_id_key != 0x7777);
isolate->Exit();
return NULL;
}
/**
* Coroutine class definition
*/
void Coroutine::init(v8::Isolate* isolate) {
v8::Unlocker unlocker(isolate);
pthread_key_create(&coro_thread_key, NULL);
pthread_setspecific(coro_thread_key, &current());
#ifdef USE_V8_SYMBOLS
isolate_key = v8::internal::Isolate::isolate_key_;
thread_data_key = v8::internal::Isolate::per_isolate_thread_data_key_;
thread_id_key = v8::internal::Isolate::thread_id_key_;
#else
pthread_t thread;
pthread_create(&thread, NULL, find_thread_id_key, isolate);
pthread_join(thread, NULL);
#endif
}
Coroutine& Coroutine::current() {
Coroutine* current = static_cast<Coroutine*>(pthread_getspecific(coro_thread_key));
if (!current) {
current = new Coroutine;
pthread_setspecific(coro_thread_key, current);
}
return *current;
}
void Coroutine::set_stack_size(unsigned int size) {
assert(!stack_size);
stack_size = size;
}
size_t Coroutine::coroutines_created() {
return coroutines_created_;
}
void Coroutine::trampoline(void* that) {
#ifdef CORO_PTHREAD
pthread_setspecific(coro_thread_key, that);
#endif
#ifdef CORO_FIBER
// I can't figure out how to get the precise base of the stack in Windows. Since CreateFiber
// creates the stack automatically we don't have access to the base. We can however grab the
// current esp position, and use that as an approximation. Padding is added for safety since the
// base is slightly different.
static_cast<Coroutine*>(that)->stack_base = (size_t*)_AddressOfReturnAddress() - stack_size + 16;
#endif
if (!fls_data_pool.empty()) {
pthread_setspecific(thread_data_key, fls_data_pool.back());
pthread_setspecific(thread_id_key, fls_data_pool.at(fls_data_pool.size() - 2));
pthread_setspecific(isolate_key, fls_data_pool.at(fls_data_pool.size() - 3));
fls_data_pool.resize(fls_data_pool.size() - 3);
}
while (true) {
static_cast<Coroutine*>(that)->entry(const_cast<void*>(static_cast<Coroutine*>(that)->arg));
}
}
Coroutine::Coroutine() :
fls_data(v8_tls_keys),
entry(NULL),
arg(NULL) {
stack.sptr = NULL;
coro_create(&context, NULL, NULL, NULL, 0);
}
Coroutine::Coroutine(entry_t& entry, void* arg) :
fls_data(v8_tls_keys),
entry(entry),
arg(arg) {
}
Coroutine::~Coroutine() {
if (stack.sptr) {
coro_stack_free(&stack);
}
#ifdef CORO_FIBER
if (context.fiber)
#endif
(void)coro_destroy(&context);
}
Coroutine* Coroutine::create_fiber(entry_t* entry, void* arg) {
if (!fiber_pool.empty()) {
Coroutine* fiber = fiber_pool.back();
fiber_pool.pop_back();
fiber->reset(entry, arg);
return fiber;
}
Coroutine* coro = new Coroutine(*entry, arg);
if (!coro_stack_alloc(&coro->stack, stack_size)) {
delete coro;
return NULL;
}
coro_create(&coro->context, trampoline, coro, coro->stack.sptr, coro->stack.ssze);
#ifdef CORO_FIBER
// Stupid hack. libcoro's project structure combined with Windows's CreateFiber functions makes
// it difficult to catch this error. Sometimes Windows will return `ERROR_NOT_ENOUGH_MEMORY` or
// `ERROR_COMMITMENT_LIMIT` if it can't make any more fibers. However, `coro_stack_alloc` returns
// success unconditionally on Windows so we have to detect the error here, after the call to
// `coro_create`.
if (!coro->context.fiber) {
delete coro;
return NULL;
}
#endif
++coroutines_created_;
return coro;
}
void Coroutine::reset(entry_t* entry, void* arg) {
assert(entry != NULL);
this->entry = entry;
this->arg = arg;
}
void Coroutine::transfer(Coroutine& next) {
assert(this != &next);
#ifndef CORO_PTHREAD
fls_data[0] = pthread_getspecific(isolate_key);
fls_data[1] = pthread_getspecific(thread_id_key);
fls_data[2] = pthread_getspecific(thread_data_key);
pthread_setspecific(isolate_key, next.fls_data[0]);
pthread_setspecific(thread_id_key, next.fls_data[1]);
pthread_setspecific(thread_data_key, next.fls_data[2]);
pthread_setspecific(coro_thread_key, &next);
#endif
coro_transfer(&context, &next.context);
#ifndef CORO_PTHREAD
pthread_setspecific(coro_thread_key, this);
#endif
}
void Coroutine::run() {
Coroutine& current = Coroutine::current();
assert(!delete_me);
assert(&current != this);
current.transfer(*this);
if (delete_me) {
// This means finish() was called on the coroutine and the pool was full so this coroutine needs
// to be deleted. We can't delete from inside finish(), because that would deallocate the
// current stack. However we CAN delete here, we just have to be very careful.
assert(delete_me == this);
assert(&current != this);
delete_me = NULL;
delete this;
}
}
void Coroutine::finish(Coroutine& next, v8::Isolate* isolate) {
{
assert(&next != this);
assert(&current() == this);
if (fiber_pool.size() < pool_size) {
fiber_pool.push_back(this);
} else {
#if V8_MAJOR_VERSION > 4 || (V8_MAJOR_VERSION == 4 && V8_MINOR_VERSION >= 10)
// Clean up isolate data
isolate->DiscardThreadSpecificMetadata();
#else
// If not supported, then we can mitigate v8's leakage by saving these thread locals.
fls_data_pool.reserve(fls_data_pool.size() + 3);
fls_data_pool.push_back(pthread_getspecific(isolate_key));
fls_data_pool.push_back(pthread_getspecific(thread_id_key));
fls_data_pool.push_back(pthread_getspecific(thread_data_key));
#endif
// Can't delete right now because we're currently on this stack!
assert(delete_me == NULL);
delete_me = this;
}
}
this->transfer(next);
}
void* Coroutine::bottom() const {
#ifdef CORO_FIBER
return stack_base;
#else
return stack.sptr;
#endif
}
size_t Coroutine::size() const {
return sizeof(Coroutine) + stack_size * sizeof(void*);
}
... ...
#include <node.h>
#include <stdlib.h>
#include <vector>
#include "libcoro/coro.h"
class Coroutine {
public:
typedef void(entry_t)(void*);
private:
#ifdef CORO_FIBER
void* stack_base;
#endif
coro_context context;
coro_stack stack;
std::vector<void*> fls_data;
entry_t* entry;
void* arg;
~Coroutine();
/**
* Constructor for the currently running "fiber". This is really just the original thread, but we
* need a way to get back into the main thread after yielding to a fiber. Basically this
* shouldn't be called from anywhere.
*/
Coroutine();
/**
* This constructor will actually create a new fiber context. Execution does not begin
* until you call run() for the first time.
*/
Coroutine(entry_t& entry, void* arg);
/**
* Resets the context of this coroutine from the start. Used to recycle old coroutines.
*/
void reset(entry_t* entry, void* arg);
static void trampoline(void* that);
void transfer(Coroutine& next);
public:
static size_t pool_size;
/**
* Returns the currently-running fiber.
*/
static Coroutine& current();
/**
* Create a new fiber.
*/
static Coroutine* create_fiber(entry_t* entry, void* arg = NULL);
/**
* Initialize the library.
*/
static void init(v8::Isolate* isolate);
/**
* Set the stack size of coroutines created by this library. Since coroutines are pooled the stack
* size is global instead of per-coroutine. The stack is measured in units of sizeof(void*), so
* set_stack_size(128) reserves 512 bytes on a 32-bit platform or 1 KB on a 64-bit platform.
*/
static void set_stack_size(unsigned int size);
/**
* Get the number of coroutines that have been created.
*/
static size_t coroutines_created();
/**
* Start or resume execution in this fiber. Note there is no explicit yield() function,
* you must manually run another fiber.
*/
void run();
/**
* Finish this coroutine. This will halt execution of this coroutine and resume execution
* of `next`. If you do not call this function and instead just return from `entry`, the
* application will exit. This function may or may not actually return.
*/
void finish(Coroutine& next, v8::Isolate* isolate);
/**
* Returns address of the lowest usable byte in this Coroutine's stack.
*/
void* bottom() const;
/**
* Returns the size this Coroutine takes up in the heap.
*/
size_t size() const;
};
... ...
#include "coroutine.h"
#include "v8-version.h"
#include <assert.h>
#include <node.h>
#include <node_version.h>
#include <vector>
#include <iostream>
#define THROW(x, m) return uni::Return(uni::ThrowException(Isolate::GetCurrent(), x(uni::NewLatin1String(Isolate::GetCurrent(), m))), args)
// Run GC more often when debugging
#ifdef DEBUG
#define GC_ADJUST 100
#else
#define GC_ADJUST 1
#endif
using namespace std;
using namespace v8;
// Handle legacy V8 API
namespace uni {
#if V8_MAJOR_VERSION > 5 || (V8_MAJOR_VERSION == 5 && V8_MINOR_VERSION >= 2)
// Actually 5.2.244
template <void (*F)(void*), class P>
void WeakCallbackShim(const WeakCallbackInfo<P>& data) {
F(data.GetParameter());
}
template <void (*F)(void*), class T, typename P>
void MakeWeak(Isolate* isolate, Persistent<T>& handle, P* val) {
handle.SetWeak(val, WeakCallbackShim<F, P>, WeakCallbackType::kFinalizer);
}
#elif V8_MAJOR_VERSION > 3 || (V8_MAJOR_VERSION == 3 && V8_MINOR_VERSION >= 26)
template <void (*F)(void*), class T, typename P>
void WeakCallbackShim(const v8::WeakCallbackData<T, P>& data) {
F(data.GetParameter());
}
template <void (*F)(void*), class T, typename P>
void MakeWeak(Isolate* isolate, Persistent<T>& handle, P* val) {
handle.SetWeak(val, WeakCallbackShim<F>);
}
#else
template <void (*F)(void*)>
void WeakCallbackShim(Persistent<Value> value, void* data) {
F(data);
}
template <void (*F)(void*), class T, typename P>
void MakeWeak(Isolate* isolate, Persistent<T>& handle, P* val) {
handle.MakeWeak(val, WeakCallbackShim<F>);
}
#endif
#if V8_MAJOR_VERSION > 3 || (V8_MAJOR_VERSION == 3 && V8_MINOR_VERSION >= 26)
// Node v0.11.13+
typedef PropertyCallbackInfo<Value> GetterCallbackInfo;
typedef PropertyCallbackInfo<void> SetterCallbackInfo;
typedef void FunctionType;
typedef FunctionCallbackInfo<v8::Value> Arguments;
class HandleScope {
v8::HandleScope scope;
public: HandleScope(Isolate* isolate) : scope(isolate) {}
};
template <class T>
void Reset(Isolate* isolate, Persistent<T>& persistent, Handle<T> handle) {
persistent.Reset(isolate, handle);
}
template <class T>
void Dispose(Isolate* isolate, Persistent<T>& handle) {
handle.Reset();
}
template <class T>
void ClearWeak(Isolate* isolate, Persistent<T>& handle) {
handle.ClearWeak(isolate);
}
template <class T>
void SetInternalPointer(Handle<T> handle, int index, void* val) {
handle->SetAlignedPointerInInternalField(index, val);
}
template <class T>
void* GetInternalPointer(Handle<T> handle, int index) {
return handle->GetAlignedPointerFromInternalField(index);
}
template <class T>
Handle<T> Deref(Isolate* isolate, Persistent<T>& handle) {
return Local<T>::New(isolate, handle);
}
template <class T>
void Return(Handle<T> handle, const Arguments& args) {
args.GetReturnValue().Set(handle);
}
template <class T>
void Return(Handle<T> handle, GetterCallbackInfo info) {
info.GetReturnValue().Set(handle);
}
template <class T>
void Return(Persistent<T>& handle, GetterCallbackInfo info) {
info.GetReturnValue().Set(handle);
}
Handle<Value> ThrowException(Isolate* isolate, Handle<Value> exception) {
return isolate->ThrowException(exception);
}
Handle<Context> GetCurrentContext(Isolate* isolate) {
return isolate->GetCurrentContext();
}
Handle<Primitive> Undefined(Isolate* isolate) {
return v8::Undefined(isolate);
}
Handle<String> NewLatin1String(Isolate* isolate, const char* string) {
return String::NewFromOneByte(isolate, (const uint8_t*)string);
}
Handle<String> NewLatin1Symbol(Isolate* isolate, const char* string) {
return String::NewFromOneByte(isolate, (const uint8_t*)string);
}
Handle<Boolean> NewBoolean(Isolate* isolate, bool value) {
return Boolean::New(isolate, value);
}
Handle<Number> NewNumber(Isolate* isolate, double value) {
return Number::New(isolate, value);
}
Handle<FunctionTemplate> NewFunctionTemplate(
Isolate* isolate,
FunctionCallback callback,
Handle<Value> data = Handle<Value>(),
Handle<Signature> signature = Handle<Signature>(),
int length = 0
) {
return FunctionTemplate::New(isolate, callback, data, signature, length);
}
Handle<Signature> NewSignature(
Isolate* isolate,
Handle<FunctionTemplate> receiver = Handle<FunctionTemplate>()
) {
return Signature::New(isolate, receiver);
}
class ReverseIsolateScope {
Isolate* isolate;
public:
explicit inline ReverseIsolateScope(Isolate* isolate) : isolate(isolate) {
isolate->Exit();
}
inline ~ReverseIsolateScope() {
isolate->Enter();
}
};
void AdjustAmountOfExternalAllocatedMemory(Isolate* isolate, int64_t change_in_bytes) {
isolate->AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
#else
// Node v0.10.x and lower
typedef AccessorInfo GetterCallbackInfo;
typedef AccessorInfo SetterCallbackInfo;
typedef Handle<Value> FunctionType;
typedef Arguments Arguments;
class HandleScope {
v8::HandleScope scope;
public: HandleScope(Isolate* isolate) {}
};
template <class T>
void Reset(Isolate* isolate, Persistent<T>& persistent, Handle<T> handle) {
persistent = Persistent<T>::New(handle);
}
template <class T>
void Dispose(Isolate* isolate, Persistent<T>& handle) {
handle.Dispose();
}
template <class T>
void ClearWeak(Isolate* isolate, Persistent<T>& handle) {
handle.ClearWeak();
}
template <class T>
void SetInternalPointer(Handle<T> handle, int index, void* val) {
handle->SetPointerInInternalField(index, val);
}
template <class T>
void* GetInternalPointer(Handle<T> handle, int index) {
return handle->GetPointerFromInternalField(index);
}
template <class T>
Handle<T> Deref(Isolate* isolate, Persistent<T>& handle) {
return Local<T>::New(handle);
}
Handle<Value> Return(Handle<Value> handle, GetterCallbackInfo info) {
return handle;
}
Handle<Value> Return(Handle<Value> handle, const Arguments& args) {
return handle;
}
Handle<Value> ThrowException(Isolate* isolate, Handle<Value> exception) {
return ThrowException(exception);
}
Handle<Context> GetCurrentContext(Isolate* isolate) {
return Context::GetCurrent();
}
Handle<Primitive> Undefined(Isolate* isolate) {
return v8::Undefined();
}
Handle<String> NewLatin1String(Isolate* isolate, const char* string) {
return String::New(string);
}
Handle<String> NewLatin1Symbol(Isolate* isolate, const char* string) {
return String::NewSymbol(string);
}
Handle<Boolean> NewBoolean(Isolate* isolate, bool value) {
return Boolean::New(value);
}
Handle<Number> NewNumber(Isolate* isolate, double value) {
return Number::New(value);
}
Handle<FunctionTemplate> NewFunctionTemplate(
Isolate* isolate,
InvocationCallback callback,
Handle<Value> data = Handle<Value>(),
Handle<Signature> signature = Handle<Signature>(),
int length = 0
) {
return FunctionTemplate::New(callback, data, signature);
}
Handle<Signature> NewSignature(
Isolate* isolate,
Handle<FunctionTemplate> receiver = Handle<FunctionTemplate>(),
int argc = 0,
Handle<FunctionTemplate> argv[] = 0
) {
return Signature::New(receiver, argc, argv);
}
class ReverseIsolateScope {
public: explicit inline ReverseIsolateScope(Isolate* isolate) {}
};
void AdjustAmountOfExternalAllocatedMemory(Isolate* isolate, int64_t change_in_bytes) {
V8::AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
}
#endif
#if V8_MAJOR_VERSION > 3 || (V8_MAJOR_VERSION == 3 && V8_MINOR_VERSION >= 29)
// This was actually added in 3.29.67
void SetStackGuard(Isolate* isolate, void* guard) {
isolate->SetStackLimit(reinterpret_cast<uintptr_t>(guard));
}
#elif V8_MAJOR_VERSION > 3 || (V8_MAJOR_VERSION == 3 && V8_MINOR_VERSION >= 26)
void SetStackGuard(Isolate* isolate, void* guard) {
ResourceConstraints constraints;
constraints.set_stack_limit(reinterpret_cast<uint32_t*>(guard));
v8::SetResourceConstraints(isolate, &constraints);
}
#else
// Extra padding for old versions of v8. Shit's fucked.
void SetStackGuard(Isolate* isolate, void* guard) {
ResourceConstraints constraints;
constraints.set_stack_limit(
reinterpret_cast<uint32_t*>(guard) + 18 * 1024
);
v8::SetResourceConstraints(&constraints);
}
#endif
}
class Fiber {
private:
static Locker* global_locker; // Node does not use locks or threads, so we need a global lock
static Persistent<FunctionTemplate> tmpl;
static Persistent<Function> fiber_object;
static Fiber* current;
static vector<Fiber*> orphaned_fibers;
static Persistent<Value> fatal_stack;
Isolate* isolate;
Persistent<Object> handle;
Persistent<Function> cb;
Persistent<Context> v8_context;
Persistent<Value> zombie_exception;
Persistent<Value> yielded;
bool yielded_exception;
Coroutine* entry_fiber;
Coroutine* this_fiber;
bool started;
bool yielding;
bool zombie;
bool resetting;
static Fiber& Unwrap(Handle<Object> handle) {
assert(!handle.IsEmpty());
assert(handle->InternalFieldCount() == 1);
return *static_cast<Fiber*>(uni::GetInternalPointer(handle, 0));
}
Fiber(Handle<Object> handle, Handle<Function> cb, Handle<Context> v8_context) :
isolate(Isolate::GetCurrent()),
started(false),
yielding(false),
zombie(false),
resetting(false) {
uni::Reset(isolate, this->handle, handle);
uni::Reset(isolate, this->cb, cb);
uni::Reset(isolate, this->v8_context, v8_context);
MakeWeak();
uni::SetInternalPointer(handle, 0, this);
}
virtual ~Fiber() {
assert(!this->started);
uni::Dispose(isolate, handle);
uni::Dispose(isolate, cb);
uni::Dispose(isolate, v8_context);
}
/**
* Call MakeWeak if it's ok for v8 to garbage collect this Fiber.
* i.e. After fiber completes, while yielded, or before started
*/
void MakeWeak() {
uni::MakeWeak<WeakCallback>(isolate, handle, (void*)this);
}
/**
* And call ClearWeak if it's not ok for v8 to garbage collect this Fiber.
* i.e. While running.
*/
void ClearWeak() {
handle.ClearWeak();
}
/**
* Called when there are no more references to this object in Javascript. If this happens and
* the fiber is currently suspended we'll unwind the fiber's stack by throwing exceptions in
* order to clear all references.
*/
static void WeakCallback(void* data) {
Fiber& that = *static_cast<Fiber*>(data);
assert(that.handle.IsNearDeath());
assert(current != &that);
// We'll unwind running fibers later... doing it from the garbage collector is bad news.
if (that.started) {
assert(that.yielding);
orphaned_fibers.push_back(&that);
that.ClearWeak();
return;
}
delete &that;
}
/**
* When the v8 garbage collector notifies us about dying fibers, instead of unwinding their
* stack as soon as possible we put them aside to unwind later. Unwinding from the garbage
* collector leads to exponential-time garbage collections if there are many orphaned Fibers;
* there's also the possibility of running out of stack space. It's generally bad news.
*
* So instead we have this function to clean up all the fibers after the garbage collection
* has finished.
*/
static void DestroyOrphans() {
if (orphaned_fibers.empty()) {
return;
}
vector<Fiber*> orphans(orphaned_fibers);
orphaned_fibers.clear();
for (vector<Fiber*>::iterator ii = orphans.begin(); ii != orphans.end(); ++ii) {
Fiber& that = **ii;
that.UnwindStack();
if (that.yielded_exception) {
// If you throw an exception from a fiber that's being garbage collected there's no way
// to bubble that exception up to the application.
String::Utf8Value stack(uni::Deref(that.isolate, fatal_stack));
cerr <<
"An exception was thrown from a Fiber which was being garbage collected. This error "
"can not be gracefully recovered from. The only acceptable behavior is to terminate "
"this application. The exception appears below:\n\n"
<<*stack <<"\n";
exit(1);
} else {
uni::Dispose(that.isolate, fatal_stack);
}
uni::Dispose(that.isolate, that.yielded);
that.MakeWeak();
}
}
/**
* Instantiate a new Fiber object. When a fiber is created it only grabs a handle to the
* callback; it doesn't create any new contexts until run() is called.
*/
static uni::FunctionType New(const uni::Arguments& args) {
if (args.Length() != 1) {
THROW(Exception::TypeError, "Fiber expects 1 argument");
} else if (!args[0]->IsFunction()) {
THROW(Exception::TypeError, "Fiber expects a function");
} else if (!args.IsConstructCall()) {
Handle<Value> argv[1] = { args[0] };
return uni::Return(uni::Deref(Isolate::GetCurrent(), tmpl)->GetFunction()->NewInstance(1, argv), args);
}
Handle<Function> fn = Handle<Function>::Cast(args[0]);
new Fiber(args.This(), fn, uni::GetCurrentContext(Isolate::GetCurrent()));
return uni::Return(args.This(), args);
}
/**
* Begin or resume the current fiber. If the fiber is not currently running, a new context will
* be created and the callback will start. Otherwise we switch back into the existing context.
*/
static uni::FunctionType Run(const uni::Arguments& args) {
Fiber& that = Unwrap(args.Holder());
// There seems to be no better place to put this check..
DestroyOrphans();
if (that.started && !that.yielding) {
THROW(Exception::Error, "This Fiber is already running");
} else if (args.Length() > 1) {
THROW(Exception::TypeError, "run() excepts 1 or no arguments");
}
if (!that.started) {
// Create a new context with entry point `Fiber::RunFiber()`.
void** data = new void*[2];
data[0] = (void*)&args;
data[1] = &that;
that.this_fiber = Coroutine::create_fiber((void (*)(void*))RunFiber, data);
if (!that.this_fiber) {
delete[] data;
THROW(Exception::RangeError, "Out of memory");
}
that.started = true;
uni::AdjustAmountOfExternalAllocatedMemory(that.isolate, that.this_fiber->size() * GC_ADJUST);
} else {
// If the fiber is currently running put the first parameter to `run()` on `yielded`, then
// the pending call to `yield()` will return that value. `yielded` in this case is just a
// misnomer, we're just reusing the same handle.
that.yielded_exception = false;
if (args.Length()) {
uni::Reset(that.isolate, that.yielded, args[0]);
} else {
uni::Reset<Value>(that.isolate, that.yielded, uni::Undefined(that.isolate));
}
}
that.SwapContext();
return uni::Return(that.ReturnYielded(), args);
}
/**
* Throw an exception into a currently yielding fiber.
*/
static uni::FunctionType ThrowInto(const uni::Arguments& args) {
Fiber& that = Unwrap(args.Holder());
if (!that.yielding) {
THROW(Exception::Error, "This Fiber is not yielding");
} else if (args.Length() == 0) {
uni::Reset<Value>(that.isolate, that.yielded, uni::Undefined(that.isolate));
} else if (args.Length() == 1) {
uni::Reset(that.isolate, that.yielded, args[0]);
} else {
THROW(Exception::TypeError, "throwInto() expects 1 or no arguments");
}
that.yielded_exception = true;
that.SwapContext();
return uni::Return(that.ReturnYielded(), args);
}
/**
* Unwinds a currently running fiber. If the fiber is not running then this function has no
* effect.
*/
static uni::FunctionType Reset(const uni::Arguments& args) {
Fiber& that = Unwrap(args.Holder());
if (!that.started) {
return uni::Return(uni::Undefined(that.isolate), args);
} else if (!that.yielding) {
THROW(Exception::Error, "This Fiber is not yielding");
} else if (args.Length()) {
THROW(Exception::TypeError, "reset() expects no arguments");
}
that.resetting = true;
that.UnwindStack();
that.resetting = false;
that.MakeWeak();
Handle<Value> val = uni::Deref(that.isolate, that.yielded);
uni::Dispose(that.isolate, that.yielded);
if (that.yielded_exception) {
return uni::Return(uni::ThrowException(that.isolate, val), args);
} else {
return uni::Return(val, args);
}
}
/**
* Turns the fiber into a zombie and unwinds its whole stack.
*
* After calling this function you must either destroy this fiber or call MakeWeak() or it will
* be leaked.
*/
void UnwindStack() {
assert(!zombie);
assert(started);
assert(yielding);
zombie = true;
// Set up an exception which will be thrown and rethrown from Fiber::Yield()
Handle<Value> zombie_exception = Exception::Error(uni::NewLatin1String(isolate, "This Fiber is a zombie"));
uni::Reset(isolate, this->zombie_exception, zombie_exception);
uni::Reset(isolate, yielded, zombie_exception);
yielded_exception = true;
// Swap context back to Fiber::Yield() which will throw an exception to unwind the stack.
// Further calls to yield from this fiber will rethrow the same exception.
SwapContext();
assert(!started);
zombie = false;
// Make sure this is the exception we threw
if (yielded_exception && yielded == zombie_exception) {
yielded_exception = false;
uni::Dispose(isolate, yielded);
uni::Reset<Value>(isolate, yielded, uni::Undefined(isolate));
}
uni::Dispose(isolate, this->zombie_exception);
}
/**
* Common logic between Run(), ThrowInto(), and UnwindStack(). This is essentially just a
* wrapper around this_fiber->run() which also handles all the bookkeeping needed.
*/
void SwapContext() {
entry_fiber = &Coroutine::current();
Fiber* last_fiber = current;
current = this;
// This will jump into either `RunFiber()` or `Yield()`, depending on if the fiber was
// already running.
{
Unlocker unlocker(isolate);
uni::ReverseIsolateScope isolate_scope(isolate);
this_fiber->run();
}
// At this point the fiber either returned or called `yield()`.
current = last_fiber;
}
/**
* Grabs and resets this fiber's yielded value.
*/
Handle<Value> ReturnYielded() {
Handle<Value> val = uni::Deref(isolate, yielded);
uni::Dispose(isolate, yielded);
if (yielded_exception) {
return uni::ThrowException(isolate, val);
} else {
return val;
}
}
/**
* This is the entry point for a new fiber, from `run()`.
*/
static void RunFiber(void** data) {
const uni::Arguments* args = (const uni::Arguments*)data[0];
Fiber& that = *(Fiber*)data[1];
delete[] data;
// New C scope so that the stack-allocated objects will be destroyed before calling
// Coroutine::finish, because that function may not return, in which case the destructors in
// this function won't be called.
{
Locker locker(that.isolate);
Isolate::Scope isolate_scope(that.isolate);
uni::HandleScope scope(that.isolate);
// Set the stack guard for this "thread"; allow 6k of padding past the JS limit for
// native v8 code to run
uni::SetStackGuard(that.isolate, reinterpret_cast<char*>(that.this_fiber->bottom()) + 1024 * 6);
TryCatch try_catch;
that.ClearWeak();
Handle<Context> v8_context = uni::Deref(that.isolate, that.v8_context);
v8_context->Enter();
// Workaround for v8 issue #1180
// http://code.google.com/p/v8/issues/detail?id=1180
Script::Compile(uni::NewLatin1String(that.isolate, "void 0;"));
Handle<Value> yielded;
if (args->Length()) {
Handle<Value> argv[1] = { (*args)[0] };
yielded = uni::Deref(that.isolate, that.cb)->Call(v8_context->Global(), 1, argv);
} else {
yielded = uni::Deref(that.isolate, that.cb)->Call(v8_context->Global(), 0, NULL);
}
if (try_catch.HasCaught()) {
uni::Reset(that.isolate, that.yielded, try_catch.Exception());
that.yielded_exception = true;
if (that.zombie && !that.resetting && !uni::Deref(that.isolate, that.yielded)->StrictEquals(uni::Deref(that.isolate, that.zombie_exception))) {
// Throwing an exception from a garbage sweep
uni::Reset(that.isolate, fatal_stack, try_catch.StackTrace());
}
} else {
uni::Reset(that.isolate, that.yielded, yielded);
that.yielded_exception = false;
}
// Do not invoke the garbage collector if there's no context on the stack. It will seg fault
// otherwise.
uni::AdjustAmountOfExternalAllocatedMemory(that.isolate, -(int)(that.this_fiber->size() * GC_ADJUST));
// Don't make weak until after notifying the garbage collector. Otherwise it may try and
// free this very fiber!
if (!that.zombie) {
that.MakeWeak();
}
// Now safe to leave the context, this stack is done with JS.
v8_context->Exit();
}
// The function returned (instead of yielding).
that.started = false;
that.this_fiber->finish(*that.entry_fiber, that.isolate);
}
/**
* Yield control back to the function that called `run()`. The first parameter to this function
* is returned from `run()`. The context is saved, to be later resumed from `run()`.
* note: sigh, there is a #define Yield() in WinBase.h on Windows
*/
static uni::FunctionType Yield_(const uni::Arguments& args) {
if (current == NULL) {
THROW(Exception::Error, "yield() called with no fiber running");
}
Fiber& that = *current;
if (that.zombie) {
return uni::Return(uni::ThrowException(that.isolate, uni::Deref(that.isolate, that.zombie_exception)), args);
} else if (args.Length() == 0) {
uni::Reset<Value>(that.isolate, that.yielded, Undefined(that.isolate));
} else if (args.Length() == 1) {
uni::Reset(that.isolate, that.yielded, args[0]);
} else {
THROW(Exception::TypeError, "yield() expects 1 or no arguments");
}
that.yielded_exception = false;
// While not running this can be garbage collected if no one has a handle.
that.MakeWeak();
// Return control back to `Fiber::run()`. While control is outside this function we mark it as
// ok to garbage collect. If no one ever has a handle to resume the function it's harmful to
// keep the handle around.
{
Unlocker unlocker(that.isolate);
uni::ReverseIsolateScope isolate_scope(that.isolate);
that.yielding = true;
that.entry_fiber->run();
that.yielding = false;
}
// Now `run()` has been called again.
// Don't garbage collect anymore!
that.ClearWeak();
// Return the yielded value
return uni::Return(that.ReturnYielded(), args);
}
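// Illustrative JS-side sketch of the run()/yield() handoff implemented by Run() and Yield_()
// (assumes the compiled addon is consumed through the usual `fibers` package wrapper; this is
// an example, not part of this file):
//
//   var Fiber = require('fibers');
//   var fiber = Fiber(function(start) {
//     var resumed = Fiber.yield(start + 1); // suspends; run(1) below returns 2
//     console.log(resumed);                 // logs 'again' after the second run()
//   });
//   console.log(fiber.run(1));              // 2
//   fiber.run('again');                     // resumes; run() returns undefined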
/**
* Getters for `started`, and `current`.
*/
static uni::FunctionType GetStarted(Local<String> property, const uni::GetterCallbackInfo& info) {
if (info.This().IsEmpty() || info.This()->InternalFieldCount() != 1) {
return uni::Return(uni::Undefined(Isolate::GetCurrent()), info);
}
Fiber& that = Unwrap(info.This());
return uni::Return(uni::NewBoolean(that.isolate, that.started), info);
}
static uni::FunctionType GetCurrent(Local<String> property, const uni::GetterCallbackInfo& info) {
if (current) {
return uni::Return(current->handle, info);
} else {
return uni::Return(uni::Undefined(Isolate::GetCurrent()), info);
}
}
/**
* Allow access to coroutine pool size
*/
static uni::FunctionType GetPoolSize(Local<String> property, const uni::GetterCallbackInfo& info) {
return uni::Return(uni::NewNumber(Isolate::GetCurrent(), Coroutine::pool_size), info);
}
static void SetPoolSize(Local<String> property, Local<Value> value, const uni::SetterCallbackInfo& info) {
Coroutine::pool_size = value->ToNumber()->Value();
}
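// These accessors expose Coroutine::pool_size to JS as `Fiber.poolSize` (bound in Init()
// below); it appears to control how many finished coroutines are kept cached for reuse.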
/**
* Return number of fibers that have been created
*/
static uni::FunctionType GetFibersCreated(Local<String> property, const uni::GetterCallbackInfo& info) {
return uni::Return(uni::NewNumber(Isolate::GetCurrent(), Coroutine::coroutines_created()), info);
}
public:
/**
* Initialize the Fiber library.
*/
static void Init(Handle<Object> target) {
// Use a locker which won't get destroyed when this library gets unloaded. This is a hack
// to prevent v8 from trying to clean up this "thread" while the whole application is
// shutting down. TODO: There's likely a better way to accomplish this, but since the
// application is going down lost memory isn't the end of the world. But with a regular lock
// there's seg faults when node shuts down.
Isolate* isolate = Isolate::GetCurrent();
global_locker = new Locker(isolate);
current = NULL;
// Fiber constructor
Handle<FunctionTemplate> tmpl = uni::NewFunctionTemplate(isolate, New);
uni::Reset(isolate, Fiber::tmpl, tmpl);
tmpl->SetClassName(uni::NewLatin1Symbol(isolate, "Fiber"));
// Guard which only allows these methods to be called on a fiber; prevents
// `fiber.run.call({})` from seg faulting.
Handle<Signature> sig = uni::NewSignature(isolate, tmpl);
tmpl->InstanceTemplate()->SetInternalFieldCount(1);
// Fiber.prototype
Handle<ObjectTemplate> proto = tmpl->PrototypeTemplate();
proto->Set(uni::NewLatin1Symbol(isolate, "reset"),
uni::NewFunctionTemplate(isolate, Reset, Handle<Value>(), sig));
proto->Set(uni::NewLatin1Symbol(isolate, "run"),
uni::NewFunctionTemplate(isolate, Run, Handle<Value>(), sig));
proto->Set(uni::NewLatin1Symbol(isolate, "throwInto"),
uni::NewFunctionTemplate(isolate, ThrowInto, Handle<Value>(), sig));
proto->SetAccessor(uni::NewLatin1Symbol(isolate, "started"), GetStarted);
// Global yield() function
Handle<Function> yield = uni::NewFunctionTemplate(isolate, Yield_)->GetFunction();
Handle<String> sym_yield = uni::NewLatin1Symbol(isolate, "yield");
target->Set(sym_yield, yield);
// Fiber properties
Handle<Function> fn = tmpl->GetFunction();
fn->Set(sym_yield, yield);
fn->SetAccessor(uni::NewLatin1Symbol(isolate, "current"), GetCurrent);
fn->SetAccessor(uni::NewLatin1Symbol(isolate, "poolSize"), GetPoolSize, SetPoolSize);
fn->SetAccessor(uni::NewLatin1Symbol(isolate, "fibersCreated"), GetFibersCreated);
// Global Fiber
target->Set(uni::NewLatin1Symbol(isolate, "Fiber"), fn);
uni::Reset(isolate, fiber_object, fn);
}
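// Surface exported by Init() above: a `Fiber` constructor carrying `yield`, `current`,
// `poolSize` and `fibersCreated` properties, plus a module-level `yield` alias; instances
// expose `run()`, `throwInto()`, `reset()` and a read-only `started` accessor.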
};
Persistent<FunctionTemplate> Fiber::tmpl;
Persistent<Function> Fiber::fiber_object;
Locker* Fiber::global_locker;
Fiber* Fiber::current = NULL;
vector<Fiber*> Fiber::orphaned_fibers;
Persistent<Value> Fiber::fatal_stack;
bool did_init = false;
#if !NODE_VERSION_AT_LEAST(0,10,0)
extern "C"
#endif
void init(Handle<Object> target) {
Isolate* isolate = Isolate::GetCurrent();
if (did_init || !target->Get(uni::NewLatin1Symbol(isolate, "Fiber"))->IsUndefined()) {
// Oh god. Node will call init() twice even though the library was loaded only once. See Node
// issue #2621 (no fix).
return;
}
did_init = true;
uni::HandleScope scope(isolate);
Coroutine::init(isolate);
Fiber::Init(target);
// Default stack size of either 512k or 1M. Perhaps make this configurable by the runtime?
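// (The value passed to Coroutine::set_stack_size appears to be counted in machine words
// rather than bytes, which is how 128 * 1024 works out to ~512k on 32-bit builds and ~1M
// on 64-bit builds, matching the comment above.)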
Coroutine::set_stack_size(128 * 1024);
}
NODE_MODULE(fibers, init)
... ...
Copyright (c) 2000-2009 Marc Alexander Lehmann <schmorp@schmorp.de>
Redistribution and use in source and binary forms, with or without modifica-
tion, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Alternatively, the following files carry an additional notice that
explicitly allows relicensing under the GPLv2: coro.c, coro.h.
... ...
Configuration, documentation etc. is provided in the coro.h file. Please
note that the file conftest.c in this distribution is under the GPL. It is
not needed for proper operation of this library though, for that, coro.h
and coro.c suffice.
Marc Lehmann <schmorp@schmorp.de>
... ...
/*
* This file was taken from pth-1.40/aclocal.m4
* The original copyright is below.
*
* GNU Pth - The GNU Portable Threads
* Copyright (c) 1999-2001 Ralf S. Engelschall <rse@engelschall.com>
*
* This file is part of GNU Pth, a non-preemptive thread scheduling
* library which can be found at http://www.gnu.org/software/pth/.
*
* This file is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This file is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this file; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA, or contact Marc Lehmann <schmorp@schmorp.de>.
*/
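/*
 * Summary (added for readability): this configure-time probe allocates a buffer filled
 * with 'A', points an alternate signal stack (sigstack/sigaltstack) or a makecontext()
 * stack at the middle of that buffer, runs a handler that records the address of one of
 * its locals, and then prints the ss_sp/ss_size expression that the surrounding autoconf
 * macro should use, based on which half of the buffer the handler actually ran in and
 * whether the guard bytes were overwritten.
 */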
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(TEST_sigstack) || defined(TEST_sigaltstack)
#include <sys/types.h>
#include <signal.h>
#include <unistd.h>
#endif
#if defined(TEST_makecontext)
#include <ucontext.h>
#endif
union alltypes {
long l;
double d;
void *vp;
void (*fp)(void);
char *cp;
};
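/* The union above is used only for its size and alignment: one maximally-aligned slot of
   padding is reserved on each side of the test stack so the checks in main() can tell
   whether the platform's stack setup touches bytes at or just past the boundary it was given. */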
static volatile char *handler_addr = (char *)0xDEAD;
#if defined(TEST_sigstack) || defined(TEST_sigaltstack)
static volatile int handler_done = 0;
void handler(int sig)
{
char garbage[1024];
int i;
auto int dummy;
for (i = 0; i < 1024; i++)
garbage[i] = 'X';
handler_addr = (char *)&dummy;
handler_done = 1;
return;
}
#endif
#if defined(TEST_makecontext)
static ucontext_t uc_handler;
static ucontext_t uc_main;
void handler(void)
{
char garbage[1024];
int i;
auto int dummy;
for (i = 0; i < 1024; i++)
garbage[i] = 'X';
handler_addr = (char *)&dummy;
swapcontext(&uc_handler, &uc_main);
return;
}
#endif
int main(int argc, char *argv[])
{
FILE *f;
char *skaddr;
char *skbuf;
int sksize;
char result[1024];
int i;
sksize = 32768;
skbuf = (char *)malloc(sksize*2+2*sizeof(union alltypes));
if (skbuf == NULL)
exit(1);
for (i = 0; i < sksize*2+2*sizeof(union alltypes); i++)
skbuf[i] = 'A';
skaddr = skbuf+sizeof(union alltypes);
#if defined(TEST_sigstack) || defined(TEST_sigaltstack)
{
struct sigaction sa;
#if defined(TEST_sigstack)
struct sigstack ss;
#elif defined(TEST_sigaltstack) && defined(HAVE_STACK_T)
stack_t ss;
#else
struct sigaltstack ss;
#endif
#if defined(TEST_sigstack)
ss.ss_sp = (void *)(skaddr + sksize);
ss.ss_onstack = 0;
if (sigstack(&ss, NULL) < 0)
exit(1);
#elif defined(TEST_sigaltstack)
ss.ss_sp = (void *)(skaddr + sksize);
ss.ss_size = sksize;
ss.ss_flags = 0;
if (sigaltstack(&ss, NULL) < 0)
exit(1);
#endif
memset((void *)&sa, 0, sizeof(struct sigaction));
sa.sa_handler = handler;
sa.sa_flags = SA_ONSTACK;
sigemptyset(&sa.sa_mask);
sigaction(SIGUSR1, &sa, NULL);
kill(getpid(), SIGUSR1);
while (!handler_done)
/*nop*/;
}
#endif
#if defined(TEST_makecontext)
{
if (getcontext(&uc_handler) != 0)
exit(1);
uc_handler.uc_link = NULL;
uc_handler.uc_stack.ss_sp = (void *)(skaddr + sksize);
uc_handler.uc_stack.ss_size = sksize;
uc_handler.uc_stack.ss_flags = 0;
makecontext(&uc_handler, handler, 1);
swapcontext(&uc_main, &uc_handler);
}
#endif
if (handler_addr == (char *)0xDEAD)
exit(1);
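/* The handler stored the address of one of its locals. If that address lies below the
   midpoint passed as the stack pointer, the platform treated the pointer as the top of a
   downward-growing stack; otherwise it treated it as the base of an upward region. The 'A'
   guard-byte checks then decide whether a sizeof(union alltypes) margin must be subtracted
   from the address/size expression that gets printed for the configure script. */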
if (handler_addr < skaddr+sksize) {
/* stack was placed into lower area */
if (*(skaddr+sksize) != 'A')
sprintf(result, "(skaddr)+(sksize)-%d,(sksize)-%d",
sizeof(union alltypes), sizeof(union alltypes));
else
strcpy(result, "(skaddr)+(sksize),(sksize)");
}
else {
/* stack was placed into higher area */
if (*(skaddr+sksize*2) != 'A')
sprintf(result, "(skaddr),(sksize)-%d", sizeof(union alltypes));
else
strcpy(result, "(skaddr),(sksize)");
}
printf("%s\n", result);
exit(0);
}
... ...