
Commit 7cbb3fb
Merge pull request webmachinelearning#1 from fujunwei/rename
Rename
huningxin authored Oct 15, 2020
2 parents dd26eb8 + 4daed9f commit 7cbb3fb
Showing 46 changed files with 182 additions and 1,150 deletions.
8 changes: 4 additions & 4 deletions examples/SampleUtils.cpp
@@ -14,14 +14,14 @@

 #include "SampleUtils.h"

-#include <dawn/webgpu.h>
-#include <dawn/webgpu_cpp.h>
+#include <dawn/webnn.h>
+#include <dawn/webnn_cpp.h>
 #include <dawn/dawn_proc.h>
 #include <dawn_native/DawnNative.h>

-wgpu::NeuralNetworkContext CreateCppNeuralNetworkContext() {
+wnn::NeuralNetworkContext CreateCppNeuralNetworkContext() {
     DawnProcTable backendProcs = dawn_native::GetProcs();
     dawnProcSetProcs(&backendProcs);
     dawn_native::Adapter ml;
-    return wgpu::NeuralNetworkContext::Acquire(ml.CreateNeuralNetworkContext());
+    return wnn::NeuralNetworkContext::Acquire(ml.CreateNeuralNetworkContext());
 }
6 changes: 3 additions & 3 deletions examples/SampleUtils.h
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <dawn/webgpu.h>
-#include <dawn/webgpu_cpp.h>
+#include <dawn/webnn.h>
+#include <dawn/webnn_cpp.h>

-wgpu::NeuralNetworkContext CreateCppNeuralNetworkContext();
+wnn::NeuralNetworkContext CreateCppNeuralNetworkContext();
26 changes: 13 additions & 13 deletions examples/matmul.cpp
@@ -17,33 +17,33 @@
 #include <stdio.h>
 #include <vector>

-void compute_callback(WGPUOutputs impl) {
+void compute_callback(WNNOutputs impl) {
     printf("outputs %p\n", (void*)impl);
 }

-void compilation_callback(WGPUCompilation impl) {
-    wgpu::Compilation exe;
+void compilation_callback(WNNCompilation impl) {
+    wnn::Compilation exe;
     exe.Acquire(impl);
     std::vector<float> bufferA(2*3);
-    wgpu::Input a;
+    wnn::Input a;
     a.buffer = bufferA.data();
     a.size = bufferA.size();
-    wgpu::Inputs inputs;
+    wnn::Inputs inputs;
     inputs.SetInput("a", &a);
-    wgpu::Outputs outputs;
+    wnn::Outputs outputs;
     exe.Compute(inputs, compute_callback, outputs);
 }

 int main(int argc, const char* argv[]) {
-    wgpu::NeuralNetworkContext nn = CreateCppNeuralNetworkContext();
+    wnn::NeuralNetworkContext nn = CreateCppNeuralNetworkContext();
     std::vector<int32_t> shapeA = {2, 3};
-    wgpu::OperandDescriptor descA = {wgpu::OperandType::Float32, shapeA.data(), (uint32_t)shapeA.size()};
-    wgpu::Operand a = nn.Input("a", &descA);
+    wnn::OperandDescriptor descA = {wnn::OperandType::Float32, shapeA.data(), (uint32_t)shapeA.size()};
+    wnn::Operand a = nn.Input("a", &descA);
     std::vector<int32_t> shapeB = {3, 2};
-    wgpu::OperandDescriptor descB = {wgpu::OperandType::Float32, shapeB.data(), (uint32_t)shapeB.size()};
+    wnn::OperandDescriptor descB = {wnn::OperandType::Float32, shapeB.data(), (uint32_t)shapeB.size()};
     std::vector<float> bufferB(3*2);
-    wgpu::Operand b = nn.Constant(&descB, bufferB.data(), bufferB.size(), 0);
-    wgpu::Operand c = nn.Matmul(a, b);
-    wgpu::Model model = nn.CreateModel();
+    wnn::Operand b = nn.Constant(&descB, bufferB.data(), bufferB.size(), 0);
+    wnn::Operand c = nn.Matmul(a, b);
+    wnn::Model model = nn.CreateModel();
     model.Compile(compilation_callback);
 }
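
Note: the renamed wnn:: bindings compose exactly as the wgpu:: ones did. The sketch below builds a second graph the same way, using only calls that appear in this diff; the shapes, the input name "x", and the on_compiled helper are illustrative assumptions, not part of the commit:

#include "SampleUtils.h"

#include <vector>

// Compilation callback for the sketch; mirrors compilation_callback above.
void on_compiled(WNNCompilation impl) {
    wnn::Compilation exe;
    exe.Acquire(impl);
    // exe.Compute(...) would run the graph, as in the example above.
}

int main() {
    wnn::NeuralNetworkContext nn = CreateCppNeuralNetworkContext();

    // A 4x3 input matrix-multiplied by a constant 3x1 matrix.
    std::vector<int32_t> shapeX = {4, 3};
    wnn::OperandDescriptor descX = {wnn::OperandType::Float32, shapeX.data(),
                                    (uint32_t)shapeX.size()};
    wnn::Operand x = nn.Input("x", &descX);

    std::vector<int32_t> shapeW = {3, 1};
    wnn::OperandDescriptor descW = {wnn::OperandType::Float32, shapeW.data(),
                                    (uint32_t)shapeW.size()};
    std::vector<float> bufferW(3 * 1);
    wnn::Operand w = nn.Constant(&descW, bufferW.data(), bufferW.size(), 0);

    wnn::Operand y = nn.Matmul(x, w);
    wnn::Model model = nn.CreateModel();
    model.Compile(on_compiled);
}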
44 changes: 22 additions & 22 deletions generator/dawn_json_generator.py
@@ -200,7 +200,7 @@ def __init__(self, name, json_data):
         Type.__init__(self, name, json_data)
         self.chained = json_data.get("chained", False)
         self.extensible = json_data.get("extensible", False)
-        # Chained structs inherit from wgpu::ChainedStruct, which has
+        # Chained structs inherit from wnn::ChainedStruct, which has
         # nextInChain, so setting both extensible and chained would result in
         # two nextInChain members.
         assert not (self.extensible and self.chained)
@@ -451,7 +451,7 @@ def as_cType(name):
     if name.native:
         return name.concatcase()
     else:
-        return 'WGPU' + name.CamelCase()
+        return 'WNN' + name.CamelCase()


 def as_cTypeDawn(name):
@@ -524,7 +524,7 @@ def annotated(typ, arg):

 def as_cEnum(type_name, value_name):
     assert not type_name.native and not value_name.native
-    return 'WGPU' + type_name.CamelCase() + '_' + value_name.CamelCase()
+    return 'WNN' + type_name.CamelCase() + '_' + value_name.CamelCase()


 def as_cEnumDawn(type_name, value_name):
@@ -542,7 +542,7 @@ def as_cppEnum(value_name):

 def as_cMethod(type_name, method_name):
     assert not type_name.native and not method_name.native
-    return 'wgpu' + type_name.CamelCase() + method_name.CamelCase()
+    return 'wnn' + type_name.CamelCase() + method_name.CamelCase()


 def as_cMethodDawn(type_name, method_name):
Expand All @@ -557,7 +557,7 @@ def as_MethodSuffix(type_name, method_name):

def as_cProc(type_name, method_name):
assert not type_name.native and not method_name.native
return 'WGPU' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()
return 'WNN' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()


def as_cProcDawn(type_name, method_name):
@@ -569,7 +569,7 @@ def as_frontendType(typ):
     if typ.category == 'object':
         return typ.name.CamelCase() + 'Base*'
     elif typ.category in ['bitmask', 'enum']:
-        return 'wgpu::' + typ.name.CamelCase()
+        return 'wnn::' + typ.name.CamelCase()
     elif typ.category == 'structure':
         return as_cppType(typ.name)
     else:
@@ -580,7 +580,7 @@ def as_wireType(typ):
     if typ.category == 'object':
         return typ.name.CamelCase() + '*'
     elif typ.category in ['bitmask', 'enum']:
-        return 'WGPU' + typ.name.CamelCase()
+        return 'WNN' + typ.name.CamelCase()
     else:
         return as_cppType(typ.name)

@@ -610,7 +610,7 @@ def get_description(self):
     def add_commandline_arguments(self, parser):
         allowed_targets = [
             'dawn_headers', 'dawncpp_headers', 'dawncpp', 'dawn_proc',
-            'mock_webgpu', 'dawn_wire', "dawn_native_utils"
+            'mock_webnn', 'dawn_wire', "dawn_native_utils"
         ]

         parser.add_argument('--dawn-json',
@@ -671,7 +671,7 @@ def get_file_renders(self, args):

         if 'dawn_headers' in targets:
             renders.append(
-                FileRender('webgpu.h', 'src/include/dawn/webgpu.h',
+                FileRender('webnn.h', 'src/include/dawn/webnn.h',
                            [base_params, api_params]))
             renders.append(
                 FileRender('dawn_proc_table.h',
@@ -680,7 +680,7 @@ def get_file_renders(self, args):

         if 'dawncpp_headers' in targets:
             renders.append(
-                FileRender('webgpu_cpp.h', 'src/include/dawn/webgpu_cpp.h',
+                FileRender('webnn_cpp.h', 'src/include/dawn/webnn_cpp.h',
                            [base_params, api_params]))

         if 'dawn_proc' in targets:
@@ -690,30 +690,30 @@ def get_file_renders(self, args):

         if 'dawncpp' in targets:
             renders.append(
-                FileRender('webgpu_cpp.cpp', 'src/dawn/webgpu_cpp.cpp',
+                FileRender('webnn_cpp.cpp', 'src/dawn/webnn_cpp.cpp',
                            [base_params, api_params]))

         if 'emscripten_bits' in targets:
             renders.append(
-                FileRender('webgpu_struct_info.json',
-                           'src/dawn/webgpu_struct_info.json',
+                FileRender('webnn_struct_info.json',
+                           'src/dawn/webnn_struct_info.json',
                            [base_params, api_params]))
             renders.append(
-                FileRender('library_webgpu_enum_tables.js',
-                           'src/dawn/library_webgpu_enum_tables.js',
+                FileRender('library_webnn_enum_tables.js',
+                           'src/dawn/library_webnn_enum_tables.js',
                            [base_params, api_params]))

-        if 'mock_webgpu' in targets:
+        if 'mock_webnn' in targets:
             mock_params = [
                 base_params, api_params, {
                     'has_callback_arguments': has_callback_arguments
                 }
             ]
             renders.append(
-                FileRender('mock_webgpu.h', 'src/dawn/mock_webgpu.h',
+                FileRender('mock_webnn.h', 'src/dawn/mock_webnn.h',
                            mock_params))
             renders.append(
-                FileRender('mock_webgpu.cpp', 'src/dawn/mock_webgpu.cpp',
+                FileRender('mock_webnn.cpp', 'src/dawn/mock_webnn.cpp',
                            mock_params))

         if 'dawn_native_utils' in targets:
@@ -737,12 +737,12 @@ def get_file_renders(self, args):
                            'src/dawn_native/ValidationUtils_autogen.cpp',
                            frontend_params))
             renders.append(
-                FileRender('dawn_native/wgpu_structs.h',
-                           'src/dawn_native/wgpu_structs_autogen.h',
+                FileRender('dawn_native/wnn_structs.h',
+                           'src/dawn_native/wnn_structs_autogen.h',
                            frontend_params))
             renders.append(
-                FileRender('dawn_native/wgpu_structs.cpp',
-                           'src/dawn_native/wgpu_structs_autogen.cpp',
+                FileRender('dawn_native/wnn_structs.cpp',
+                           'src/dawn_native/wnn_structs_autogen.cpp',
                            frontend_params))
             renders.append(
                 FileRender('dawn_native/ProcTable.cpp',
(The diffs for the remaining changed files in this commit are not shown here.)
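
The net effect of the generator changes above is that every generated C identifier swaps its WGPU/wgpu prefix for WNN/wnn. Below is a small, runnable sketch of that naming scheme: the Name stub is a stand-in for the generator's real name class (an assumption; only the members the helpers touch are modeled), the helper bodies are copied from the diff with their assertions elided, and the sample names come from types that appear elsewhere in this commit.

# Stand-in for the generator's name type (assumption: the real class also
# exposes .native, .CamelCase(), and .concatcase()).
class Name:
    def __init__(self, name, native=False):
        self.chunks = name.split(' ')
        self.native = native

    def CamelCase(self):
        return ''.join(chunk.capitalize() for chunk in self.chunks)

    def concatcase(self):
        return ''.join(self.chunks)

# Helper bodies as they read after this commit's rename.
def as_cType(name):
    return name.concatcase() if name.native else 'WNN' + name.CamelCase()

def as_cEnum(type_name, value_name):
    return 'WNN' + type_name.CamelCase() + '_' + value_name.CamelCase()

def as_cMethod(type_name, method_name):
    return 'wnn' + type_name.CamelCase() + method_name.CamelCase()

def as_cProc(type_name, method_name):
    return 'WNN' + 'Proc' + type_name.CamelCase() + method_name.CamelCase()

print(as_cType(Name('compilation')))                    # WNNCompilation
print(as_cEnum(Name('operand type'), Name('float32')))  # WNNOperandType_Float32
print(as_cMethod(Name('model'), Name('compile')))       # wnnModelCompile
print(as_cProc(Name('model'), Name('compile')))         # WNNProcModelCompile

The first printed name matches WNNCompilation in the matmul.cpp diff above; the others are hypothetical instances of the same formulas.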
