diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
index 9efba03c4c7..204b33001fc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
@@ -25,9 +25,9 @@ public class AdagradOptions extends OptimizerCloneableAdagradOptions {
     public AdagradOptions(Pointer p) { super(p); }

     public AdagradOptions(double lr/*=1e-2*/) { super((Pointer)null); allocate(lr); }
-    private native void allocate(double lr/*=1e-2*/);
+    @UniquePtr @Name("std::make_unique<torch::optim::AdagradOptions>") private native void allocate(double lr/*=1e-2*/);
     public AdagradOptions() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<torch::optim::AdagradOptions>") private native void allocate();
     public native @ByRef @NoException(true) DoublePointer lr();
     public native @ByRef @NoException(true) DoublePointer lr_decay();
     public native @ByRef @NoException(true) DoublePointer weight_decay();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
index 841388e06d3..438fd2b517a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
@@ -23,18 +23,9 @@ public class AdagradParamState extends OptimizerCloneableAdagradParamState {
     static { Loader.load(); }
     /** Default native constructor. */
     public AdagradParamState() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public AdagradParamState(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdagradParamState(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public AdagradParamState position(long position) {
-        return (AdagradParamState)super.position(position);
-    }
-    @Override public AdagradParamState getPointer(long i) {
-        return new AdagradParamState((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::AdagradParamState>") private native void allocate();

     public native @ByRef @NoException(true) Tensor sum();
     public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
index e455c319d65..fce15a6f16f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
@@ -25,9 +25,9 @@ public class AdamOptions extends OptimizerCloneableAdamOptions {
     public AdamOptions(Pointer p) { super(p); }

     public AdamOptions(double lr/*=1e-3*/) { super((Pointer)null); allocate(lr); }
-    private native void allocate(double lr/*=1e-3*/);
+    @UniquePtr @Name("std::make_unique<torch::optim::AdamOptions>") private native void allocate(double lr/*=1e-3*/);
     public AdamOptions() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<torch::optim::AdamOptions>") private native void allocate();
     public native @ByRef @NoException(true) DoublePointer lr();
     public native @Cast("std::tuple<double,double>*") @ByRef @NoException DoublePointer betas();
     public native @ByRef @NoException(true) DoublePointer eps();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
index 93ab614cab7..19aa86e7c1f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
@@ -23,18 +23,9 @@ public class AdamParamState extends OptimizerCloneableAdamParamState {
     static { Loader.load(); }
     /** Default native constructor. */
     public AdamParamState() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public AdamParamState(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdamParamState(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public AdamParamState position(long position) {
-        return (AdamParamState)super.position(position);
-    }
-    @Override public AdamParamState getPointer(long i) {
-        return new AdamParamState((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::AdamParamState>") private native void allocate();

     public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
     public native @ByRef @NoException(true) Tensor exp_avg();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
index 5607a72a48f..b266fa1712d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
@@ -25,9 +25,9 @@ public class AdamWOptions extends OptimizerCloneableAdamWOptions {
     public AdamWOptions(Pointer p) { super(p); }

     public AdamWOptions(double lr/*=1e-3*/) { super((Pointer)null); allocate(lr); }
-    private native void allocate(double lr/*=1e-3*/);
+    @UniquePtr @Name("std::make_unique<torch::optim::AdamWOptions>") private native void allocate(double lr/*=1e-3*/);
     public AdamWOptions() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<torch::optim::AdamWOptions>") private native void allocate();
     public native @ByRef @NoException(true) DoublePointer lr();
     public native @Cast("std::tuple<double,double>*") @ByRef @NoException DoublePointer betas();
     public native @ByRef @NoException(true) DoublePointer eps();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
index 1cfa5060df3..21131009eab 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
@@ -23,18 +23,9 @@ public class AdamWParamState extends OptimizerCloneableAdamWParamState {
     static { Loader.load(); }
     /** Default native constructor. */
    public AdamWParamState() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public AdamWParamState(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AdamWParamState(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public AdamWParamState position(long position) {
-        return (AdamWParamState)super.position(position);
-    }
-    @Override public AdamWParamState getPointer(long i) {
-        return new AdamWParamState((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::AdamWParamState>") private native void allocate();

     public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
     public native @ByRef @NoException(true) Tensor exp_avg();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java
index a68a8d19f87..11d0ffc6dee 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java
@@ -32,15 +32,6 @@ public class AutogradMeta extends AutogradMetaInterface {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public AutogradMeta(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public AutogradMeta(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public AutogradMeta position(long position) {
-        return (AutogradMeta)super.position(position);
-    }
-    @Override public AutogradMeta getPointer(long i) {
-        return new AutogradMeta((Pointer)this).offsetAddress(i);
-    }

     public native @StdString BytePointer name_(); public native AutogradMeta name_(BytePointer setter);

@@ -121,10 +112,10 @@ public AutogradMeta(
         TensorImpl self_impl/*=nullptr*/,
         @Cast("bool") boolean requires_grad/*=false*/,
         @ByVal(nullValue = "torch::autograd::Edge()") Edge gradient_edge) { super((Pointer)null); allocate(self_impl, requires_grad, gradient_edge); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<torch::autograd::AutogradMeta>") private native void allocate(
         TensorImpl self_impl/*=nullptr*/,
         @Cast("bool") boolean requires_grad/*=false*/,
         @ByVal(nullValue = "torch::autograd::Edge()") Edge gradient_edge);
     public AutogradMeta() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<torch::autograd::AutogradMeta>") private native void allocate();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
index 92d33064b98..60bb1224c00 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
@@ -26,9 +26,9 @@ public class CPUGeneratorImpl extends GeneratorImpl {
     // Constructors
     public CPUGeneratorImpl(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/) { super((Pointer)null); allocate(seed_in); }
-    private native void allocate(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/);
+    @UniquePtr @Name("std::make_unique<at::CPUGeneratorImpl>") private native void allocate(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/);
     public CPUGeneratorImpl() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<at::CPUGeneratorImpl>") private native void allocate();

     // CPUGeneratorImpl methods
     public native @SharedPtr CPUGeneratorImpl clone();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java
index 767c73458af..6de922a6d18 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java
@@ -129,8 +129,8 @@ public class Dispatcher extends Pointer {
    */
     // NB: steals the inferred function schema, as we may need to hold on to
     // it for a bit until the real schema turns up
-    public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @Cast({"", "std::unique_ptr<c10::FunctionSchema>&&"}) FunctionSchema inferred_function_schema, @StdString BytePointer debug);
-    public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @Cast({"", "std::unique_ptr<c10::FunctionSchema>&&"}) FunctionSchema inferred_function_schema, @StdString String debug);
+    public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @ByVal FunctionSchema inferred_function_schema, @StdString BytePointer debug);
+    public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @ByVal FunctionSchema inferred_function_schema, @StdString String debug);

     /**
      * Register a new operator by name.
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java
index 3c560cafa14..353cd2f5974 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPostHookVector.java
@@ -31,7 +31,7 @@ public class FunctionPostHookVector extends Pointer {
     public FunctionPostHook front() { return get(0); }
     public FunctionPostHook back() { return get(size() - 1); }
-    @Index(function = "at") public native @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPostHook>&&"}) FunctionPostHook get(@Cast("size_t") long i);
+    @Index(function = "at") public native @UniquePtr FunctionPostHook get(@Cast("size_t") long i);

     public native @ByVal Iterator begin();
     public native @ByVal Iterator end();
@@ -41,7 +41,7 @@ public Iterator() { }
         public native @Name("operator ++") @ByRef Iterator increment();
         public native @Name("operator ==") boolean equals(@ByRef Iterator it);
-        public native @Name("operator *") @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPostHook>&&"}) FunctionPostHook get();
+        public native @Name("operator *") @UniquePtr @Const FunctionPostHook get();
     }
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java
index 3a165e5802a..9e90e3ef768 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionPreHookVector.java
@@ -31,7 +31,7 @@ public class FunctionPreHookVector extends Pointer {
     public FunctionPreHook front() { return get(0); }
     public FunctionPreHook back() { return get(size() - 1); }
-    @Index(function = "at") public native @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook get(@Cast("size_t") long i);
+    @Index(function = "at") public native @UniquePtr FunctionPreHook get(@Cast("size_t") long i);

     public native @ByVal Iterator begin();
     public native @ByVal Iterator end();
@@ -41,7 +41,7 @@ public Iterator() { }
         public native @Name("operator ++") @ByRef Iterator increment();
         public native @Name("operator ==") boolean equals(@ByRef Iterator it);
-        public native @Name("operator *") @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook get();
+        public native @Name("operator *") @UniquePtr @Const FunctionPreHook get();
     }
 }
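Note on the two hook-vector changes above: get(i) and the iterator's get() now borrow the element, which stays owned by the vector's std::unique_ptr slot, instead of moving it out. A minimal Java sketch of what this enables (the HookInspection class and printHooks helper are hypothetical, not part of the bindings):

    import org.bytedeco.pytorch.*;

    public class HookInspection {
        // Iterate a hook vector without taking ownership of its elements.
        static void printHooks(FunctionPreHookVector hooks) {
            for (long i = 0; i < hooks.size(); i++) {
                FunctionPreHook h = hooks.get(i); // borrowed; the vector keeps ownership
                System.out.println(h);
            }
        }
    }
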
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java
index 0a44a739ebf..039bbc12f00 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java
@@ -31,7 +31,7 @@ public FunctionSchema(
         @StdVector Argument returns,
         @Cast("bool") boolean is_vararg/*=false*/,
         @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @StdString BytePointer name,
         @StdString BytePointer overload_name,
         @StdVector Argument arguments,
@@ -43,7 +43,7 @@ public FunctionSchema(
         @StdString BytePointer overload_name,
         @StdVector Argument arguments,
         @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @StdString BytePointer name,
         @StdString BytePointer overload_name,
         @StdVector Argument arguments,
@@ -55,7 +55,7 @@ public FunctionSchema(
         @StdVector Argument returns,
         @Cast("bool") boolean is_vararg/*=false*/,
         @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @StdString String name,
         @StdString String overload_name,
         @StdVector Argument arguments,
@@ -67,7 +67,7 @@ public FunctionSchema(
         @StdString String overload_name,
         @StdVector Argument arguments,
         @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @StdString String name,
         @StdString String overload_name,
         @StdVector Argument arguments,
@@ -80,7 +80,7 @@ public FunctionSchema(
         @StdVector Argument returns,
         @Cast("bool") boolean is_vararg/*=false*/,
         @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @ByVal Symbol name,
         @StdString BytePointer overload_name,
         @StdVector Argument arguments,
@@ -92,7 +92,7 @@ public FunctionSchema(
         @StdString BytePointer overload_name,
         @StdVector Argument arguments,
         @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @ByVal Symbol name,
         @StdString BytePointer overload_name,
         @StdVector Argument arguments,
@@ -104,7 +104,7 @@ public FunctionSchema(
         @StdVector Argument returns,
         @Cast("bool") boolean is_vararg/*=false*/,
         @Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @ByVal Symbol name,
         @StdString String overload_name,
         @StdVector Argument arguments,
@@ -116,7 +116,7 @@ public FunctionSchema(
         @StdString String overload_name,
         @StdVector Argument arguments,
         @StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-    private native void allocate(
+    @UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
         @ByVal Symbol name,
         @StdString String overload_name,
         @StdVector Argument arguments,
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java
index b41f2ecc98c..62552a9cd08 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java
@@ -230,6 +230,6 @@ public native Value insert(
     public Pointer shiftLeft(Pointer out) { return shiftLeft(out, this); }

     public native @SharedPtr("torch::jit::Graph") @ByVal Graph copy();
-    public native @UniquePtr Graph copyUnique();
+    public native @UniquePtr @ByVal Graph copyUnique();
     public native void remapTypes(@Const @ByRef TypeMapper type_map);
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java
index e024b408dc7..2909557b4ad 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java
@@ -27,7 +27,7 @@ public class GraphAttr extends AttributeValue {
     public GraphAttr(Pointer p) { super(p); }

     public GraphAttr(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_) { super((Pointer)null); allocate(name, value_); }
-    private native void allocate(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_);
+    @UniquePtr @Name("std::make_unique<torch::jit::GraphAttr>") private native void allocate(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_);
     public native @SharedPtr("torch::jit::Graph") @ByRef Graph value();
     public native @UniquePtr @ByVal AttributeValue clone();
     public native JitAttributeKind kind();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java
index 0d8b14547f7..5b92b82062e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IStreamAdapter.java
@@ -26,7 +26,7 @@ public class IStreamAdapter extends ReadAdapterInterface {

     public IStreamAdapter(@Cast("std::istream*") Pointer istream) { super((Pointer)null); allocate(istream); }
-    private native void allocate(@Cast("std::istream*") Pointer istream);
+    @UniquePtr @Name("std::make_unique<caffe2::serialize::IStreamAdapter>") private native void allocate(@Cast("std::istream*") Pointer istream);
     public native @Cast("size_t") long size();
     public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n, @Cast("const char*") BytePointer what/*=""*/);
     public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java
index f58c5054ede..2e7444c71de 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java
@@ -25,9 +25,9 @@ public class LBFGSOptions extends OptimizerCloneableLBFGSOptions {
     public LBFGSOptions(Pointer p) { super(p); }

     public LBFGSOptions(double lr/*=1*/) { super((Pointer)null); allocate(lr); }
-    private native void allocate(double lr/*=1*/);
+    @UniquePtr @Name("std::make_unique<torch::optim::LBFGSOptions>") private native void allocate(double lr/*=1*/);
     public LBFGSOptions() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<torch::optim::LBFGSOptions>") private native void allocate();
     public native @ByRef @NoException(true) DoublePointer lr();
     public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer max_iter();
     public native @ByRef @NoException(true) LongOptional max_eval();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java
index be1a7e7ee3c..3adec958b96 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java
@@ -23,18 +23,9 @@ public class LBFGSParamState extends OptimizerCloneableLBFGSParamState {
     static { Loader.load(); }
     /** Default native constructor. */
     public LBFGSParamState() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public LBFGSParamState(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public LBFGSParamState(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public LBFGSParamState position(long position) {
-        return (LBFGSParamState)super.position(position);
-    }
-    @Override public LBFGSParamState getPointer(long i) {
-        return new LBFGSParamState((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::LBFGSParamState>") private native void allocate();

     public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer func_evals();
     public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer n_iter();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java
index 18506ddaaf7..17602916df3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMetaInterface.java
@@ -21,20 +21,8 @@
 @Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
 public class NamedTensorMetaInterface extends Pointer {
     static { Loader.load(); }
-    /** Default native constructor. */
-    public NamedTensorMetaInterface() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public NamedTensorMetaInterface(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public NamedTensorMetaInterface(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public NamedTensorMetaInterface position(long position) {
-        return (NamedTensorMetaInterface)super.position(position);
-    }
-    @Override public NamedTensorMetaInterface getPointer(long i) {
-        return new NamedTensorMetaInterface((Pointer)this).offsetAddress(i);
-    }

     public native @UniquePtr NamedTensorMetaInterface clone();
     public native @Cast("int64_t") long slow_dim();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java
index 91e918358d8..f9f2666d981 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Node.java
@@ -258,22 +258,22 @@ public class Node extends Pointer {
   // Hook API
   //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-    public native @Cast("uintptr_t") long add_post_hook(@UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPostHook>&&"}) FunctionPostHook post_hook);
+    public native @Cast("uintptr_t") long add_post_hook(@UniquePtr @ByRef(true) FunctionPostHook post_hook);

     // delete a post hook matching the key
     public native @Cast("bool") boolean del_post_hook(@Cast("const uintptr_t") long key);

     public native @ByRef @NoException(true) FunctionPostHookVector post_hooks();

-    public native void add_pre_hook(@UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook pre_hook);
+    public native void add_pre_hook(@UniquePtr @ByRef(true) FunctionPreHook pre_hook);

-    public native void add_tensor_pre_hook(@UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook pre_hook);
+    public native void add_tensor_pre_hook(@UniquePtr @ByRef(true) FunctionPreHook pre_hook);

     public native void add_retains_grad_hook(
-        @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook pre_hook,
+        @UniquePtr @ByRef(true) FunctionPreHook pre_hook,
         int output_idx);

-    public native @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook pop_retains_grad_hook(int output_idx);
+    public native @UniquePtr @ByVal FunctionPreHook pop_retains_grad_hook(int output_idx);

     public native @ByRef @NoException(true) FunctionPreHookVector pre_hooks();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java
index b73c4d5258f..2932b8957e7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OperatorHandle.java
@@ -58,7 +58,7 @@ public class OperatorHandle extends Pointer {

     public native @ByVal TagArrayRef getTags();

-    public native void setReportErrorCallback_(@UniquePtr SafePyObject callback);
+    public native void setReportErrorCallback_(@UniquePtr @ByVal SafePyObject callback);

     public native @Cast("bool") boolean hasTag(Tag tag);
     public native @Cast("bool") boolean hasTag(@Cast("at::Tag") int tag);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java
index 0ec08f22d91..49a2c462af8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerOptions.java
@@ -23,20 +23,11 @@ public class OptimizerOptions extends Pointer {
     static { Loader.load(); }
     /** Default native constructor. */
     public OptimizerOptions() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public OptimizerOptions(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public OptimizerOptions(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public OptimizerOptions position(long position) {
-        return (OptimizerOptions)super.position(position);
-    }
-    @Override public OptimizerOptions getPointer(long i) {
-        return new OptimizerOptions((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::OptimizerOptions>") private native void allocate();

-    public native @UniquePtr OptimizerOptions clone();
+    public native @UniquePtr @ByVal OptimizerOptions clone();

     public native double get_lr();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java
index 7c35cfbf859..98390a9a848 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java
@@ -34,13 +34,13 @@ public class OptimizerParamGroup extends Pointer {
     private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params);
     public OptimizerParamGroup(
         @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params,
-        @UniquePtr OptimizerOptions options) { super((Pointer)null); allocate(params, options); }
+        @UniquePtr @ByVal OptimizerOptions options) { super((Pointer)null); allocate(params, options); }
     private native void allocate(
         @Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params,
-        @UniquePtr OptimizerOptions options);
+        @UniquePtr @ByVal OptimizerOptions options);

     public native @Cast("bool") boolean has_options();
     public native @ByRef OptimizerOptions options();
-    public native void set_options(@UniquePtr OptimizerOptions options);
+    public native void set_options(@UniquePtr @ByVal OptimizerOptions options);
     public native @ByRef TensorVector params();
 }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java
index fbc4240869d..b19dc6562aa 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamState.java
@@ -24,20 +24,11 @@ public class OptimizerParamState extends Pointer {
     static { Loader.load(); }
     /** Default native constructor. */
     public OptimizerParamState() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public OptimizerParamState(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public OptimizerParamState(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public OptimizerParamState position(long position) {
-        return (OptimizerParamState)super.position(position);
-    }
-    @Override public OptimizerParamState getPointer(long i) {
-        return new OptimizerParamState((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::OptimizerParamState>") private native void allocate();

-    public native @UniquePtr OptimizerParamState clone();
+    public native @UniquePtr @ByVal OptimizerParamState clone();
 }
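With optimizer options now allocated through std::make_unique, a Java-created options object can be handed to APIs that take std::unique_ptr ownership, such as the OptimizerParamGroup constructor above. A hedged usage sketch (the tensor shape and randn factory are illustrative only; ownership of opts moves to the group, so it must not be reused afterwards):

    import org.bytedeco.pytorch.*;
    import org.bytedeco.pytorch.global.torch;

    public class ParamGroupExample {
        public static void main(String[] args) {
            Tensor w = torch.randn(2, 2);                    // parameter to optimize
            TensorVector params = new TensorVector(w);
            AdagradOptions opts = new AdagradOptions(0.01);  // backed by std::make_unique
            OptimizerParamGroup group = new OptimizerParamGroup(params, opts);
            System.out.println(group.options().get_lr());    // prints 0.01
        }
    }
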
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java
index 10ae66a635f..ab67ed44471 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropOptions.java
@@ -25,9 +25,9 @@ public class RMSpropOptions extends OptimizerCloneableRMSpropOptions {
     public RMSpropOptions(Pointer p) { super(p); }

     public RMSpropOptions(double lr/*=1e-2*/) { super((Pointer)null); allocate(lr); }
-    private native void allocate(double lr/*=1e-2*/);
+    @UniquePtr @Name("std::make_unique<torch::optim::RMSpropOptions>") private native void allocate(double lr/*=1e-2*/);
     public RMSpropOptions() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<torch::optim::RMSpropOptions>") private native void allocate();
     public native @ByRef @NoException(true) DoublePointer lr();
     public native @ByRef @NoException(true) DoublePointer alpha();
     public native @ByRef @NoException(true) DoublePointer eps();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java
index 8d3514ae497..4b5d976a03f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RMSpropParamState.java
@@ -23,18 +23,9 @@ public class RMSpropParamState extends OptimizerCloneableRMSpropParamState {
     static { Loader.load(); }
     /** Default native constructor. */
     public RMSpropParamState() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public RMSpropParamState(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public RMSpropParamState(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public RMSpropParamState position(long position) {
-        return (RMSpropParamState)super.position(position);
-    }
-    @Override public RMSpropParamState getPointer(long i) {
-        return new RMSpropParamState((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::RMSpropParamState>") private native void allocate();

     public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
     public native @ByRef @NoException(true) Tensor square_avg();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java
index 93ffd0adc70..b23a0186b21 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDOptions.java
@@ -25,7 +25,7 @@ public class SGDOptions extends OptimizerCloneableSGDOptions {
     public SGDOptions(Pointer p) { super(p); }

     public SGDOptions(double lr) { super((Pointer)null); allocate(lr); }
-    private native void allocate(double lr);
+    @UniquePtr @Name("std::make_unique<torch::optim::SGDOptions>") private native void allocate(double lr);
     public native @ByRef @NoException(true) DoublePointer lr();
     public native @ByRef @NoException(true) DoublePointer momentum();
     public native @ByRef @NoException(true) DoublePointer dampening();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java
index 7259baf47a1..5e4afd49239 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SGDParamState.java
@@ -23,18 +23,9 @@ public class SGDParamState extends OptimizerCloneableSGDParamState {
     static { Loader.load(); }
     /** Default native constructor. */
     public SGDParamState() { super((Pointer)null); allocate(); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public SGDParamState(long size) { super((Pointer)null); allocateArray(size); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public SGDParamState(Pointer p) { super(p); }
-    private native void allocate();
-    private native void allocateArray(long size);
-    @Override public SGDParamState position(long position) {
-        return (SGDParamState)super.position(position);
-    }
-    @Override public SGDParamState getPointer(long i) {
-        return new SGDParamState((Pointer)this).offsetAddress(i);
-    }
+    @UniquePtr @Name("std::make_unique<torch::optim::SGDParamState>") private native void allocate();

     public native @ByRef @NoException(true) Tensor momentum_buffer();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java
index 81e0bbbb085..b2d95411259 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java
@@ -38,9 +38,9 @@ public class SafePyObject extends Pointer {
     // Steals a reference to data
     public SafePyObject(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter) { super((Pointer)null); allocate(data, pyinterpreter); }
-    private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter);
+    @UniquePtr @Name("std::make_unique<c10::SafePyObject>") private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter);
     public SafePyObject(@ByRef(true) SafePyObject other) { super((Pointer)null); allocate(other); }
-    private native void allocate(@ByRef(true) SafePyObject other);
+    @UniquePtr @Name("std::make_unique<c10::SafePyObject>") private native void allocate(@ByRef(true) SafePyObject other);

     // In principle this could be copyable if we add an incref to PyInterpreter
     // but for now it's easier to just disallow it.
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java
index 460178b28de..ae6076fa37b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIterator.java
@@ -23,21 +23,12 @@ public class TensorIterator extends TensorIteratorBase {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public TensorIterator(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public TensorIterator(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public TensorIterator position(long position) {
-        return (TensorIterator)super.position(position);
-    }
-    @Override public TensorIterator getPointer(long i) {
-        return new TensorIterator((Pointer)this).offsetAddress(i);
-    }

     public TensorIterator() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<at::TensorIterator>") private native void allocate();

     // Slicing is OK, TensorIterator guaranteed NOT to have any fields
     public TensorIterator(@Const @ByRef TensorIteratorBase iter) { super((Pointer)null); allocate(iter); }
-    private native void allocate(@Const @ByRef TensorIteratorBase iter);
+    @UniquePtr @Name("std::make_unique<at::TensorIterator>") private native void allocate(@Const @ByRef TensorIteratorBase iter);

     // #define TORCH_DISALLOW_TEMPORARIES(methodname)
     //   TORCH_DISALLOW_TEMPORARIES_IMPL(methodname, static)
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java
index 70de45d94d3..add02ce3b13 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIteratorBase.java
@@ -108,7 +108,7 @@ public class TensorIteratorBase extends MetaBase {

     /** Splits this TensorIterator into two iterators. Together they iterate over
      *  the entire operation. Used by {@code with_32bit_indexing()}. */
-    public native @UniquePtr TensorIterator split(int dim);
+    public native @UniquePtr @ByVal TensorIterator split(int dim);

     /** Returns the dimension with the largest extent: (size[dim]-1) * stride[dim] */
     public native int get_dim_to_split();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
index 9cbb7c23035..919c01b0bc7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java
@@ -15140,7 +15140,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {

 @Namespace("torch::autograd::impl") public static native void add_hook(
         @Const @ByRef TensorBase arg0,
-        @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook hook);
+        @UniquePtr @ByVal FunctionPreHook hook);
 @Namespace("torch::autograd::impl") public static native @ByRef FunctionPreHookVector hooks(@Cast("const torch::autograd::Variable*") @ByRef Tensor arg0);
 @Namespace("torch::autograd::impl") public static native void clear_hooks(@Const @ByRef TensorBase arg0);
diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
index e609bea0b9a..55b552881fc 100644
--- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
+++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java
@@ -347,10 +347,6 @@ public void map(InfoMap infoMap) {
             .put(new Info("c10::ClassType").purify().pointerTypes("ClassType")) // Issue #669
             .put(new Info("c10::EnumType").purify().pointerTypes("EnumType")) // Issue #669
             .put(new Info("c10::NamedType").purify().pointerTypes("NamedType")) // Issue #669
-            // See comments in PR#668 about a const-agnostic adapter
-            .put(new Info("std::unique_ptr<c10::FunctionSchema>").annotations("@UniquePtr")
-                .valueTypes("@Cast({\"\", \"std::unique_ptr<c10::FunctionSchema>&&\"}) FunctionSchema")
-                .pointerTypes("FunctionSchema"))
             .put(new Info("c10::MaybeOwned<at::Tensor>").valueTypes("@Cast({\"\", \"c10::MaybeOwned<at::Tensor>&&\"}) @StdMove TensorMaybeOwned").pointerTypes("TensorMaybeOwned"))
             .put(new Info("c10::MaybeOwned<at::TensorBase>").valueTypes("@Cast({\"\", \"c10::MaybeOwned<at::TensorBase>&&\"}) @StdMove TensorBaseMaybeOwned").pointerTypes("TensorBaseMaybeOwned"))
             .put(new Info("at::InferExpandGeometryResult<at::DimVector>").pointerTypes("DimVectorInferExpandGeometryResult"))
@@ -636,7 +632,7 @@ public void map(InfoMap infoMap) {
                     "std::vector<std::unique_ptr<torch::autograd::FunctionPostHook> >").pointerTypes("FunctionPostHookVector").define())
             .put(new Info("const std::vector<torch::jit::Def>", "std::vector<torch::jit::Def>").pointerTypes("DefVector").define())
             .put(new Info("const std::vector<torch::jit::Property>", "std::vector<torch::jit::Property>").pointerTypes("PropertyVector").define())
-            .put(new Info("const std::vector<torch::optim::OptimizerParamGroup>", "std::vector<torch::optim::OptimizerParamGroup>").pointerTypes("OptimizerParamGroupVector").define())
+            .put(new Info("const std::vector<torch::optim::OptimizerParamGroup>", "std::vector<torch::optim::OptimizerParamGroup>").pointerTypes("OptimizerParamGroupVector").define()) // OptimizerParamGroup::operator= erased
             .put(new Info("std::vector<torch::jit::Function*>").pointerTypes("FunctionVector").define())
             .put(new Info("std::vector<std::shared_ptr<torch::jit::Graph> >").pointerTypes("GraphVector").define())
             .put(new Info("std::vector<std::shared_ptr<torch::jit::Operator> >").pointerTypes("OperatorVector").define())
@@ -1700,31 +1696,6 @@ public void map(InfoMap infoMap) {
         mapModule(infoMap, "TransformerDecoder");
         mapModule(infoMap, "Transformer");

-        infoMap.put(new Info("torch::optim::OptimizerCloneableOptions<torch::optim::AdagradOptions>",
-                             "torch::optim::OptimizerCloneableOptions<AdagradOptions>").pointerTypes("OptimizerCloneableAdagradOptions"))
-              .put(new Info("torch::optim::OptimizerCloneableParamState<torch::optim::AdagradParamState>",
-                             "torch::optim::OptimizerCloneableParamState<AdagradParamState>").pointerTypes("OptimizerCloneableAdagradParamState"))
-              .put(new Info("torch::optim::OptimizerCloneableOptions<torch::optim::AdamOptions>",
-                             "torch::optim::OptimizerCloneableOptions<AdamOptions>").pointerTypes("OptimizerCloneableAdamOptions"))
-              .put(new Info("torch::optim::OptimizerCloneableParamState<torch::optim::AdamParamState>",
-                             "torch::optim::OptimizerCloneableParamState<AdamParamState>").pointerTypes("OptimizerCloneableAdamParamState"))
-              .put(new Info("torch::optim::OptimizerCloneableOptions<torch::optim::AdamWOptions>",
-                             "torch::optim::OptimizerCloneableOptions<AdamWOptions>").pointerTypes("OptimizerCloneableAdamWOptions"))
-              .put(new Info("torch::optim::OptimizerCloneableParamState<torch::optim::AdamWParamState>",
-                             "torch::optim::OptimizerCloneableParamState<AdamWParamState>").pointerTypes("OptimizerCloneableAdamWParamState"))
-              .put(new Info("torch::optim::OptimizerCloneableOptions<torch::optim::LBFGSOptions>",
-                             "torch::optim::OptimizerCloneableOptions<LBFGSOptions>").pointerTypes("OptimizerCloneableLBFGSOptions"))
-              .put(new Info("torch::optim::OptimizerCloneableParamState<torch::optim::LBFGSParamState>",
-                             "torch::optim::OptimizerCloneableParamState<LBFGSParamState>").pointerTypes("OptimizerCloneableLBFGSParamState"))
-              .put(new Info("torch::optim::OptimizerCloneableOptions<torch::optim::RMSpropOptions>",
-                             "torch::optim::OptimizerCloneableOptions<RMSpropOptions>").pointerTypes("OptimizerCloneableRMSpropOptions"))
-              .put(new Info("torch::optim::OptimizerCloneableParamState<torch::optim::RMSpropParamState>",
-                             "torch::optim::OptimizerCloneableParamState<RMSpropParamState>").pointerTypes("OptimizerCloneableRMSpropParamState"))
-              .put(new Info("torch::optim::OptimizerCloneableOptions<torch::optim::SGDOptions>",
-                             "torch::optim::OptimizerCloneableOptions<SGDOptions>").pointerTypes("OptimizerCloneableSGDOptions"))
-              .put(new Info("torch::optim::OptimizerCloneableParamState<torch::optim::SGDParamState>",
-                             "torch::optim::OptimizerCloneableParamState<SGDParamState>").pointerTypes("OptimizerCloneableSGDParamState"))
-        ;

         //// AnyModule, AnyValue and Sequential
         infoMap
@@ -1787,7 +1758,6 @@ public void map(InfoMap infoMap) {
                 new PointerInfo("torch::jit::Resolver"),
                 new PointerInfo("c10::ClassType"),
                 new PointerInfo("c10::TensorType").otherCppNames("c10::TensorTypePtr", "at::TensorTypePtr", "torch::TensorTypePtr"),
-                new PointerInfo("torch::autograd::FunctionPreHook"),
                 new PointerInfo("torch::nn::Module"),
                 new PointerInfo("const at::functorch::FuncTorchTLSBase"),
                 new PointerInfo("const torch::jit::CompilationUnit"),
@@ -1798,16 +1768,43 @@ public void map(InfoMap infoMap) {

         //// @UniquePtr
+        for (String opt: new String[] { "Adagrad", "Adam", "AdamW", "LBFGS", "RMSprop", "SGD" }) {
+            infoMap
+                .put(new Info("torch::optim::" + opt + "Options", "torch::optim::" + opt + "ParamState")) // Help qualification
+                .put(new Info("torch::optim::OptimizerCloneableOptions<torch::optim::" + opt + "Options>").pointerTypes("OptimizerCloneable" + opt + "Options"))
+                .put(new Info("torch::optim::OptimizerCloneableParamState<torch::optim::" + opt + "ParamState>").pointerTypes("OptimizerCloneable" + opt + "ParamState"))
+            ;
+            new PointerInfo("torch::optim::OptimizerCloneableParamState<torch::optim::" + opt + "ParamState>").javaBaseName("OptimizerCloneable" + opt + "ParamState").makeUnique(infoMap);
+            new PointerInfo("torch::optim::OptimizerCloneableOptions<torch::optim::" + opt + "Options>").javaBaseName("OptimizerCloneable" + opt + "Options").makeUnique(infoMap);
+            new PointerInfo("torch::optim::" + opt + "Options").makeUnique(infoMap);
+            new PointerInfo("torch::optim::" + opt + "ParamState").makeUnique(infoMap);
+        }
+        for (PointerInfo pi : new PointerInfo[]{
+                new PointerInfo("torch::optim::OptimizerOptions"),
+                new PointerInfo("torch::optim::OptimizerParamState"),
+                new PointerInfo("torch::autograd::AutogradMeta"),
+                new PointerInfo("torch::jit::GraphAttr"),
+                new PointerInfo("torch::jit::Graph"),
+                new PointerInfo("c10::NamedTensorMeta"),
+                new PointerInfo("c10::FunctionSchema"),
+                new PointerInfo("c10::SafePyObject"),
+                new PointerInfo("at::CPUGeneratorImpl"),
+                new PointerInfo("at::TensorIterator"),
+                new PointerInfo("caffe2::serialize::IStreamAdapter"),
+                new PointerInfo("torch::autograd::FunctionPreHook"),
+                new PointerInfo("torch::autograd::FunctionPostHook"),
+                // Other classes passed as unique ptr are abstract, so not instantiated from Java:
+                // ReadAdapterInterface, PostAccumulateGradHook, FuncTorchTLSBase, AutogradMetaInterface,
+                // GeneratorImpl, OpRegistrationListener, AttributeValue
+        }) {
+            pi.makeUnique(infoMap);
+        }
         infoMap
-            .put(new Info("std::unique_ptr<torch::autograd::FunctionPreHook>").annotations("@UniquePtr")
-                .valueTypes("@Cast({\"\", \"std::unique_ptr<torch::autograd::FunctionPreHook>&&\"}) FunctionPreHook")
-                .pointerTypes("FunctionPreHook"))
-            .put(new Info("std::unique_ptr<torch::autograd::FunctionPostHook>").annotations("@UniquePtr")
-                .valueTypes("@Cast({\"\", \"std::unique_ptr<torch::autograd::FunctionPostHook>&&\"}) FunctionPostHook")
-                .pointerTypes("FunctionPostHook"))
-            .put(new Info("std::unique_ptr<torch::jit::AttributeValue>", "Ptr").annotations("@UniquePtr").pointerTypes("AttributeValue"))
+            .put(new Info("std::unique_ptr<torch::jit::AttributeValue>", "torch::jit::GraphAttr::Ptr").annotations("@UniquePtr").pointerTypes("AttributeValue")) // Ptr is really defined in AttributeValue (superclass of GraphAttr). But Parser doesn't find it.
+            .put(new Info("torch::autograd::AutogradMeta::post_acc_grad_hooks_").annotations("@UniquePtr", "@Cast({\"\", \"\", \"std::unique_ptr<torch::autograd::PostAccumulateGradHook>&&\"})")) // See JavaCPP Issue #717
         ;
-        infoMap.put(new Info("torch::autograd::AutogradMeta::post_acc_grad_hooks_").annotations("@UniquePtr", "@Cast({\"\", \"\", \"std::unique_ptr<torch::autograd::PostAccumulateGradHook>&&\"})")); // See JavaCPP Issue #717
+
         /* TODO: see how to map these, if needed and meant to be part of API */
         infoMap.put(new Info("c10::MaybeOwnedTraitsGenericImpl<std::vector<at::Tensor> >::assignBorrow",
@@ -2395,7 +2392,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset
                 + "public native @ByVal Tensor step();\n"));

-        // Abstract classes because parent class is abstract, and not detected as such by Parser.
+        // Abstract classes not detected as such by Parser (e.g. because parent class is abstract).
         String[] abstracts = new String[]{
             "torch::nn::InstanceNormImpl<1,torch::nn::InstanceNorm1dImpl>",
             "torch::nn::InstanceNormImpl<2,torch::nn::InstanceNorm2dImpl>",
@@ -2408,7 +2405,10 @@ We need either to put an annotation info on each member, or javaName("@NoOffset
         for (String a : abstracts) {
             infoMap.getFirst(a, false).purify();
         }
-        infoMap.put(new Info("at::TensorIteratorBase").purify());
+        infoMap.put(new Info(
+            "at::TensorIteratorBase",
+            "c10::NamedTensorMetaInterface"
+        ).purify());

         //// Function pointers
@@ -2713,6 +2713,18 @@ void makeShared(InfoMap infoMap) {
             String n2 = n.equals("torch::nn::Module") ? "JavaCPP_torch_0003a_0003ann_0003a_0003aModule" : n;
             infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@SharedPtr", "@Name(\"std::make_shared<" + n2 + ">\")"));
         }
+
+        void makeUnique(InfoMap infoMap) {
+            // The default info in infoMap is not enough for classes that are elements for containers like vector<unique_ptr<T>>
+            String[] cppNames = new String[argumentNames.length + otherCppNames.length];
+            int i = 0;
+            for (String n : argumentNames) cppNames[i++] = template("std::unique_ptr", n);
+            for (String n : otherCppNames) cppNames[i++] = n;
+            infoMap.put(new Info(cppNames).annotations("@UniquePtr").pointerTypes(javaBaseName));
+
+            String n = argumentNames[0].substring(argumentNames[0].lastIndexOf(' ') + 1); // Remove possible const
+            infoMap.put(new Info(n + n.substring(n.lastIndexOf("::"))).annotations("@UniquePtr", "@Name(\"std::make_unique<" + n + ">\")"));
+        }
     }

     @Namespace("std") public static native @MemberGetter @ByRef @Cast("std::istream*") Pointer cin();
diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java
index 8bcedbe130a..b31b04da97b 100644
--- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java
+++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Interpreter.java
@@ -14,15 +14,6 @@ public class Interpreter extends Pointer {
     static { Loader.load(); }
     /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
     public Interpreter(Pointer p) { super(p); }
-    /** Native array allocator. Access with {@link Pointer#position(long)}. */
-    public Interpreter(long size) { super((Pointer)null); allocateArray(size); }
-    private native void allocateArray(long size);
-    @Override public Interpreter position(long position) {
-        return (Interpreter)super.position(position);
-    }
-    @Override public Interpreter getPointer(long i) {
-        return new Interpreter((Pointer)this).offsetAddress(i);
-    }

     // Instantiate an interpreter. All errors associated with reading and
     // processing this model will be forwarded to the error_reporter object.
@@ -32,9 +23,9 @@ public class Interpreter extends Pointer {
     // WARNING: Use of this constructor outside of an InterpreterBuilder is not
     // recommended.
     public Interpreter(ErrorReporter error_reporter/*=tflite::DefaultErrorReporter()*/) { super((Pointer)null); allocate(error_reporter); }
-    private native void allocate(ErrorReporter error_reporter/*=tflite::DefaultErrorReporter()*/);
+    @UniquePtr @Name("std::make_unique<tflite::impl::Interpreter>") private native void allocate(ErrorReporter error_reporter/*=tflite::DefaultErrorReporter()*/);
     public Interpreter() { super((Pointer)null); allocate(); }
-    private native void allocate();
+    @UniquePtr @Name("std::make_unique<tflite::impl::Interpreter>") private native void allocate();

     // Interpreters are not copyable as they have non-trivial memory semantics.
diff --git a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java
index ee39aa071e6..cae8255bba9 100644
--- a/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java
+++ b/tensorflow-lite/src/gen/java/org/bytedeco/tensorflowlite/Subgraph.java
@@ -25,7 +25,7 @@ public Subgraph(ErrorReporter error_reporter,
         @Cast("tflite::resource::ResourceIDMap*") StringIntMap resource_ids,
         @Cast("tflite::resource::InitializationStatusMap*") IntResourceBaseMap initialization_status_map,
         int subgraph_index/*=kInvalidSubgraphIndex*/) { super((Pointer)null); allocate(error_reporter, external_contexts, subgraphs, resources, resource_ids, initialization_status_map, subgraph_index); }
-    private native void allocate(ErrorReporter error_reporter,
+    @UniquePtr @Name("std::make_unique<tflite::Subgraph>") private native void allocate(ErrorReporter error_reporter,
         @Cast("TfLiteExternalContext**") PointerPointer external_contexts,
         SubgraphVector subgraphs,
         @Cast("tflite::resource::ResourceMap*") IntResourceBaseMap resources,
@@ -38,7 +38,7 @@ public Subgraph(ErrorReporter error_reporter,
         @Cast("tflite::resource::ResourceMap*") IntResourceBaseMap resources,
         @Cast("tflite::resource::ResourceIDMap*") StringIntMap resource_ids,
         @Cast("tflite::resource::InitializationStatusMap*") IntResourceBaseMap initialization_status_map) { super((Pointer)null); allocate(error_reporter, external_contexts, subgraphs, resources, resource_ids, initialization_status_map); }
-    private native void allocate(ErrorReporter error_reporter,
+    @UniquePtr @Name("std::make_unique<tflite::Subgraph>") private native void allocate(ErrorReporter error_reporter,
         @ByPtrPtr TfLiteExternalContext external_contexts,
         SubgraphVector subgraphs,
         @Cast("tflite::resource::ResourceMap*") IntResourceBaseMap resources,
@@ -51,7 +51,7 @@ public Subgraph(ErrorReporter error_reporter,
         @Cast("tflite::resource::ResourceIDMap*") StringIntMap resource_ids,
         @Cast("tflite::resource::InitializationStatusMap*") IntResourceBaseMap initialization_status_map,
         int subgraph_index/*=kInvalidSubgraphIndex*/) { super((Pointer)null); allocate(error_reporter, external_contexts, subgraphs, resources, resource_ids, initialization_status_map, subgraph_index); }
-    private native void allocate(ErrorReporter error_reporter,
+    @UniquePtr @Name("std::make_unique<tflite::Subgraph>") private native void allocate(ErrorReporter error_reporter,
         @ByPtrPtr TfLiteExternalContext external_contexts,
         SubgraphVector subgraphs,
         @Cast("tflite::resource::ResourceMap*") IntResourceBaseMap resources,
@@ -63,7 +63,7 @@ private native void allocate(ErrorReporter error_reporter,

     // Subgraphs should be movable but not copyable.
     public Subgraph(@StdMove Subgraph arg0) { super((Pointer)null); allocate(arg0); }
-    private native void allocate(@StdMove Subgraph arg0);
+    @UniquePtr @Name("std::make_unique<tflite::Subgraph>") private native void allocate(@StdMove Subgraph arg0);

     // Provide a list of tensor indexes that are inputs to the model.
diff --git a/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java b/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java
index b5d75e5f938..e0d38f01f3d 100644
--- a/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java
+++ b/tensorflow-lite/src/main/java/org/bytedeco/tensorflowlite/presets/tensorflowlite.java
@@ -170,6 +170,10 @@ public void map(InfoMap infoMap) {
             .put(new Info("tflite::impl::Interpreter::typed_output_tensor<double>").javaNames("typed_output_tensor_double"))
             .put(new Info("tflite::impl::Interpreter::typed_output_tensor<bool>").javaNames("typed_output_tensor_bool"))
             .put(new Info("tflite::impl::Interpreter::typed_output_tensor<TfLiteFloat16>").javaNames("typed_input_tensor_float16"))
+
+            // Classes passed to some native functions as unique_ptr and that can be allocated Java-side
+            .put(new Info("tflite::impl::Interpreter::Interpreter").annotations("@UniquePtr", "@Name(\"std::make_unique<tflite::impl::Interpreter>\")"))
+            .put(new Info("tflite::Subgraph::Subgraph").annotations("@UniquePtr", "@Name(\"std::make_unique<tflite::Subgraph>\")"))
         ;
     }
 }
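On the TensorFlow Lite side, the same pattern means a Java-created Interpreter is backed by a std::unique_ptr and can be released deterministically. A small usage sketch (model loading elided; relies on JavaCPP's Pointer implementing AutoCloseable):

    import org.bytedeco.tensorflowlite.*;

    public class InterpreterExample {
        public static void main(String[] args) {
            // Allocated natively via std::make_unique<tflite::impl::Interpreter>:
            try (Interpreter interpreter = new Interpreter()) {
                // ... wire up a model with InterpreterBuilder before invoking.
            } // the owning unique_ptr is released here
        }
    }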