Fix mapping of unique pointers #1462

Merged 2 commits on Jan 20, 2024
4 changes: 2 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
@@ -25,9 +25,9 @@ public class AdagradOptions extends OptimizerCloneableAdagradOptions {
public AdagradOptions(Pointer p) { super(p); }

public AdagradOptions(double lr/*=1e-2*/) { super((Pointer)null); allocate(lr); }
-private native void allocate(double lr/*=1e-2*/);
+@UniquePtr @Name("std::make_unique<torch::optim::AdagradOptions>") private native void allocate(double lr/*=1e-2*/);
public AdagradOptions() { super((Pointer)null); allocate(); }
-private native void allocate();
+@UniquePtr @Name("std::make_unique<torch::optim::AdagradOptions>") private native void allocate();
public native @ByRef @NoException(true) DoublePointer lr();
public native @ByRef @NoException(true) DoublePointer lr_decay();
public native @ByRef @NoException(true) DoublePointer weight_decay();
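To illustrate the effect of the new mapping, here is a minimal usage sketch. It assumes only the constructors and accessors shown in this diff plus JavaCPP's standard Pointer.close(); the class and method names of the sketch itself are made up. With the @UniquePtr @Name("std::make_unique<...>") binding, the native torch::optim::AdagradOptions behind the wrapper is created by std::make_unique and released through a std::unique_ptr-based deallocator instead of a raw pointer.

```java
import org.bytedeco.pytorch.AdagradOptions;

public class UniquePtrAllocateSketch {
    public static void main(String[] args) {
        // Both constructors route through the @UniquePtr allocate() overloads above,
        // so the native object is created with std::make_unique<torch::optim::AdagradOptions>.
        AdagradOptions options = new AdagradOptions(1e-2);

        // lr() returns a DoublePointer view of the native field.
        System.out.println("lr = " + options.lr().get());

        // Closing the wrapper frees the object through the unique_ptr deallocator.
        options.close();
    }
}
```

The same pattern applies to the other optimizer options and param-state classes changed below.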
11 changes: 1 addition & 10 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
@@ -23,18 +23,9 @@ public class AdagradParamState extends OptimizerCloneableAdagradParamState {
static { Loader.load(); }
/** Default native constructor. */
public AdagradParamState() { super((Pointer)null); allocate(); }
-/** Native array allocator. Access with {@link Pointer#position(long)}. */
-public AdagradParamState(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AdagradParamState(Pointer p) { super(p); }
-private native void allocate();
-private native void allocateArray(long size);
-@Override public AdagradParamState position(long position) {
-return (AdagradParamState)super.position(position);
-}
-@Override public AdagradParamState getPointer(long i) {
-return new AdagradParamState((Pointer)this).offsetAddress(i);
-}
+@UniquePtr @Name("std::make_unique<torch::optim::AdagradParamState>") private native void allocate();

public native @ByRef @NoException(true) Tensor sum();
public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
4 changes: 2 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
@@ -25,9 +25,9 @@ public class AdamOptions extends OptimizerCloneableAdamOptions {
public AdamOptions(Pointer p) { super(p); }

public AdamOptions(double lr/*=1e-3*/) { super((Pointer)null); allocate(lr); }
-private native void allocate(double lr/*=1e-3*/);
+@UniquePtr @Name("std::make_unique<torch::optim::AdamOptions>") private native void allocate(double lr/*=1e-3*/);
public AdamOptions() { super((Pointer)null); allocate(); }
-private native void allocate();
+@UniquePtr @Name("std::make_unique<torch::optim::AdamOptions>") private native void allocate();
public native @ByRef @NoException(true) DoublePointer lr();
public native @Cast("std::tuple<double,double>*") @ByRef @NoException DoublePointer betas();
public native @ByRef @NoException(true) DoublePointer eps();
11 changes: 1 addition & 10 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
@@ -23,18 +23,9 @@ public class AdamParamState extends OptimizerCloneableAdamParamState {
static { Loader.load(); }
/** Default native constructor. */
public AdamParamState() { super((Pointer)null); allocate(); }
-/** Native array allocator. Access with {@link Pointer#position(long)}. */
-public AdamParamState(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AdamParamState(Pointer p) { super(p); }
-private native void allocate();
-private native void allocateArray(long size);
-@Override public AdamParamState position(long position) {
-return (AdamParamState)super.position(position);
-}
-@Override public AdamParamState getPointer(long i) {
-return new AdamParamState((Pointer)this).offsetAddress(i);
-}
+@UniquePtr @Name("std::make_unique<torch::optim::AdamParamState>") private native void allocate();

public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
public native @ByRef @NoException(true) Tensor exp_avg();
4 changes: 2 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
@@ -25,9 +25,9 @@ public class AdamWOptions extends OptimizerCloneableAdamWOptions {
public AdamWOptions(Pointer p) { super(p); }

public AdamWOptions(double lr/*=1e-3*/) { super((Pointer)null); allocate(lr); }
-private native void allocate(double lr/*=1e-3*/);
+@UniquePtr @Name("std::make_unique<torch::optim::AdamWOptions>") private native void allocate(double lr/*=1e-3*/);
public AdamWOptions() { super((Pointer)null); allocate(); }
-private native void allocate();
+@UniquePtr @Name("std::make_unique<torch::optim::AdamWOptions>") private native void allocate();
public native @ByRef @NoException(true) DoublePointer lr();
public native @Cast("std::tuple<double,double>*") @ByRef @NoException DoublePointer betas();
public native @ByRef @NoException(true) DoublePointer eps();
11 changes: 1 addition & 10 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
@@ -23,18 +23,9 @@ public class AdamWParamState extends OptimizerCloneableAdamWParamState {
static { Loader.load(); }
/** Default native constructor. */
public AdamWParamState() { super((Pointer)null); allocate(); }
-/** Native array allocator. Access with {@link Pointer#position(long)}. */
-public AdamWParamState(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AdamWParamState(Pointer p) { super(p); }
-private native void allocate();
-private native void allocateArray(long size);
-@Override public AdamWParamState position(long position) {
-return (AdamWParamState)super.position(position);
-}
-@Override public AdamWParamState getPointer(long i) {
-return new AdamWParamState((Pointer)this).offsetAddress(i);
-}
+@UniquePtr @Name("std::make_unique<torch::optim::AdamWParamState>") private native void allocate();

public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();
public native @ByRef @NoException(true) Tensor exp_avg();
13 changes: 2 additions & 11 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMeta.java
@@ -32,15 +32,6 @@ public class AutogradMeta extends AutogradMetaInterface {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AutogradMeta(Pointer p) { super(p); }
-/** Native array allocator. Access with {@link Pointer#position(long)}. */
-public AutogradMeta(long size) { super((Pointer)null); allocateArray(size); }
-private native void allocateArray(long size);
-@Override public AutogradMeta position(long position) {
-return (AutogradMeta)super.position(position);
-}
-@Override public AutogradMeta getPointer(long i) {
-return new AutogradMeta((Pointer)this).offsetAddress(i);
-}

public native @StdString BytePointer name_(); public native AutogradMeta name_(BytePointer setter);

@@ -121,10 +112,10 @@ public AutogradMeta(
TensorImpl self_impl/*=nullptr*/,
@Cast("bool") boolean requires_grad/*=false*/,
@ByVal(nullValue = "torch::autograd::Edge()") Edge gradient_edge) { super((Pointer)null); allocate(self_impl, requires_grad, gradient_edge); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<torch::autograd::AutogradMeta>") private native void allocate(
TensorImpl self_impl/*=nullptr*/,
@Cast("bool") boolean requires_grad/*=false*/,
@ByVal(nullValue = "torch::autograd::Edge()") Edge gradient_edge);
public AutogradMeta() { super((Pointer)null); allocate(); }
-private native void allocate();
+@UniquePtr @Name("std::make_unique<torch::autograd::AutogradMeta>") private native void allocate();
}
@@ -26,9 +26,9 @@ public class CPUGeneratorImpl extends GeneratorImpl {

// Constructors
public CPUGeneratorImpl(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/) { super((Pointer)null); allocate(seed_in); }
-private native void allocate(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/);
+@UniquePtr @Name("std::make_unique<at::CPUGeneratorImpl>") private native void allocate(@Cast("uint64_t") long seed_in/*=c10::default_rng_seed_val*/);
public CPUGeneratorImpl() { super((Pointer)null); allocate(); }
-private native void allocate();
+@UniquePtr @Name("std::make_unique<at::CPUGeneratorImpl>") private native void allocate();

// CPUGeneratorImpl methods
public native @SharedPtr CPUGeneratorImpl clone();
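The same idea, sketched for the generator class above. The sketch assumes nothing beyond the seeded constructor and the clone() signature shown in this hunk, plus JavaCPP's Pointer.close():

```java
import org.bytedeco.pytorch.CPUGeneratorImpl;

public class GeneratorAllocationSketch {
    public static void main(String[] args) {
        // allocate(long) is now bound to std::make_unique<at::CPUGeneratorImpl>,
        // so the wrapper owns its generator through a unique_ptr deallocator.
        CPUGeneratorImpl generator = new CPUGeneratorImpl(42L);

        // clone() is annotated @SharedPtr above: the copy is an independent,
        // shared_ptr-owned generator on the native side.
        CPUGeneratorImpl copy = generator.clone();

        generator.close(); // runs the unique_ptr-based deallocator
        copy.close();      // drops the shared_ptr reference held by the wrapper
    }
}
```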
4 changes: 2 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/Dispatcher.java
@@ -129,8 +129,8 @@ public class Dispatcher extends Pointer {
*/
// NB: steals the inferred function schema, as we may need to hold on to
// it for a bit until the real schema turns up
-public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @Cast({"", "std::unique_ptr<c10::FunctionSchema>&&"}) FunctionSchema inferred_function_schema, @StdString BytePointer debug);
-public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @Cast({"", "std::unique_ptr<c10::FunctionSchema>&&"}) FunctionSchema inferred_function_schema, @StdString String debug);
+public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @ByVal FunctionSchema inferred_function_schema, @StdString BytePointer debug);
+public native @ByVal RegistrationHandleRAII registerImpl(@ByVal OperatorName op_name, @ByVal DispatchKeyOptional dispatch_key, @ByVal KernelFunction kernel, @ByVal CppSignatureOptional cpp_signature, @UniquePtr @ByVal FunctionSchema inferred_function_schema, @StdString String debug);

/**
* Register a new operator by name.
@@ -31,7 +31,7 @@ public class FunctionPostHookVector extends Pointer {

public FunctionPostHook front() { return get(0); }
public FunctionPostHook back() { return get(size() - 1); }
-@Index(function = "at") public native @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPostHook>&&"}) FunctionPostHook get(@Cast("size_t") long i);
+@Index(function = "at") public native @UniquePtr FunctionPostHook get(@Cast("size_t") long i);

public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@@ -41,7 +41,7 @@ public Iterator() { }

public native @Name("operator ++") @ByRef Iterator increment();
public native @Name("operator ==") boolean equals(@ByRef Iterator it);
-public native @Name("operator *") @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPostHook>&&"}) FunctionPostHook get();
+public native @Name("operator *") @UniquePtr @Const FunctionPostHook get();
}
}

@@ -31,7 +31,7 @@ public class FunctionPreHookVector extends Pointer {

public FunctionPreHook front() { return get(0); }
public FunctionPreHook back() { return get(size() - 1); }
-@Index(function = "at") public native @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook get(@Cast("size_t") long i);
+@Index(function = "at") public native @UniquePtr FunctionPreHook get(@Cast("size_t") long i);

public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@@ -41,7 +41,7 @@ public Iterator() { }

public native @Name("operator ++") @ByRef Iterator increment();
public native @Name("operator ==") boolean equals(@ByRef Iterator it);
-public native @Name("operator *") @UniquePtr @Cast({"", "std::unique_ptr<torch::autograd::FunctionPreHook>&&"}) FunctionPreHook get();
+public native @Name("operator *") @UniquePtr @Const FunctionPreHook get();
}
}

16 changes: 8 additions & 8 deletions pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java
@@ -31,7 +31,7 @@ public FunctionSchema(
@StdVector Argument returns,
@Cast("bool") boolean is_vararg/*=false*/,
@Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@StdString BytePointer name,
@StdString BytePointer overload_name,
@StdVector Argument arguments,
@@ -43,7 +43,7 @@ public FunctionSchema(
@StdString BytePointer overload_name,
@StdVector Argument arguments,
@StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@StdString BytePointer name,
@StdString BytePointer overload_name,
@StdVector Argument arguments,
@@ -55,7 +55,7 @@ public FunctionSchema(
@StdVector Argument returns,
@Cast("bool") boolean is_vararg/*=false*/,
@Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@StdString String name,
@StdString String overload_name,
@StdVector Argument arguments,
@@ -67,7 +67,7 @@ public FunctionSchema(
@StdString String overload_name,
@StdVector Argument arguments,
@StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@StdString String name,
@StdString String overload_name,
@StdVector Argument arguments,
@@ -80,7 +80,7 @@ public FunctionSchema(
@StdVector Argument returns,
@Cast("bool") boolean is_vararg/*=false*/,
@Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@ByVal Symbol name,
@StdString BytePointer overload_name,
@StdVector Argument arguments,
@@ -92,7 +92,7 @@ public FunctionSchema(
@StdString BytePointer overload_name,
@StdVector Argument arguments,
@StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@ByVal Symbol name,
@StdString BytePointer overload_name,
@StdVector Argument arguments,
@@ -104,7 +104,7 @@ public FunctionSchema(
@StdVector Argument returns,
@Cast("bool") boolean is_vararg/*=false*/,
@Cast("bool") boolean is_varret/*=false*/) { super((Pointer)null); allocate(name, overload_name, arguments, returns, is_vararg, is_varret); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@ByVal Symbol name,
@StdString String overload_name,
@StdVector Argument arguments,
@@ -116,7 +116,7 @@ public FunctionSchema(
@StdString String overload_name,
@StdVector Argument arguments,
@StdVector Argument returns) { super((Pointer)null); allocate(name, overload_name, arguments, returns); }
-private native void allocate(
+@UniquePtr @Name("std::make_unique<c10::FunctionSchema>") private native void allocate(
@ByVal Symbol name,
@StdString String overload_name,
@StdVector Argument arguments,
2 changes: 1 addition & 1 deletion pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java
@@ -230,6 +230,6 @@ public native Value insert(
public Pointer shiftLeft(Pointer out) { return shiftLeft(out, this); }

public native @SharedPtr("torch::jit::Graph") @ByVal Graph copy();
-public native @UniquePtr Graph copyUnique();
+public native @UniquePtr @ByVal Graph copyUnique();
public native void remapTypes(@Const @ByRef TypeMapper type_map);
}
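For the return-type change above, a short sketch of what @UniquePtr @ByVal means for callers. The helper below is hypothetical and assumes a Graph obtained elsewhere (for example from a scripted or traced module):

```java
import org.bytedeco.pytorch.Graph;

public class CopyUniqueSketch {
    // Hypothetical helper: given an existing torch::jit::Graph wrapper, return an
    // independent deep copy. With @UniquePtr @ByVal, the std::unique_ptr produced by
    // the native copyUnique() is handed over to the Java wrapper, which then owns
    // the copy and releases it when closed or garbage-collected.
    static Graph deepCopy(Graph original) {
        return original.copyUnique();
    }
}
```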
2 changes: 1 addition & 1 deletion pytorch/src/gen/java/org/bytedeco/pytorch/GraphAttr.java
@@ -27,7 +27,7 @@ public class GraphAttr extends AttributeValue {
public GraphAttr(Pointer p) { super(p); }

public GraphAttr(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_) { super((Pointer)null); allocate(name, value_); }
-private native void allocate(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_);
+@UniquePtr @Name("std::make_unique<torch::jit::GraphAttr>") private native void allocate(@ByVal Symbol name, @SharedPtr("torch::jit::Graph") @ByVal Graph value_);
public native @SharedPtr("torch::jit::Graph") @ByRef Graph value();
public native @UniquePtr @ByVal AttributeValue clone();
public native JitAttributeKind kind();
@@ -26,7 +26,7 @@ public class IStreamAdapter extends ReadAdapterInterface {


public IStreamAdapter(@Cast("std::istream*") Pointer istream) { super((Pointer)null); allocate(istream); }
-private native void allocate(@Cast("std::istream*") Pointer istream);
+@UniquePtr @Name("std::make_unique<caffe2::serialize::IStreamAdapter>") private native void allocate(@Cast("std::istream*") Pointer istream);
public native @Cast("size_t") long size();
public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n, @Cast("const char*") BytePointer what/*=""*/);
public native @Cast("size_t") long read(@Cast("uint64_t") long pos, Pointer buf, @Cast("size_t") long n);
4 changes: 2 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSOptions.java
@@ -25,9 +25,9 @@ public class LBFGSOptions extends OptimizerCloneableLBFGSOptions {
public LBFGSOptions(Pointer p) { super(p); }

public LBFGSOptions(double lr/*=1*/) { super((Pointer)null); allocate(lr); }
-private native void allocate(double lr/*=1*/);
+@UniquePtr @Name("std::make_unique<torch::optim::LBFGSOptions>") private native void allocate(double lr/*=1*/);
public LBFGSOptions() { super((Pointer)null); allocate(); }
-private native void allocate();
+@UniquePtr @Name("std::make_unique<torch::optim::LBFGSOptions>") private native void allocate();
public native @ByRef @NoException(true) DoublePointer lr();
public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer max_iter();
public native @ByRef @NoException(true) LongOptional max_eval();
11 changes: 1 addition & 10 deletions pytorch/src/gen/java/org/bytedeco/pytorch/LBFGSParamState.java
@@ -23,18 +23,9 @@ public class LBFGSParamState extends OptimizerCloneableLBFGSParamState {
static { Loader.load(); }
/** Default native constructor. */
public LBFGSParamState() { super((Pointer)null); allocate(); }
-/** Native array allocator. Access with {@link Pointer#position(long)}. */
-public LBFGSParamState(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public LBFGSParamState(Pointer p) { super(p); }
-private native void allocate();
-private native void allocateArray(long size);
-@Override public LBFGSParamState position(long position) {
-return (LBFGSParamState)super.position(position);
-}
-@Override public LBFGSParamState getPointer(long i) {
-return new LBFGSParamState((Pointer)this).offsetAddress(i);
-}
+@UniquePtr @Name("std::make_unique<torch::optim::LBFGSParamState>") private native void allocate();

public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer func_evals();
public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer n_iter();