diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..6b45a59 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,54 @@ +name: Build all packages + +on: + push: + branches: + - main + workflow_dispatch: + pull_request: + branches: + - main +# on: ["push"] + +jobs: + build: + strategy: + matrix: + include: + - { target: linux-64, os: ubuntu-latest } + # - { target: osx-arm64, os: macos-14 } # mac runner crashes on mojo test + fail-fast: false + + runs-on: ${{ matrix.os }} + timeout-minutes: 5 + + defaults: + run: + shell: bash + + steps: + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Install magic + run: | + curl -ssL https://magic.modular.com | bash + + - name: Build package for target platform + env: + TARGET_PLATFORM: ${{ matrix.target }} + PREFIX_API_KEY: ${{ secrets.PREFIX_API_KEY }} + CONDA_BLD_PATH: ${{ runner.workspace }}/.rattler + + run: | + source $HOME/.bash_profile + + # Temporary method to fetch the rattler binary. + RATTLER_BINARY="rattler-build-aarch64-apple-darwin" + if [[ $TARGET_PLATFORM == "linux-64" ]]; then RATTLER_BINARY="rattler-build-x86_64-unknown-linux-musl"; fi + curl -SL --progress-bar https://github.com/prefix-dev/rattler-build/releases/latest/download/${RATTLER_BINARY} -o rattler-build + chmod +x rattler-build + + # Build and push + magic run build --target-platform=$TARGET_PLATFORM + magic run publish diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 48428e0..0b1f0e1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,23 +1,39 @@ name: Run Tests -on: ["push"] +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + +permissions: + contents: read + pull-requests: read jobs: test: - runs-on: ubuntu-latest - environment: basic + name: with ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + # os: ["ubuntu-latest", "macos-14"] # mac tests just hang for some reason? + os: ["ubuntu-latest"] + + runs-on: ${{ matrix.os }} + timeout-minutes: 5 + + defaults: + run: + shell: bash + steps: - - name: Check out repository code - uses: actions/checkout@v2 - - name: Install dependencies + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Install magic run: | - curl https://get.modular.com | MODULAR_AUTH=${{ secrets.MODULAR_AUTH }} sh - - modular auth ${{ secrets.MODULAR_AUTH }} - modular install mojo - pip install pytest - pip install git+https://github.com/guidorice/mojo-pytest.git - - name: Unit Tests + curl -ssL https://magic.modular.com | bash + + - name: Run tests run: | - export MODULAR_HOME="/home/runner/.modular" - export PATH="/home/runner/.modular/pkg/packages.modular.com_mojo/bin:$PATH" - pytest + source $HOME/.bash_profile + magic run tests + magic run benchmarks diff --git a/.gitignore b/.gitignore index 80bc7cb..3b9cbd2 100644 --- a/.gitignore +++ b/.gitignore @@ -161,3 +161,19 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
#.idea/ + +# pixi environments +.pixi +*.egg-info + +# magic environments +.magic + +# Rattler +output + +# Mojo +**/*.mojopkg + +# VSCode +.vscode diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 310b24b..5d8d8c1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,7 +9,7 @@ repos: hooks: - id: mojo-format name: mojo-format - entry: mojo format -l 120 + entry: magic run mojo format -l 120 language: system files: '\.(mojo|🔥)$' stages: [commit] diff --git a/CHANGELOG.md b/CHANGELOG.md index f22d166..e3fc3ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] - yyyy-mm-dd +## [0.1.8] - 2024-09-13 + +- Lots of changes since Mojo 24.5. Sorry, I don't have a more granular changelog! + ## [0.0.2] - 2024-06-19 ### Added diff --git a/README.md b/README.md index 4e9ea08..782b131 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,12 @@ Experiments in porting over Golang stdlib into Mojo and extra goodies that make use of it. It will not always be a 1:1 port, it's more so code inspired by the Golang stdlib and the Mojo community's code. This is not intended to be a full port, but rather a learning exercise and a way to experiment with Mojo's capabilities. Please feel free to contribute or use this as a starting point for your own projects! The codebase will remain in flux and will evolve with Mojo as future releases are created. +## Installation + +1. First, you'll need to configure your `mojoproject.toml` file to include my Conda channel. Add `"https://repo.prefix.dev/mojo-community"` to the list of channels. +2. Next, add `gojo` to your project's dependencies by running `magic add gojo`. +3. Finally, run `magic install` to install `gojo`. You should see the `.mojopkg` files in `$CONDA_PREFIX/lib/mojo/`. + ## Projects that use Gojo ### My projects @@ -46,231 +52,7 @@ All of these packages are partially implemented and do not support unicode chara ## Usage -Some basic usage examples. These examples may fall out of sync, so please check out the tests for usage of the various packages! - -You can copy over the modules you want to use from the `gojo` or `goodies` directories, or you can build the package by running: -For `gojo`: `mojo package gojo -I .` - -`bufio.Scanner` - -```mojo -from tests.wrapper import MojoTest -from gojo.bytes import buffer -from gojo.io import FileWrapper -from gojo.bufio import Reader, Scanner, scan_words, scan_bytes - - -fn test_scan_words() raises: - var test = MojoTest("Testing scan_words") - - # Create a reader from a string buffer - var s: String = "Testing this string!"
- var buf = buffer.new_buffer(s) - var r = Reader(buf^) - - # Create a scanner from the reader - var scanner = Scanner(r^) - scanner.split = scan_words - - var expected_results = List[String]() - expected_results.append("Testing") - expected_results.append("this") - expected_results.append("string!") - var i = 0 - - while scanner.scan(): - test.assert_equal(scanner.current_token(), expected_results[i]) - i += 1 -``` - -`bufio.Reader` - -```mojo -from tests.wrapper import MojoTest -from gojo.bytes import buffer -from gojo.builtins.bytes import to_string -from gojo.bufio import Reader, Scanner, scan_words, scan_bytes, Writer -from gojo.io import read_all, FileWrapper -from gojo.strings import StringBuilder - - -fn test_read(): - var test = MojoTest("Testing bufio.Reader.read") - - # Create a reader from a string buffer - var s: String = "Hello" - var buf = buffer.new_buffer(s) - var reader = Reader(buf^) - - # Read the buffer into List[UInt8] and then add more to List[UInt8] - var dest = List[UInt8](capacity=256) - _ = reader.read(dest) - dest.extend(String(" World!").as_bytes()) - - test.assert_equal(to_string(dest), "Hello World!") -``` - -`bytes.Buffer` - -```mojo -from tests.wrapper import MojoTest -from gojo.bytes import new_buffer -from gojo.bytes.buffer import Buffer - - -fn test_read() raises: - var test = MojoTest("Testing bytes.Buffer.read") - var s: String = "Hello World!" - var buf = new_buffer(s) - var dest = List[UInt8](capacity=16) - _ = buf.read(dest) - dest.append(0) - test.assert_equal(String(dest), s) - - -fn test_write() raises: - var test = MojoTest("Testing bytes.Buffer.write") - var b = List[UInt8](capacity=16) - var buf = new_buffer(b^) - _ = buf.write(String("Hello World!").as_bytes_slice()) - test.assert_equal(str(buf), String("Hello World!")) -``` - -`bytes.Reader` - -```mojo -from tests.wrapper import MojoTest -from gojo.bytes import reader, buffer -import gojo.io - - -fn test_read() raises: - var test = MojoTest("Testing bytes.Reader.read") - var reader = reader.new_reader("0123456789") - var dest = List[UInt8](capacity=16) - _ = reader.read(dest) - dest.append(0) - test.assert_equal(String(dest), "0123456789") - - # Test negative seek - alias NEGATIVE_POSITION_ERROR = "bytes.Reader.seek: negative position" - var position: Int - var err: Error - position, err = reader.seek(-1, io.SEEK_START) - - if not err: - raise Error("Expected error not raised while testing negative seek.") - - if str(err) != NEGATIVE_POSITION_ERROR: - raise err - - test.assert_equal(str(err), NEGATIVE_POSITION_ERROR) -``` - -`io.FileWrapper` - -```mojo -from tests.wrapper import MojoTest -from gojo.io import read_all, FileWrapper - - -fn test_read() raises: - var test = MojoTest("Testing FileWrapper.read") - var file = FileWrapper("tests/data/test.txt", "r") - var dest = List[UInt8](capacity=16) - _ = file.read(dest) - dest.append(0) - test.assert_equal(String(dest), "12345") -``` - -`io.STDWriter` - -```mojo -from tests.wrapper import MojoTest -from gojo.syscall import FD -from gojo.io import STDWriter - - -fn test_writer() raises: - var test = MojoTest("Testing STDWriter.write") - var writer = STDWriter[FD.STDOUT]() - _ = writer.write_string("") -``` - -`fmt.sprintf` - -```mojo -from tests.wrapper import MojoTest -from gojo.fmt import sprintf, printf - - -fn test_sprintf() raises: - var test = MojoTest("Testing sprintf") - var s = sprintf( - "Hello, %s. I am %d years old. More precisely, I am %f years old. 
It is %t that I like Mojo!", - String("world"), - 29, - Float64(29.5), - True, - ) - test.assert_equal( - s, - "Hello, world. I am 29 years old. More precisely, I am 29.5 years old. It is True that I like Mojo!", - ) - - s = sprintf("This is a number: %d. In base 16: %x. In base 16 upper: %X.", 42, 42, 42) - test.assert_equal(s, "This is a number: 42. In base 16: 2a. In base 16 upper: 2A.") - - s = sprintf("Hello %s", String("world").as_bytes()) - test.assert_equal(s, "Hello world") -``` - -`strings.Reader` - -```mojo -from tests.wrapper import MojoTest -from gojo.strings import StringBuilder, Reader, new_reader -import gojo.io - - -fn test_read() raises: - var test = MojoTest("Testing strings.Reader.read") - var example: String = "Hello, World!" - var reader = new_reader("Hello, World!") - - # Test reading from the reader. - var buffer = List[UInt8](capacity=16) - var bytes_read = reader.read(buffer) - buffer.append(0) - - test.assert_equal(bytes_read[0], len(example)) - test.assert_equal(String(buffer), "Hello, World!") -``` - -`strings.StringBuilder` - -```mojo -from tests.wrapper import MojoTest -from gojo.strings import StringBuilder - -fn test_string_builder() raises: - var test = MojoTest("Testing strings.StringBuilder") - - # Create a string from the builder by writing strings to it. - var builder = StringBuilder() - - for i in range(3): - _ = builder.write_string("Lorem ipsum dolor sit amet ") - - test.assert_equal( - str(builder), - ( - "Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor" - " sit amet " - ), - ) - -``` +Please check out the `test`, `examples`, and `benchmarks` directories for usage of the various packages! ## Sharp Edges & Bugs diff --git a/benchmarks/scanner.mojo b/benchmarks/scanner.mojo new file mode 100644 index 0000000..a97e7db --- /dev/null +++ b/benchmarks/scanner.mojo @@ -0,0 +1,123 @@ +import benchmark +import gojo.bufio +import gojo.bytes +import gojo.strings +import testing + + +alias FIRE = "🔥" +alias NEWLINE = "\n" +alias CARRIAGE_RETURN = "\r" +alias SPACE = " " + + +fn benchmark_scan_runes[batches: Int]() -> None: + var builder = strings.StringBuilder(capacity=batches * 4) + for _ in range(batches): + _ = builder.write_string(FIRE) + + var scanner = bufio.Scanner[bufio.scan_runes](bytes.Buffer(buf=str(builder).as_bytes()), capacity=batches) + while scanner.scan(): + _ = scanner.current_token() + + +fn benchmark_scan_words[batches: Int]() -> None: + var builder = strings.StringBuilder(capacity=batches * 5) + for _ in range(batches): + _ = builder.write_string(FIRE) + _ = builder.write_string(SPACE) + + var scanner = bufio.Scanner[bufio.scan_words](bytes.Buffer(str(builder)), capacity=batches) + while scanner.scan(): + _ = scanner.current_token() + + +fn benchmark_scan_lines[batches: Int]() -> None: + var builder = strings.StringBuilder(capacity=batches * 5) + for _ in range(batches): + _ = builder.write_string(FIRE) + _ = builder.write_string(NEWLINE) + + var scanner = bufio.Scanner(bytes.Buffer(str(builder)), capacity=batches) + while scanner.scan(): + _ = scanner.current_token() + + +fn benchmark_scan_bytes[batches: Int]() -> None: + var builder = strings.StringBuilder(capacity=batches) + for _ in range(batches): + _ = builder.write_string(SPACE) + + var scanner = bufio.Scanner[bufio.scan_bytes](bytes.Buffer(str(builder)), capacity=batches) + while scanner.scan(): + _ = scanner.current_token() + + +fn benchmark_newline_split[batches: Int]() -> None: + var builder = strings.StringBuilder(capacity=batches * 5) + for _ in 
range(batches): + _ = builder.write_string(FIRE) + _ = builder.write_string(NEWLINE) + + try: + var lines = str(builder).split(NEWLINE) + for line in lines: + _ = line + except e: + pass + + +fn main(): + # There's a time penalty for building the input text, for now. + print("Running benchmark_scan_runes - 100") + var report = benchmark.run[benchmark_scan_runes[100]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_runes - 1000") + report = benchmark.run[benchmark_scan_runes[1000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_runes - 10000") + report = benchmark.run[benchmark_scan_runes[10000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_words - 100") + report = benchmark.run[benchmark_scan_words[100]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_words - 1000") + report = benchmark.run[benchmark_scan_words[1000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_words - 10000") + report = benchmark.run[benchmark_scan_words[10000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_lines - 100") + report = benchmark.run[benchmark_scan_lines[100]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_lines - 1000") + report = benchmark.run[benchmark_scan_lines[1000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_lines - 10000") + report = benchmark.run[benchmark_scan_lines[10000]](max_iters=20) + report.print(benchmark.Unit.ms) + + # To compare against scan lines + print("Running benchmark_newline_split - 10000") + report = benchmark.run[benchmark_newline_split[10000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_bytes - 100") + report = benchmark.run[benchmark_scan_bytes[100]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_bytes - 1000") + report = benchmark.run[benchmark_scan_bytes[1000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_scan_bytes - 10000") + report = benchmark.run[benchmark_scan_bytes[10000]](max_iters=20) + report.print(benchmark.Unit.ms) diff --git a/benchmarks/std_writer.mojo b/benchmarks/std_writer.mojo new file mode 100644 index 0000000..db784c5 --- /dev/null +++ b/benchmarks/std_writer.mojo @@ -0,0 +1,17 @@ +# fn test_std_writer_speed() raises: +# """STDWriter is roughly 6-7x faster currently.""" +# var print_start_time = now() +# for i in range(1, 10000): +# print(i) +# var print_execution_time = now() - print_start_time + +# # Create stdout writer +# var writer = STDWriter(1) +# var writer_start_time = now() +# for i in range(1, 10000): +# _ = writer.write_string(str(i)) +# var writer_execution_time = now() - writer_start_time + +# print("\n\nprint execution time (s): " + str((print_execution_time) / 1e9)) +# print("writer execution time (s): " + str((writer_execution_time) / 1e9)) +# print("Writer is ", str(print_execution_time // writer_execution_time) + "x faster") diff --git a/benchmarks/string_builder.mojo b/benchmarks/string_builder.mojo new file mode 100644 index 0000000..423db1f --- /dev/null +++ b/benchmarks/string_builder.mojo @@ -0,0 +1,70 @@ +import benchmark +from gojo.strings import StringBuilder +from gojo.bytes.buffer import Buffer + +alias SAMPLE_TEXT = """Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. 
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.""" + + +fn benchmark_concat[batches: Int](): + var vec = List[String](capacity=batches * len(SAMPLE_TEXT)) + for _ in range(batches): + vec.append(SAMPLE_TEXT) + + var concat_output: String = "" + for i in range(len(vec)): + concat_output += vec[i] + _ = concat_output + + +fn benchmark_string_builder[batches: Int](): + var new_builder = StringBuilder(capacity=batches * len(SAMPLE_TEXT)) + for _ in range(batches): + _ = new_builder.write_string(SAMPLE_TEXT) + _ = str(new_builder) + + +fn benchmark_bytes_buffer[batches: Int](): + var buffer = Buffer(capacity=batches * len(SAMPLE_TEXT)) + for _ in range(batches): + _ = buffer.write_string(SAMPLE_TEXT) + _ = str(buffer) + + +fn main(): + # There's a performance penalty for benchmark concat bc it also includes + # the building of the list of strings it concatenates. Trying to build it at comptime takes a loooong time. + print("Running benchmark_concat - 100 batches") + var report = benchmark.run[benchmark_concat[100]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_string_builder - 100 batches") + report = benchmark.run[benchmark_string_builder[100]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_bytes_buffer - 100 batches") + report = benchmark.run[benchmark_bytes_buffer[100]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_concat - 1000 batches") + report = benchmark.run[benchmark_concat[1000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_string_builder - 1000 batches") + report = benchmark.run[benchmark_string_builder[1000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_bytes_buffer - 1000 batches") + report = benchmark.run[benchmark_bytes_buffer[1000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_concat - 10000 batches") + report = benchmark.run[benchmark_concat[10000]](max_iters=2) + report.print(benchmark.Unit.ms) + + print("Running benchmark_string_builder - 10000 batches") + report = benchmark.run[benchmark_string_builder[10000]](max_iters=20) + report.print(benchmark.Unit.ms) + + print("Running benchmark_bytes_buffer - 10000 batches") + report = benchmark.run[benchmark_bytes_buffer[10000]](max_iters=20) + report.print(benchmark.Unit.ms) diff --git a/benchmarks/test_performance.mojo b/benchmarks/test_performance.mojo deleted file mode 100644 index fd2a8f3..0000000 --- a/benchmarks/test_performance.mojo +++ /dev/null @@ -1,149 +0,0 @@ -from time import now -from gojo.strings import StringBuilder -from gojo.bytes.buffer import Buffer - -# from gojo.io import STDWriter - - -fn test_string_builder() raises: - print("Testing new string builder performance") - # Create a string from the buffer - var new_builder_write_start_time = now() - var new_builder = StringBuilder() - for _ in range(10000): - _ = new_builder.write_string( - "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod" - " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" - " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea" - " commodo consequat. 
Duis aute irure dolor in reprehenderit in voluptate" - " velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint" - " occaecat cupidatat non proident, sunt in culpa qui officia deserunt" - " mollit anim id est laborum." - ) - var new_builder_write_execution_time = now() - new_builder_write_start_time - - var new_builder_start_time = now() - var new_output = str(new_builder) - var new_builder_execution_time = now() - new_builder_start_time - print("StringBuilder buffer len", len(new_output), "\n") - - var new_builder_render_start_time = now() - var new_output_render = str(new_builder.render()) - var new_builder_render_execution_time = now() - new_builder_render_start_time - print("StringBuilder buffer len", len(new_output_render), "\n") - # print(new_output_render) - - # Create a string using the + operator - print("Testing string concatenation performance") - var vec = List[String]() - for i in range(10000): - vec.append( - "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod" - " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" - " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea" - " commodo consequat. Duis aute irure dolor in reprehenderit in voluptate" - " velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint" - " occaecat cupidatat non proident, sunt in culpa qui officia deserunt" - " mollit anim id est laborum." - ) - - var concat_start_time = now() - var concat_output: String = "" - for i in range(len(vec)): - concat_output += vec[i] - var concat_execution_time = now() - concat_start_time - print("Concat len", len(concat_output)) - - print("Testing new buffer performance") - # Create a string from the buffer - var new_buffer_write_start_time = now() - var new_buffer = Buffer() - for _ in range(10000): - _ = new_buffer.write_string( - "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod" - " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" - " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea" - " commodo consequat. Duis aute irure dolor in reprehenderit in voluptate" - " velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint" - " occaecat cupidatat non proident, sunt in culpa qui officia deserunt" - " mollit anim id est laborum." 
- ) - var new_buffer_write_execution_time = now() - new_buffer_write_start_time - - var new_buffer_start_time = now() - var new_buffer_output = str(new_buffer.render()) - var new_buffer_execution_time = now() - new_buffer_start_time - print("New buffer len", len(new_output), "\n") - - print("\nWrite times:") - print("StringBuilder:", "(", new_builder_write_execution_time, "ns)") - print("BufferNew:", "(", new_buffer_write_execution_time, "ns)") - - print("\nExecution times:") - print("StringBuilder:", "(", new_builder_execution_time, "ns)") - print("StringBuilder Render:", "(", new_builder_render_execution_time, "ns)") - print("String concat:", "(", concat_execution_time, "ns)") - print("BufferNew:", "(", new_buffer_execution_time, "ns)") - - print("\nTotal Execution times:") - print("StringBuilder:", "(", new_builder_execution_time + new_builder_write_execution_time, "ns)") - print("StringBuilder Render:", "(", new_builder_render_execution_time + new_builder_write_execution_time, "ns)") - print("String concat:", "(", concat_execution_time, "ns)") - - print( - ": StringBuilder is ", - str(concat_execution_time // (new_builder_execution_time + new_builder_write_execution_time)) + "x faster", - ": StringBuilder Render is ", - str(concat_execution_time // (new_builder_render_execution_time + new_builder_write_execution_time)) - + "x faster", - ": BufferNew is ", - str(concat_execution_time // (new_buffer_execution_time + new_buffer_write_execution_time)) + "x faster", - ) - - -# fn test_std_writer_speed() raises: -# """STDWriter is roughly 6-7x faster currently.""" -# var print_start_time = now() -# for i in range(1, 10000): -# print(i) -# var print_execution_time = now() - print_start_time - -# # Create stdout writer -# var writer = STDWriter(1) -# var writer_start_time = now() -# for i in range(1, 10000): -# _ = writer.write_string(str(i)) -# var writer_execution_time = now() - writer_start_time - -# print("\n\nprint execution time (s): " + str((print_execution_time) / 1e9)) -# print("writer execution time (s): " + str((writer_execution_time) / 1e9)) -# print("Writer is ", str(print_execution_time // writer_execution_time) + "x faster") - - -fn main() raises: - # test_std_writer_speed() - test_string_builder() - - # print("Testing new string builder performance") - # # Create a string from the buffer - # var new_builder_write_start_time = now() - # var new_builder = VectorizedStringBuilder() - # for _ in range(10000): - # _ = new_builder.write_string( - # "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod" - # " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" - # " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea" - # " commodo consequat. Duis aute irure dolor in reprehenderit in voluptate" - # " velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint" - # " occaecat cupidatat non proident, sunt in culpa qui officia deserunt" - # " mollit anim id est laborum." 
- # ) - # var new_builder_write_execution_time = now() - new_builder_write_start_time - # print("StringBuilder:", "(", new_builder_write_execution_time, "ns)") - - # var new_builder_start_time = now() - # var new_output = str(new_builder) - # var new_builder_execution_time = now() - new_builder_start_time - # print(len(new_output)) - # print(new_output) - # print("StringBuilder:", "(", new_builder_execution_time, "ns)") diff --git a/examples/__init__.mojo b/examples/__init__.mojo deleted file mode 100644 index e69de29..0000000 diff --git a/examples/scanner/__init__.mojo b/examples/scanner/__init__.mojo deleted file mode 100644 index e69de29..0000000 diff --git a/examples/scanner/scan_text.mojo b/examples/scanner/scan_text.mojo index 712d1cb..d724e6a 100644 --- a/examples/scanner/scan_text.mojo +++ b/examples/scanner/scan_text.mojo @@ -4,7 +4,7 @@ from gojo.bufio import Reader, Scanner, scan_words fn print_words(owned text: String): # Create a reader from a string buffer - var buf = buffer.new_buffer(text^) + var buf = buffer.Buffer(buf=text.as_bytes()) var r = Reader(buf^) # Create a scanner from the reader @@ -16,7 +16,7 @@ fn print_words(owned text: String): fn print_lines(owned text: String): # Create a reader from a string buffer - var buf = buffer.new_buffer(text^) + var buf = buffer.Buffer(buf=text.as_bytes()) var r = Reader(buf^) # Create a scanner from the reader diff --git a/examples/tcp/__init__.mojo b/examples/tcp/__init__.mojo deleted file mode 100644 index e69de29..0000000 diff --git a/examples/tcp/dial_client.mojo b/examples/tcp/dial_client.mojo index aa98fd2..65eacd5 100644 --- a/examples/tcp/dial_client.mojo +++ b/examples/tcp/dial_client.mojo @@ -24,7 +24,7 @@ fn main() raises: return # Read the response from the connection - var response = List[UInt8](capacity=4096) + var response = List[UInt8, True](capacity=4096) var bytes_read: Int = 0 bytes_read, err = connection.read(response) if err and str(err) != io.EOF: diff --git a/examples/tcp/get_request.mojo b/examples/tcp/get_request.mojo index 886cf8f..4ebb909 100644 --- a/examples/tcp/get_request.mojo +++ b/examples/tcp/get_request.mojo @@ -18,7 +18,7 @@ fn main() raises: return # Read the response from the connection - var response = List[UInt8](capacity=4096) + var response = List[UInt8, True](capacity=4096) var bytes_read: Int = 0 bytes_read, err = connection.read(response) if err: diff --git a/examples/tcp/listener_server.mojo b/examples/tcp/listener_server.mojo index 606e8e4..efee547 100644 --- a/examples/tcp/listener_server.mojo +++ b/examples/tcp/listener_server.mojo @@ -9,11 +9,11 @@ fn main() raises: var connection = listener.accept() # Read the contents of the message from the client. 
- var bytes = List[UInt8](capacity=4096) + var bytes = List[UInt8, True](capacity=4096) var bytes_read: Int var err: Error bytes_read, err = connection.read(bytes) - if str(err) != io.EOF: + if str(err) != str(io.EOF): raise err bytes.append(0) diff --git a/examples/tcp/socket_client.mojo b/examples/tcp/socket_client.mojo index 8b51d46..26f2e58 100644 --- a/examples/tcp/socket_client.mojo +++ b/examples/tcp/socket_client.mojo @@ -21,10 +21,10 @@ fn main() raises: bytes_sent, err = socket.write(message.as_bytes()) print("Message sent:", message) - var bytes = List[UInt8](capacity=16) + var bytes = List[UInt8, True](capacity=16) var bytes_read: Int bytes_read, err = socket.read(bytes) - if str(err) != io.EOF: + if str(err) != str(io.EOF): raise err bytes.append(0) diff --git a/examples/tcp/socket_server.mojo b/examples/tcp/socket_server.mojo index 7d5e963..de81b28 100644 --- a/examples/tcp/socket_server.mojo +++ b/examples/tcp/socket_server.mojo @@ -12,18 +12,18 @@ fn main() raises: # Bind server to port 8081 socket.bind(host, port) socket.listen() - print("Listening on", socket.local_address_as_tcp()) + print("Listening on", str(socket.local_address_as_tcp())) while True: # Accept connections from clients and serve them. var connection = socket.accept() - print("Serving", connection.remote_address_as_tcp()) + print("Serving", str(connection.remote_address_as_tcp())) # Read the contents of the message from the client. - var bytes = List[UInt8](capacity=4096) + var bytes = List[UInt8, True](capacity=4096) var bytes_read: Int var err: Error bytes_read, err = connection.read(bytes) - if str(err) != io.EOF: + if str(err) != str(io.EOF): raise err bytes.append(0) diff --git a/examples/udp/__init__.mojo b/examples/udp/__init__.mojo deleted file mode 100644 index e69de29..0000000 diff --git a/gojo/__init__.mojo b/gojo/__init__.mojo deleted file mode 100644 index e69de29..0000000 diff --git a/gojo/bufio/__init__.mojo b/gojo/bufio/__init__.mojo deleted file mode 100644 index 2255063..0000000 --- a/gojo/bufio/__init__.mojo +++ /dev/null @@ -1,2 +0,0 @@ -from .bufio import Reader, Writer, ReadWriter, new_writer, new_reader, new_read_writer -from .scan import Scanner, scan_words, scan_bytes, scan_lines, scan_runes diff --git a/gojo/bufio/bufio.mojo b/gojo/bufio/bufio.mojo deleted file mode 100644 index ffe8c79..0000000 --- a/gojo/bufio/bufio.mojo +++ /dev/null @@ -1,1005 +0,0 @@ -from collections import InlineList -import ..io -from ..builtins import copy, panic -from ..builtins.bytes import index_byte -from ..strings import StringBuilder - -alias MIN_READ_BUFFER_SIZE = 16 -alias MAX_CONSECUTIVE_EMPTY_READS = 100 - -alias ERR_INVALID_UNREAD_BYTE = "bufio: invalid use of unread_byte" -alias ERR_INVALID_UNREAD_RUNE = "bufio: invalid use of unread_rune" -alias ERR_BUFFER_FULL = "bufio: buffer full" -alias ERR_NEGATIVE_COUNT = "bufio: negative count" -alias ERR_NEGATIVE_READ = "bufio: reader returned negative count from Read" -alias ERR_NEGATIVE_WRITE = "bufio: writer returned negative count from write" - - -# buffered input -# TODO: Uncomment write_to and write_buf once the bug with the trait's Span argument is fixed. 
-struct Reader[R: io.Reader, size: Int = MIN_READ_BUFFER_SIZE](Sized, io.Reader, io.ByteReader, io.ByteScanner): - """Implements buffering for an io.Reader object.""" - - var buf: InlineList[UInt8, size] - var reader: R # reader provided by the client - var read_pos: Int - var write_pos: Int # buf read and write positions - var last_byte: Int # last byte read for unread_byte; -1 means invalid - var last_rune_size: Int # size of last rune read for unread_rune; -1 means invalid - var err: Error - - @always_inline - fn __init__( - inout self, - owned reader: R, - buf: InlineList[UInt8, size] = InlineList[UInt8, size](), - read_pos: Int = 0, - write_pos: Int = 0, - last_byte: Int = -1, - last_rune_size: Int = -1, - ): - self.buf = InlineList[UInt8, size]() - for element in buf: - self.buf.append(element[]) - - self.reader = reader^ - self.read_pos = read_pos - self.write_pos = write_pos - self.last_byte = last_byte - self.last_rune_size = last_rune_size - self.err = Error() - - @always_inline - fn __moveinit__(inout self, owned existing: Self): - self.buf = InlineList[UInt8, size]() - for element in existing.buf: - self.buf.append(element[]) - - self.reader = existing.reader^ - self.read_pos = existing.read_pos - self.write_pos = existing.write_pos - self.last_byte = existing.last_byte - self.last_rune_size = existing.last_rune_size - self.err = existing.err^ - - # size returns the size of the underlying buffer in bytes. - @always_inline - fn __len__(self) -> Int: - return len(self.buf) - - # reset discards any buffered data, resets all state, and switches - # the buffered reader to read from r. - # Calling reset on the zero value of [Reader] initializes the internal buffer - # to the default size. - # Calling self.reset(b) (that is, resetting a [Reader] to itself) does nothing. - # fn reset[R: io.Reader](self, reader: R): - # # If a Reader r is passed to NewReader, NewReader will return r. - # # Different layers of code may do that, and then later pass r - # # to reset. Avoid infinite recursion in that case. - # if self == reader: - # return - - # # if self.buf == nil: - # # self.buf = make(InlineList[UInt8, io.BUFFER_SIZE], io.BUFFER_SIZE) - - # self.reset(self.buf, r) - - @always_inline - fn as_bytes_slice(inout self) -> Span[UInt8, True, __lifetime_of(self)]: - """Returns the internal buffer data as a Span[UInt8].""" - return Span[UInt8, True, __lifetime_of(self)](array=Reference(self.buf._array)) - - @always_inline - fn reset(inout self, buf: InlineList[UInt8, size], owned reader: R): - """Discards any buffered data, resets all state, and switches - the buffered reader to read from r. - Calling reset on the zero value of [Reader] initializes the internal buffer - to the default size. - Calling self.reset(b) (that is, resetting a [Reader] to itself) does nothing.""" - self = Reader[R, size]( - buf=buf, - reader=reader^, - last_byte=-1, - last_rune_size=-1, - ) - - fn fill(inout self): - """Reads a new chunk into the buffer.""" - # Slide existing data to beginning. - if self.read_pos > 0: - var data_to_slide = self.as_bytes_slice()[self.read_pos : self.write_pos] - # TODO: Temp copying of elements until I figure out a better pattern or slice refs are added - for i in range(len(data_to_slide)): - self.buf[i] = data_to_slide[i] - - # self.buf.reserve(current_capacity) - self.write_pos -= self.read_pos - self.read_pos = 0 - - # Compares to the length of the entire InlineList[UInt8, io.BUFFER_SIZE] object, including 0 initialized positions. - # IE. 
var b = InlineList[UInt8, io.BUFFER_SIZE](capacity=4096), then trying to write at b[4096] and onwards will fail. - if self.write_pos >= io.BUFFER_SIZE: - panic("bufio.Reader: tried to fill full buffer") - - # Read new data: try a limited number of times. - var i: Int = MAX_CONSECUTIVE_EMPTY_READS - while i > 0: - # TODO: Using temp until slicing can return a Reference, does reading directly into a Span of self.buf work? - # Maybe we need to read into the end of the buffer. - var span = self.as_bytes_slice() - var bytes_read: Int - var err: Error - bytes_read, err = self.reader._read(span, len(self.buf)) - if bytes_read < 0: - panic(ERR_NEGATIVE_READ) - - self.write_pos += bytes_read - - if err: - self.err = err - return - - if bytes_read > 0: - return - - i -= 1 - - self.err = Error(str(io.ERR_NO_PROGRESS)) - - @always_inline - fn read_error(inout self) -> Error: - if not self.err: - return Error() - - var err = self.err - self.err = Error() - return err - - fn peek(self: Reference[Self, True], number_of_bytes: Int) -> (Span[UInt8, self.is_mutable, self.lifetime], Error): - """Returns the next n bytes without advancing the reader. The bytes stop - being valid at the next read call. If Peek returns fewer than n bytes, it - also returns an error explaining why the read is short. The error is - [ERR_BUFFER_FULL] if number_of_bytes is larger than b's buffer size. - - Calling Peek prevents a [Reader.unread_byte] or [Reader.unread_rune] call from succeeding - until the next read operation. - - Args: - number_of_bytes: The number of bytes to peek. - """ - if number_of_bytes < 0: - return self[].as_bytes_slice()[0:0], Error(ERR_NEGATIVE_COUNT) - - self[].last_byte = -1 - self[].last_rune_size = -1 - - while ( - self[].write_pos - self[].read_pos < number_of_bytes and self[].write_pos - self[].read_pos < io.BUFFER_SIZE - ): - self[].fill() # self.write_pos-self.read_pos < self.capacity => buffer is not full - - if number_of_bytes > io.BUFFER_SIZE: - return self[].as_bytes_slice()[self[].read_pos : self[].write_pos], Error(ERR_BUFFER_FULL) - - # 0 <= n <= io.BUFFER_SIZE - var err = Error() - var available_space = self[].write_pos - self[].read_pos - if available_space < number_of_bytes: - # not enough data in buffer - err = self[].read_error() - if not err: - err = Error(ERR_BUFFER_FULL) - - return self[].as_bytes_slice()[self[].read_pos : self[].read_pos + number_of_bytes], err - - fn discard(inout self, number_of_bytes: Int) -> (Int, Error): - """Discard skips the next n bytes, returning the number of bytes discarded. - - If Discard skips fewer than n bytes, it also returns an error. - If 0 <= number_of_bytes <= self.buffered(), Discard is guaranteed to succeed without - reading from the underlying io.Reader. - """ - if number_of_bytes < 0: - return 0, Error(ERR_NEGATIVE_COUNT) - - if number_of_bytes == 0: - return 0, Error() - - self.last_byte = -1 - self.last_rune_size = -1 - - var remain = number_of_bytes - while True: - var skip = self.buffered() - if skip == 0: - self.fill() - skip = self.buffered() - - if skip > remain: - skip = remain - - self.read_pos += skip - remain -= skip - if remain == 0: - return number_of_bytes, Error() - - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): - """Reads data into dest. - It returns the number of bytes read into dest. - The bytes are taken from at most one Read on the underlying [Reader], - hence n may be less than len(src). - To read exactly len(src) bytes, use io.ReadFull(b, src). 
- If the underlying [Reader] can return a non-zero count with io.EOF, - then this Read method can do so as well; see the [io.Reader] docs.""" - if capacity == 0: - if self.buffered() > 0: - return 0, Error() - return 0, self.read_error() - - var bytes_read: Int = 0 - if self.read_pos == self.write_pos: - if capacity >= len(self.buf): - # Large read, empty buffer. - # Read directly into dest to avoid copy. - var bytes_read: Int - bytes_read, self.err = self.reader._read(dest, capacity) - - if bytes_read < 0: - panic(ERR_NEGATIVE_READ) - - if bytes_read > 0: - self.last_byte = int(dest[bytes_read - 1]) - self.last_rune_size = -1 - - return bytes_read, self.read_error() - - # One read. - # Do not use self.fill, which will loop. - self.read_pos = 0 - self.write_pos = 0 - var buf = self.as_bytes_slice() # TODO: I'm hoping this reads into self.data directly lol - var bytes_read: Int - bytes_read, self.err = self.reader._read(buf, len(buf)) - - if bytes_read < 0: - panic(ERR_NEGATIVE_READ) - - if bytes_read == 0: - return 0, self.read_error() - - self.write_pos += bytes_read - - # copy as much as we can - var source = self.as_bytes_slice()[self.read_pos : self.write_pos] - bytes_read = 0 - var start = len(dest) - - for i in range(len(source)): - dest[i + start] = source[i] - bytes_read += 1 - dest._len += bytes_read - self.read_pos += bytes_read - self.last_byte = int(self.buf[self.read_pos - 1]) - self.last_rune_size = -1 - return bytes_read, Error() - - @always_inline - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): - """Reads data into dest. - It returns the number of bytes read into dest. - The bytes are taken from at most one Read on the underlying [Reader], - hence n may be less than len(src). - To read exactly len(src) bytes, use io.ReadFull(b, src). - If the underlying [Reader] can return a non-zero count with io.EOF, - then this Read method can do so as well; see the [io.Reader] docs.""" - - var span = Span(dest) - - var bytes_read: Int - var err: Error - bytes_read, err = self._read(span, dest.capacity) - dest.size += bytes_read - - return bytes_read, err - - @always_inline - fn read_byte(inout self) -> (UInt8, Error): - """Reads and returns a single byte from the internal buffer. If no byte is available, returns an error.""" - self.last_rune_size = -1 - while self.read_pos == self.write_pos: - if self.err: - return UInt8(0), self.read_error() - self.fill() # buffer is empty - - var c = self.as_bytes_slice()[self.read_pos] - self.read_pos += 1 - self.last_byte = int(c) - return c, Error() - - @always_inline - fn unread_byte(inout self) -> Error: - """Unreads the last byte. Only the most recently read byte can be unread. - - unread_byte returns an error if the most recent method called on the - [Reader] was not a read operation. Notably, [Reader.peek], [Reader.discard], and [Reader.write_to] are not - considered read operations. - """ - if self.last_byte < 0 or self.read_pos == 0 and self.write_pos > 0: - return Error(ERR_INVALID_UNREAD_BYTE) - - # self.read_pos > 0 or self.write_pos == 0 - if self.read_pos > 0: - self.read_pos -= 1 - else: - # self.read_pos == 0 and self.write_pos == 0 - self.write_pos = 1 - - self.as_bytes_slice()[self.read_pos] = self.last_byte - self.last_byte = -1 - self.last_rune_size = -1 - return Error() - - # # read_rune reads a single UTF-8 encoded Unicode character and returns the - # # rune and its size in bytes. If the encoded rune is invalid, it consumes one byte - # # and returns unicode.ReplacementChar (U+FFFD) with a size of 1. 
- # fn read_rune(inout self) (r rune, size int, err error): - # for self.read_pos+utf8.UTFMax > self.write_pos and !utf8.FullRune(self.as_bytes_slice()[self.read_pos:self.write_pos]) and self.err == nil and self.write_pos-self.read_pos < self.buf.capacity: - # self.fill() # self.write_pos-self.read_pos < len(buf) => buffer is not full - - # self.last_rune_size = -1 - # if self.read_pos == self.write_pos: - # return 0, 0, self.read_poseadErr() - - # r, size = rune(self.as_bytes_slice()[self.read_pos]), 1 - # if r >= utf8.RuneSelf: - # r, size = utf8.DecodeRune(self.as_bytes_slice()[self.read_pos:self.write_pos]) - - # self.read_pos += size - # self.last_byte = int(self.as_bytes_slice()[self.read_pos-1]) - # self.last_rune_size = size - # return r, size, nil - - # # unread_rune unreads the last rune. If the most recent method called on - # # the [Reader] was not a [Reader.read_rune], [Reader.unread_rune] returns an error. (In this - # # regard it is stricter than [Reader.unread_byte], which will unread the last byte - # # from any read operation.) - # fn unread_rune() error: - # if self.last_rune_size < 0 or self.read_pos < self.last_rune_size: - # return ERR_INVALID_UNREAD_RUNE - - # self.read_pos -= self.last_rune_size - # self.last_byte = -1 - # self.last_rune_size = -1 - # return nil - - @always_inline - fn buffered(self) -> Int: - """Returns the number of bytes that can be read from the current buffer. - - Returns: - The number of bytes that can be read from the current buffer. - """ - return self.write_pos - self.read_pos - - fn read_slice(self: Reference[Self, True], delim: UInt8) -> (Span[UInt8, self.is_mutable, self.lifetime], Error): - """Reads until the first occurrence of delim in the input, - returning a slice pointing at the bytes in the buffer. It includes the first occurrence of the delimiter. - The bytes stop being valid at the next read. - If read_slice encounters an error before finding a delimiter, - it returns all the data in the buffer and the error itself (often io.EOF). - read_slice fails with error [ERR_BUFFER_FULL] if the buffer fills without a delim. - Because the data returned from read_slice will be overwritten - by the next I/O operation, most clients should use - [Reader.read_bytes] or read_string instead. - read_slice returns err != nil if and only if line does not end in delim. - - Args: - delim: The delimiter to search for. - - Returns: - The Span[UInt8] from the internal buffer. - """ - var err = Error() - var s = 0 # search start index - var line: Span[UInt8, self.is_mutable, self.lifetime] - while True: - # Search buffer. - var i = index_byte(self[].as_bytes_slice()[self[].read_pos + s : self[].write_pos], delim) - if i >= 0: - i += s - line = self[].as_bytes_slice()[self[].read_pos : self[].read_pos + i + 1] - self[].read_pos += i + 1 - break - - # Pending error? - if self[].err: - line = self[].as_bytes_slice()[self[].read_pos : self[].write_pos] - self[].read_pos = self[].write_pos - err = self[].read_error() - break - - # Buffer full? - if self[].buffered() >= io.BUFFER_SIZE: - self[].read_pos = self[].write_pos - line = self[].as_bytes_slice() - err = Error(ERR_BUFFER_FULL) - break - - s = self[].write_pos - self[].read_pos # do not rescan area we scanned before - self[].fill() # buffer is not full - - # Handle last byte, if any. - var i = len(line) - 1 - if i >= 0: - self[].last_byte = int(line[i]) - self[].last_rune_size = -1 - - return line, err - - fn read_line(inout self: Self) -> (List[UInt8], Bool): - """Low-level line-reading primitive. 
Most callers should use - [Reader.read_bytes]('\n') or [Reader.read_string]('\n') instead or use a [Scanner]. - - read_line tries to return a single line, not including the end-of-line bytes. - If the line was too long for the buffer then isPrefix is set and the - beginning of the line is returned. The rest of the line will be returned - from future calls. isPrefix will be false when returning the last fragment - of the line. The returned buffer is only valid until the next call to - read_line. read_line either returns a non-nil line or it returns an error, - never both. - - The text returned from read_line does not include the line end ("\r\n" or "\n"). - No indication or error is given if the input ends without a final line end. - Calling [Reader.unread_byte] after read_line will always unread the last byte read - (possibly a character belonging to the line end) even if that byte is not - part of the line returned by read_line. - """ - var line: Span[UInt8, True, __lifetime_of(self)] - var err: Error - line, err = self.read_slice(ord("\n")) - - if err and str(err) == ERR_BUFFER_FULL: - # Handle the case where "\r\n" straddles the buffer. - if len(line) > 0 and line[len(line) - 1] == ord("\r"): - # Put the '\r' back on buf and drop it from line. - # Let the next call to read_line check for "\r\n". - if self.read_pos == 0: - # should be unreachable - panic("bufio: tried to rewind past start of buffer") - - self.read_pos -= 1 - line = line[: len(line) - 1] - return List[UInt8](line), True - - if len(line) == 0: - return List[UInt8](line), False - - if line[len(line) - 1] == ord("\n"): - var drop = 1 - if len(line) > 1 and line[len(line) - 2] == ord("\r"): - drop = 2 - - line = line[: len(line) - drop] - - return List[UInt8](line), False - - fn collect_fragments( - inout self, delim: UInt8 - ) -> (List[List[UInt8]], Span[UInt8, True, __lifetime_of(self)], Int, Error): - """Reads until the first occurrence of delim in the input. It - returns (slice of full buffers, remaining bytes before delim, total number - of bytes in the combined first two elements, error). - - Args: - delim: The delimiter to search for. - """ - # Use read_slice to look for delim, accumulating full buffers. - var err = Error() - var full_buffers = List[List[UInt8]]() - var total_len = 0 - var frag: Span[UInt8, True, __lifetime_of(self)] - while True: - frag, err = self.read_slice(delim) - if not err: - break - - var read_slice_error = err - if str(read_slice_error) != ERR_BUFFER_FULL: - err = read_slice_error - break - - # Make a copy of the buffer Span. - var buf = List[UInt8](frag) - full_buffers.append(buf) - total_len += len(buf) - - total_len += len(frag) - return full_buffers, frag, total_len, err - - fn read_bytes(inout self, delim: UInt8) -> (List[UInt8], Error): - """Reads until the first occurrence of delim in the input, - returning a slice containing the data up to and including the delimiter. - If read_bytes encounters an error before finding a delimiter, - it returns the data read before the error and the error itself (often io.EOF). - read_bytes returns err != nil if and only if the returned data does not end in - delim. - For simple uses, a Scanner may be more convenient. - - Args: - delim: The delimiter to search for. - - Returns: - The List[UInt8] from the internal buffer. 
- """ - var full: List[List[UInt8]] - var frag: Span[UInt8, True, __lifetime_of(self)] - var n: Int - var err: Error - full, frag, n, err = self.collect_fragments(delim) - - # Allocate new buffer to hold the full pieces and the fragment. - var buf = List[UInt8](capacity=n) - n = 0 - - # copy full pieces and fragment in. - for i in range(len(full)): - var buffer = full[i] - n += copy(buf, buffer, n) - - _ = copy(buf, frag, n) - - return buf, err - - fn read_string(inout self, delim: UInt8) -> (String, Error): - """Reads until the first occurrence of delim in the input, - returning a string containing the data up to and including the delimiter. - If read_string encounters an error before finding a delimiter, - it returns the data read before the error and the error itself (often io.EOF). - read_string returns err != nil if and only if the returned data does not end in - delim. - For simple uses, a Scanner may be more convenient. - - Args: - delim: The delimiter to search for. - - Returns: - The String from the internal buffer. - """ - var full: List[List[UInt8]] - var frag: Span[UInt8, True, __lifetime_of(self)] - var n: Int - var err: Error - full, frag, n, err = self.collect_fragments(delim) - - # Allocate new buffer to hold the full pieces and the fragment. - var buf = StringBuilder(capacity=n) - - # copy full pieces and fragment in. - for i in range(len(full)): - var buffer = full[i] - _ = buf.write(Span(buffer)) - - _ = buf.write(frag) - return str(buf), err - - # fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int, Error): - # """Writes the internal buffer to the writer. This may make multiple calls to the [Reader.Read] method of the underlying [Reader]. - # If the underlying reader supports the [Reader.WriteTo] method, - # this calls the underlying [Reader.WriteTo] without buffering. - # write_to implements io.WriterTo. - - # Args: - # writer: The writer to write to. - - # Returns: - # The number of bytes written. - # """ - # self.last_byte = -1 - # self.last_rune_size = -1 - - # var bytes_written: Int - # var err: Error - # bytes_written, err = self.write_buf(writer) - # if err: - # return bytes_written, err - - # # internal buffer not full, fill before writing to writer - # if (self.write_pos - self.read_pos) < io.BUFFER_SIZE: - # self.fill() - - # while self.read_pos < self.write_pos: - # # self.read_pos < self.write_pos => buffer is not empty - # var bw: Int - # var err: Error - # bw, err = self.write_buf(writer) - # bytes_written += bw - - # self.fill() # buffer is empty - - # return bytes_written, Error() - - # fn write_buf[W: io.Writer](inout self, inout writer: W) -> (Int, Error): - # """Writes the [Reader]'s buffer to the writer. - - # Args: - # writer: The writer to write to. - - # Returns: - # The number of bytes written. - # """ - # # Nothing to write - # if self.read_pos == self.write_pos: - # return Int(0), Error() - - # # Write the buffer to the writer, if we hit EOF it's fine. That's not a failure condition. - # var bytes_written: Int - # var err: Error - # var buf_to_write = self.as_bytes_slice()[self.read_pos : self.write_pos] - # bytes_written, err = writer.write(List[UInt8](buf_to_write)) - # if err: - # return bytes_written, err - - # if bytes_written < 0: - # panic(ERR_NEGATIVE_WRITE) - - # self.read_pos += bytes_written - # return Int(bytes_written), Error() - - -fn new_reader[R: io.Reader, size: Int = MIN_READ_BUFFER_SIZE](owned reader: R) -> Reader[R, size]: - """Returns a new [Reader] whose buffer has at least the specified - size. 
If the argument io.Reader is already a [Reader] with large enough - size, it returns the underlying [Reader]. - - Args: - reader: The reader to read from. - - Params: - size: The size of the buffer. - - Returns: - The new [Reader]. - """ - var r = Reader[R, size](reader^) - return r^ - - -# buffered output -struct Writer[W: io.Writer, size: Int = io.BUFFER_SIZE]( - Sized, io.Writer, io.ByteWriter, io.StringWriter, io.ReaderFrom -): - """Implements buffering for an [io.Writer] object. - # If an error occurs writing to a [Writer], no more data will be - # accepted and all subsequent writes, and [Writer.flush], will return the error. - # After all data has been written, the client should call the - # [Writer.flush] method to guarantee all data has been forwarded to - # the underlying [io.Writer].""" - - var buf: InlineList[UInt8, size] - var bytes_written: Int - var writer: W - var err: Error - - @always_inline - fn __init__( - inout self, - owned writer: W, - buf: InlineList[UInt8, size] = InlineList[UInt8, size](), - bytes_written: Int = 0, - ): - self.buf = InlineList[UInt8, size]() - for element in buf: - self.buf.append(element[]) - self.bytes_written = bytes_written - self.writer = writer^ - self.err = Error() - - @always_inline - fn __moveinit__(inout self, owned existing: Self): - self.buf = InlineList[UInt8, size]() - for element in existing.buf: - self.buf.append(element[]) - self.bytes_written = existing.bytes_written - self.writer = existing.writer^ - self.err = existing.err^ - - @always_inline - fn __len__(self) -> Int: - """Returns the size of the underlying buffer in bytes.""" - return len(self.buf) - - @always_inline - fn as_bytes_slice(inout self) -> Span[UInt8, True, __lifetime_of(self)]: - """Returns the internal buffer data as a Span[UInt8].""" - return Span[UInt8, True, __lifetime_of(self)](array=Reference(self.buf._array)) - - @always_inline - fn reset(inout self, owned writer: W): - """Discards any unflushed buffered data, clears any error, and - resets b to write its output to w. - Calling reset on the zero value of [Writer] initializes the internal buffer - to the default size. - Calling w.reset(w) (that is, resetting a [Writer] to itself) does nothing. - - Args: - writer: The writer to write to. - """ - # # If a Writer w is passed to new_writer, new_writer will return w. - # # Different layers of code may do that, and then later pass w - # # to reset. Avoid infinite recursion in that case. - # if self == writer: - # return - - # if self.buf == nil: - # self.buf = make(InlineList[UInt8, io.BUFFER_SIZE], io.BUFFER_SIZE) - - self.err = Error() - self.bytes_written = 0 - self.writer = writer^ - - fn flush(inout self) -> Error: - """Writes any buffered data to the underlying [io.Writer].""" - # Prior to attempting to flush, check if there's a pre-existing error or if there's nothing to flush. - var err = Error() - if self.err: - return self.err - if self.bytes_written == 0: - return err - - var bytes_written: Int = 0 - bytes_written, err = self.writer.write(self.as_bytes_slice()[0 : self.bytes_written]) - - # If the write was short, set a short write error and try to shift up the remaining bytes. 
- if bytes_written < self.bytes_written and not err: - err = Error(str(io.ERR_SHORT_WRITE)) - - if err: - if bytes_written > 0 and bytes_written < self.bytes_written: - # TODO: Temp copying of elements until I figure out a better pattern or slice refs are added - var temp = self.as_bytes_slice()[bytes_written : self.bytes_written] - for i in range(len(temp)): - if i > len(temp): - self.buf[i] = temp[i] - else: - self.buf.append(temp[i]) - - self.bytes_written -= bytes_written - self.err = err - return err - - # Reset the buffer - self.buf = InlineList[UInt8, size]() - self.bytes_written = 0 - return err - - @always_inline - fn available(self) -> Int: - """Returns how many bytes are unused in the buffer.""" - return self.buf.capacity - len(self.buf) - - @always_inline - fn buffered(self) -> Int: - """Returns the number of bytes that have been written into the current buffer. - - Returns: - The number of bytes that have been written into the current buffer. - """ - return self.bytes_written - - fn _write(inout self, src: Span[UInt8]) -> (Int, Error): - """Writes the contents of src into the buffer. - It returns the number of bytes written. - If nn < len(src), it also returns an error explaining - why the write is short. - - Args: - src: The bytes to write. - - Returns: - The number of bytes written. - """ - var total_bytes_written: Int = 0 - var src_copy = src # TODO: Make a copy, maybe try a non owning Span - var err = Error() - while len(src_copy) > self.available() and not self.err: - var bytes_written: Int = 0 - if self.buffered() == 0: - # Large write, empty buffer. - # write directly from p to avoid copy. - bytes_written, err = self.writer.write(src_copy) - self.err = err - else: - # TODO: Temp copying of elements until I figure out a better pattern or slice refs are added - for i in range(len(src_copy)): - if i + self.bytes_written > len(src_copy): - self.buf[i + self.bytes_written] = src_copy[i] - else: - self.buf.append(src_copy[i]) - bytes_written += 1 - - self.bytes_written += bytes_written - _ = self.flush() - - total_bytes_written += bytes_written - src_copy = src_copy[bytes_written : len(src_copy)] - - if self.err: - return total_bytes_written, self.err - - # TODO: Temp copying of elements until I figure out a better pattern or slice refs are added - var n = 0 - for i in range(len(src_copy)): - if i + self.bytes_written > len(src_copy): - self.buf[i + self.bytes_written] = src_copy[i] - else: - self.buf.append(src_copy[i]) - n += 1 - self.bytes_written += n - total_bytes_written += n - return total_bytes_written, err - - @always_inline - fn write(inout self, src: List[UInt8]) -> (Int, Error): - """ - Appends a byte List to the builder buffer. - - Args: - src: The byte array to append. - """ - var span = Span(src) - - var bytes_read: Int - var err: Error - bytes_read, err = self._write(span) - - return bytes_read, err - - @always_inline - fn write_byte(inout self, src: UInt8) -> (Int, Error): - """Writes a single byte to the internal buffer. - - Args: - src: The byte to write. - """ - if self.err: - return 0, self.err - # If buffer is full, flush to the underlying writer. - var err = self.flush() - if self.available() <= 0 and err: - return 0, self.err - - self.buf.append(src) - self.bytes_written += 1 - - return 1, Error() - - # # WriteRune writes a single Unicode code point, returning - # # the number of bytes written and any error. - # fn WriteRune(r rune) (size int, err error): - # # Compare as uint32 to correctly handle negative runes. 
- # if uint32(r) < utf8.RuneSelf: - # err = self.write_posriteByte(byte(r)) - # if err != nil: - # return 0, err - - # return 1, nil - - # if self.err != nil: - # return 0, self.err - - # n := self.available() - # if n < utf8.UTFMax: - # if self.flush(); self.err != nil: - # return 0, self.err - - # n = self.available() - # if n < utf8.UTFMax: - # # Can only happen if buffer is silly small. - # return self.write_posriteString(string(r)) - - # size = utf8.EncodeRune(self.as_bytes_slice()[self.bytes_written:], r) - # self.bytes_written += size - # return size, nil - - @always_inline - fn write_string(inout self, src: String) -> (Int, Error): - """Writes a string to the internal buffer. - It returns the number of bytes written. - If the count is less than len(s), it also returns an error explaining - why the write is short. - - Args: - src: The string to write. - - Returns: - The number of bytes written. - """ - return self.write(src.as_bytes_slice()) - - fn read_from[R: io.Reader](inout self, inout reader: R) -> (Int, Error): - """Implements [io.ReaderFrom]. If the underlying writer - supports the read_from method, this calls the underlying read_from. - If there is buffered data and an underlying read_from, this fills - the buffer and writes it before calling read_from. - - Args: - reader: The reader to read from. - - Returns: - The number of bytes read. - """ - if self.err: - return 0, self.err - - var bytes_read: Int = 0 - var total_bytes_written: Int = 0 - var err = Error() - while True: - if self.available() == 0: - var err = self.flush() - if err: - return total_bytes_written, err - - var nr = 0 - while nr < MAX_CONSECUTIVE_EMPTY_READS: - # Read into remaining unused space in the buffer. - var buf = self.as_bytes_slice()[self.bytes_written : len(self.buf)] - bytes_read, err = reader._read(buf, len(buf)) - - if bytes_read != 0 or err: - break - nr += 1 - - if nr == MAX_CONSECUTIVE_EMPTY_READS: - return bytes_read, io.ERR_NO_PROGRESS - - self.bytes_written += bytes_read - total_bytes_written += bytes_read - if err: - break - - if err and str(err) == str(io.EOF): - # If we filled the buffer exactly, flush preemptively. - if self.available() == 0: - err = self.flush() - else: - err = Error() - - return total_bytes_written, Error() - - -fn new_writer[W: io.Writer, size: Int = io.BUFFER_SIZE](owned writer: W) -> Writer[W, size]: - """Returns a new [Writer] whose buffer has at least the specified - size. If the argument io.Writer is already a [Writer] with large enough - size, it returns the underlying [Writer].""" - # Is it already a Writer? - # b, ok := w.(*Writer) - # if ok and self.buf.capacity >= size: - # return b - - constrained[size > 0, "bufio: invalid buffer size. Must be greater than 0."]() - - return Writer[W, size]( - buf=InlineList[UInt8, size](), - writer=writer^, - bytes_written=0, - ) - - -# buffered input and output -struct ReadWriter[R: io.Reader, W: io.Writer](): - """ReadWriter stores pointers to a [Reader] and a [Writer]. 
- It implements [io.ReadWriter].""" - - var reader: R - var writer: W - - fn __init__(inout self, owned reader: R, owned writer: W): - self.reader = reader^ - self.writer = writer^ - - -# new_read_writer -fn new_read_writer[R: io.Reader, W: io.Writer](owned reader: R, owned writer: W) -> ReadWriter[R, W]: - """Allocates a new [ReadWriter] that dispatches to r and w.""" - return ReadWriter[R, W](reader^, writer^) diff --git a/gojo/builtins/__init__.mojo b/gojo/builtins/__init__.mojo deleted file mode 100644 index 0827829..0000000 --- a/gojo/builtins/__init__.mojo +++ /dev/null @@ -1,5 +0,0 @@ -from .bytes import Byte, index_byte, has_suffix, has_prefix, to_string -from .attributes import cap, copy -from .errors import exit, panic - -alias Rune = Int32 diff --git a/gojo/builtins/attributes.mojo b/gojo/builtins/attributes.mojo deleted file mode 100644 index 7ce0b15..0000000 --- a/gojo/builtins/attributes.mojo +++ /dev/null @@ -1,141 +0,0 @@ -from collections import InlineList - - -fn copy[T: CollectionElement](inout target: List[T], source: List[T], start: Int = 0) -> Int: - """Copies the contents of source into target at the same index. Returns the number of bytes copied. - Added a start parameter to specify the index to start copying into. - - Args: - target: The buffer to copy into. - source: The buffer to copy from. - start: The index to start copying into. - - Returns: - The number of bytes copied. - """ - var count = 0 - - for i in range(len(source)): - if i + start > len(target): - target[i + start] = source[i] - else: - target.append(source[i]) - count += 1 - - return count - - -fn copy[T: CollectionElement](inout target_span: Span[T, True], source_span: Span[T], start: Int = 0) -> Int: - """Copies the contents of source into target at the same index. Returns the number of bytes copied. - Added a start parameter to specify the index to start copying into. - - Args: - target_span: The buffer to copy into. - source_span: The buffer to copy from. - start: The index to start copying into. - - Returns: - The number of bytes copied. - """ - var count = 0 - - for i in range(len(source_span)): - target_span[i + start] = source_span[i] - count += 1 - - target_span._len += count - return count - - -fn copy[T: CollectionElement](inout target_span: Span[T, True], source: InlineList[T], start: Int = 0) -> Int: - """Copies the contents of source into target at the same index. Returns the number of bytes copied. - Added a start parameter to specify the index to start copying into. - - Args: - target_span: The buffer to copy into. - source: The buffer to copy from. - start: The index to start copying into. - - Returns: - The number of bytes copied. - """ - var count = 0 - - for i in range(len(source)): - target_span[i + start] = source[i] - count += 1 - - target_span._len += count - return count - - -fn test(inout dest: List[UInt8]): - var source = List[UInt8](1, 2, 3) - var target = Span[UInt8](dest) - - _ = copy(target, Span(source), start=0) - - -fn copy[T: CollectionElement](inout list: InlineList[T], source: Span[T], start: Int = 0) -> Int: - """Copies the contents of source into target at the same index. Returns the number of bytes copied. - Added a start parameter to specify the index to start copying into. - - Args: - list: The buffer to copy into. - source: The buffer to copy from. - start: The index to start copying into. - - Returns: - The number of bytes copied. 
- """ - var count = 0 - - for i in range(len(source)): - if i + start > len(list): - list[i + start] = source[i] - else: - list.append(source[i]) - count += 1 - - return count - - -fn copy( - inout target: List[UInt8], - source: DTypePointer[DType.uint8], - source_start: Int, - source_end: Int, - target_start: Int = 0, -) -> Int: - """Copies the contents of source into target at the same index. Returns the number of bytes copied. - Added a start parameter to specify the index to start copying into. - - Args: - target: The buffer to copy into. - source: The buffer to copy from. - source_start: The index to start copying from. - source_end: The index to stop copying at. - target_start: The index to start copying into. - - Returns: - The number of bytes copied. - """ - var count = 0 - - for i in range(source_start, source_end): - if i + target_start > len(target): - target[i + target_start] = source[i] - else: - target.append(source[i]) - count += 1 - - return count - - -fn cap[T: CollectionElement](iterable: List[T]) -> Int: - """Returns the capacity of the List. - - Args: - iterable: The List to get the capacity of. - """ - return iterable.capacity diff --git a/gojo/builtins/bytes.mojo b/gojo/builtins/bytes.mojo deleted file mode 100644 index ff0dcb5..0000000 --- a/gojo/builtins/bytes.mojo +++ /dev/null @@ -1,107 +0,0 @@ -alias Byte = UInt8 - - -fn equals(left: List[UInt8], right: List[UInt8]) -> Bool: - if len(left) != len(right): - return False - for i in range(len(left)): - if left[i] != right[i]: - return False - return True - - -fn has_prefix(bytes: List[Byte], prefix: List[Byte]) -> Bool: - """Reports whether the List[Byte] struct begins with prefix. - - Args: - bytes: The List[Byte] struct to search. - prefix: The prefix to search for. - - Returns: - True if the List[Byte] struct begins with prefix; otherwise, False. - """ - var len_comparison = len(bytes) >= len(prefix) - var prefix_comparison = equals(bytes[0 : len(prefix)], prefix) - return len_comparison and prefix_comparison - - -fn has_suffix(bytes: List[Byte], suffix: List[Byte]) -> Bool: - """Reports whether the List[Byte] struct ends with suffix. - - Args: - bytes: The List[Byte] struct to search. - suffix: The prefix to search for. - - Returns: - True if the List[Byte] struct ends with suffix; otherwise, False. - """ - var len_comparison = len(bytes) >= len(suffix) - var suffix_comparison = equals(bytes[len(bytes) - len(suffix) : len(bytes)], suffix) - return len_comparison and suffix_comparison - - -fn index_byte(bytes: List[Byte], delim: Byte) -> Int: - """Return the index of the first occurrence of the byte delim. - - Args: - bytes: The List[Byte] struct to search. - delim: The byte to search for. - - Returns: - The index of the first occurrence of the byte delim. - """ - for i in range(len(bytes)): - if bytes[i] == delim: - return i - - return -1 - - -fn index_byte(bytes: DTypePointer[DType.uint8], size: Int, delim: Byte) -> Int: - """Return the index of the first occurrence of the byte delim. - - Args: - bytes: The DTypePointer[DType.int8] struct to search. - size: The size of the bytes pointer. - delim: The byte to search for. - - Returns: - The index of the first occurrence of the byte delim. - """ - for i in range(size): - if UInt8(bytes[i]) == delim: - return i - - return -1 - - -fn index_byte(bytes: Span[UInt8], delim: Byte) -> Int: - """Return the index of the first occurrence of the byte delim. - - Args: - bytes: The Span to search. - delim: The byte to search for. 
- - Returns: - The index of the first occurrence of the byte delim. - """ - for i in range(len(bytes)): - if bytes[i] == delim: - return i - - return -1 - - -fn to_string(bytes: List[Byte]) -> String: - """Makes a deepcopy of the List[Byte] supplied and converts it to a string. If it's not null terminated, it will append a null byte. - - Args: - bytes: The List[Byte] struct to convert. - - Returns: - The string representation of the List[Byte] struct. - """ - var copy = List[Byte](bytes) - if copy[-1] != 0: - copy.append(0) - return String(copy) diff --git a/gojo/builtins/errors.mojo b/gojo/builtins/errors.mojo deleted file mode 100644 index 1c1a7ba..0000000 --- a/gojo/builtins/errors.mojo +++ /dev/null @@ -1,12 +0,0 @@ -from sys import exit - - -fn panic[T: Stringable](message: T, code: Int = 1): - """Panics the program with the given message and exit code. - - Args: - message: The message to panic with. - code: The exit code to panic with. - """ - print("panic:", message) - exit(code) diff --git a/gojo/bytes/__init__.mojo b/gojo/bytes/__init__.mojo deleted file mode 100644 index 15170f2..0000000 --- a/gojo/bytes/__init__.mojo +++ /dev/null @@ -1,2 +0,0 @@ -from .buffer import Buffer, new_buffer -from .reader import Reader, new_reader diff --git a/gojo/bytes/buffer.mojo b/gojo/bytes/buffer.mojo deleted file mode 100644 index 668f9fe..0000000 --- a/gojo/bytes/buffer.mojo +++ /dev/null @@ -1,518 +0,0 @@ -import ..io -from ..builtins import copy, Byte, panic, index_byte -from algorithm.memory import parallel_memcpy - - -alias Rune = Int32 - -# SMALL_BUFFER_SIZE is an initial allocation minimal capacity. -alias SMALL_BUFFER_SIZE: Int = 64 - -# The ReadOp constants describe the last action performed on -# the buffer, so that unread_rune and unread_byte can check for -# invalid usage. op_read_runeX constants are chosen such that -# converted to Int they correspond to the rune size that was read. -alias ReadOp = Int8 - -# Don't use iota for these, as the values need to correspond with the -# names and comments, which is easier to see when being explicit. -alias OP_READ: ReadOp = -1 # Any other read operation. -alias OP_INVALID: ReadOp = 0 # Non-read operation. -alias OP_READ_RUNE1: ReadOp = 1 # read rune of size 1. -alias OP_READ_RUNE2: ReadOp = 2 # read rune of size 2. -alias OP_READ_RUNE3: ReadOp = 3 # read rune of size 3. -alias OP_READ_RUNE4: ReadOp = 4 # read rune of size 4. - -alias MAX_INT: Int = 2147483647 -# MIN_READ is the minimum slice size passed to a read call by -# [Buffer.read_from]. As long as the [Buffer] has at least MIN_READ bytes beyond -# what is required to hold the contents of r, read_from will not grow the -# underlying buffer. -alias MIN_READ: Int = 512 - -# ERR_TOO_LARGE is passed to panic if memory cannot be allocated to store data in a buffer. -alias ERR_TOO_LARGE = "buffer.Buffer: too large" -alias ERR_NEGATIVE_READ = "buffer.Buffer: reader returned negative count from read" -alias ERR_SHORT_WRITE = "short write" - - -struct Buffer( - Stringable, - Sized, - io.Reader, - io.Writer, - io.StringWriter, - io.ByteWriter, - io.ByteReader, -): - var _data: UnsafePointer[UInt8] # contents are the bytes buf[off : len(buf)] - var _size: Int - var _capacity: Int - var offset: Int # read at &buf[off], write at &buf[len(buf)] - var last_read: ReadOp # last read operation, so that unread* can work correctly. 
- - @always_inline - fn __init__(inout self, capacity: Int = io.BUFFER_SIZE): - self._capacity = capacity - self._size = 0 - self._data = UnsafePointer[UInt8]().alloc(capacity) - self.offset = 0 - self.last_read = OP_INVALID - - @always_inline - fn __init__(inout self, owned buf: List[Byte]): - self._capacity = buf.capacity - self._size = buf.size - self._data = buf.steal_data() - self.offset = 0 - self.last_read = OP_INVALID - - @always_inline - fn __init__(inout self, owned data: UnsafePointer[UInt8], capacity: Int, size: Int): - self._capacity = capacity - self._size = size - self._data = data - self.offset = 0 - self.last_read = OP_INVALID - - @always_inline - fn __moveinit__(inout self, owned other: Self): - self._data = other._data - self._size = other._size - self._capacity = other._capacity - self.offset = other.offset - self.last_read = other.last_read - other._data = UnsafePointer[UInt8]() - other._size = 0 - other._capacity = 0 - other.offset = 0 - other.last_read = OP_INVALID - - @always_inline - fn __del__(owned self): - if self._data: - self._data.free() - - @always_inline - fn __len__(self) -> Int: - """Returns the number of bytes of the unread portion of the buffer. - self._size - self.offset.""" - return self._size - self.offset - - @always_inline - fn bytes_ptr(self) -> UnsafePointer[UInt8]: - """Returns a pointer holding the unread portion of the buffer.""" - return self._data.offset(self.offset) - - @always_inline - fn bytes(self) -> List[UInt8]: - """Returns a list of bytes holding a copy of the unread portion of the buffer.""" - var copy = UnsafePointer[UInt8]().alloc(self._size) - memcpy(copy, self._data.offset(self.offset), self._size) - return List[UInt8](unsafe_pointer=copy, size=self._size - self.offset, capacity=self._size - self.offset) - - @always_inline - fn as_bytes_slice(self: Reference[Self]) -> Span[UInt8, self.is_mutable, self.lifetime]: - """Returns the internal data as a Span[UInt8].""" - return Span[UInt8, self.is_mutable, self.lifetime](unsafe_ptr=self[]._data, len=self[]._size) - - @always_inline - fn _resize(inout self, capacity: Int) -> None: - """ - Resizes the string builder buffer. - - Args: - capacity: The new capacity of the string builder buffer. - """ - var new_data = UnsafePointer[UInt8]().alloc(capacity) - memcpy(new_data, self._data, self._size) - self._data.free() - self._data = new_data - self._capacity = capacity - - return None - - @always_inline - fn _resize_if_needed(inout self, bytes_to_add: Int): - # TODO: Handle the case where new_capacity is greater than MAX_INT. It should panic. - if bytes_to_add > self._capacity - self._size: - var new_capacity = int(self._capacity * 2) - if new_capacity < self._capacity + bytes_to_add: - new_capacity = self._capacity + bytes_to_add - self._resize(new_capacity) - - @always_inline - fn __str__(self) -> String: - """ - Converts the string builder to a string. - - Returns: - The string representation of the string builder. Returns an empty - string if the string builder is empty. - """ - var copy = UnsafePointer[UInt8]().alloc(self._size) - memcpy(copy, self._data, self._size) - return StringRef(copy, self._size) - - @always_inline - fn render(self: Reference[Self]) -> StringSlice[self.is_mutable, self.lifetime]: - """ - Return a StringSlice view of the data owned by the builder. - Slightly faster than __str__, 10-20% faster in limited testing. - - Returns: - The string representation of the string builder. Returns an empty string if the string builder is empty. 
- """ - return StringSlice[self.is_mutable, self.lifetime]( - unsafe_from_utf8_strref=StringRef(self[]._data, self[]._size) - ) - - @always_inline - fn _write(inout self, src: Span[Byte]) -> (Int, Error): - """ - Appends a byte Span to the builder buffer. - - Args: - src: The byte array to append. - """ - self._resize_if_needed(len(src)) - - memcpy(self._data.offset(self._size), src._data, len(src)) - self._size += len(src) - - return len(src), Error() - - @always_inline - fn write(inout self, src: List[Byte]) -> (Int, Error): - """ - Appends a byte List to the builder buffer. - - Args: - src: The byte array to append. - """ - var span = Span(src) - - var bytes_read: Int - var err: Error - bytes_read, err = self._write(span) - - return bytes_read, err - - @always_inline - fn write_string(inout self, src: String) -> (Int, Error): - """ - Appends a string to the builder buffer. - - Args: - src: The string to append. - """ - return self.write(src.as_bytes_slice()) - - @always_inline - fn write_byte(inout self, byte: Byte) -> (Int, Error): - """Appends the byte c to the buffer, growing the buffer as needed. - The returned error is always nil, but is included to match [bufio.Writer]'s - write_byte. If the buffer becomes too large, write_byte will panic with - [ERR_TOO_LARGE]. - - Args: - byte: The byte to write to the buffer. - - Returns: - The number of bytes written to the buffer. - """ - self.last_read = OP_INVALID - self._resize_if_needed(1) - self._data[self._size] = byte - self._size += 1 - - return 1, Error() - - @always_inline - fn empty(self) -> Bool: - """Reports whether the unread portion of the buffer is empty.""" - return self._size <= self.offset - - @always_inline - fn reset(inout self): - """Resets the buffer to be empty, - but it retains the underlying storage for use by future writes. - reset is the same as [buffer.truncate](0).""" - if self._data: - self._data.free() - self._data = UnsafePointer[UInt8]().alloc(self._capacity) - self._size = 0 - self.offset = 0 - self.last_read = OP_INVALID - - @always_inline - fn _read(inout self, inout dest: Span[Byte, True], capacity: Int) -> (Int, Error): - """Reads the next len(dest) bytes from the buffer or until the buffer - is drained. The return value n is the number of bytes read. If the - buffer has no data to return, err is io.EOF (unless len(dest) is zero); - otherwise it is nil. - - Args: - dest: The buffer to read into. - capacity: The capacity of the destination buffer. - - Returns: - The number of bytes read from the buffer. - """ - self.last_read = OP_INVALID - if self.empty(): - # Buffer is empty, reset to recover space. - self.reset() - # TODO: How to check if the span's pointer has 0 capacity? We want to return early if the span can't receive any data. - if capacity == 0: - return 0, Error() - return 0, io.EOF - - # Copy the data of the internal buffer from offset to len(buf) into the destination buffer at the given index. - var bytes_read = copy(dest, self.as_bytes_slice()[self.offset :]) - dest._len += bytes_read - self.offset += bytes_read - - if bytes_read > 0: - self.last_read = OP_READ - - return bytes_read, Error() - - @always_inline - fn read(inout self, inout dest: List[Byte]) -> (Int, Error): - """Reads the next len(dest) bytes from the buffer or until the buffer - is drained. The return value n is the number of bytes read. If the - buffer has no data to return, err is io.EOF (unless len(dest) is zero); - otherwise it is nil. - - Args: - dest: The buffer to read into. 
- - Returns: - The number of bytes read from the buffer. - """ - var span = Span(dest) - - var bytes_read: Int - var err: Error - bytes_read, err = self._read(span, dest.capacity) - dest.size += bytes_read - - return bytes_read, err - - @always_inline - fn read_byte(inout self) -> (Byte, Error): - """Reads and returns the next byte from the buffer. - If no byte is available, it returns error io.EOF. - """ - if self.empty(): - # Buffer is empty, reset to recover space. - self.reset() - return Byte(0), io.EOF - - var byte = self._data[self.offset] - self.offset += 1 - self.last_read = OP_READ - - return byte, Error() - - @always_inline - fn unread_byte(inout self) -> Error: - """Unreads the last byte returned by the most recent successful - read operation that read at least one byte. If a write has happened since - the last read, if the last read returned an error, or if the read read zero - bytes, unread_byte returns an error. - """ - if self.last_read == OP_INVALID: - return Error("buffer.Buffer: unread_byte: previous operation was not a successful read") - - self.last_read = OP_INVALID - if self.offset > 0: - self.offset -= 1 - - return Error() - - @always_inline - fn read_bytes(inout self, delim: Byte) -> (List[Byte], Error): - """Reads until the first occurrence of delim in the input, - returning a slice containing the data up to and including the delimiter. - If read_bytes encounters an error before finding a delimiter, - it returns the data read before the error and the error itself (often io.EOF). - read_bytes returns err != nil if and only if the returned data does not end in - delim. - - Args: - delim: The delimiter to read until. - - Returns: - A List[Byte] struct containing the data up to and including the delimiter. - """ - var slice: Span[UInt8, True, __lifetime_of(self)] - var err: Error - slice, err = self.read_slice(delim) - - var bytes = List[Byte](capacity=len(slice) + 1) - for byte in slice: - bytes.append(byte[]) - - return bytes, err - - @always_inline - fn read_slice(self: Reference[Self, True], delim: Byte) -> (Span[UInt8, self.is_mutable, self.lifetime], Error): - """Like read_bytes but returns a reference to internal buffer data. - - Args: - delim: The delimiter to read until. - - Returns: - A List[Byte] struct containing the data up to and including the delimiter. - """ - var i = index_byte(bytes=self[].as_bytes_slice(), delim=delim) - var end = self[].offset + i + 1 - - var err = Error() - if i < 0: - end = self[]._size - err = Error(str(io.EOF)) - - var line = self[].as_bytes_slice()[self[].offset : end] - self[].offset = end - self[].last_read = OP_READ - - return line, err - - @always_inline - fn read_string(inout self, delim: Byte) -> (String, Error): - """Reads until the first occurrence of delim in the input, - returning a string containing the data up to and including the delimiter. - If read_string encounters an error before finding a delimiter, - it returns the data read before the error and the error itself (often io.EOF). - read_string returns err != nil if and only if the returned data does not end - in delim. - - Args: - delim: The delimiter to read until. - - Returns: - A string containing the data up to and including the delimiter. 
- """ - var bytes: List[UInt8] - var err: Error - bytes, err = self.read_bytes(delim) - bytes.append(0) - - return String(bytes), err - - @always_inline - fn next(self: Reference[Self, True], number_of_bytes: Int) raises -> Span[Byte, self.is_mutable, self.lifetime]: - """Returns a slice containing the next n bytes from the buffer, - advancing the buffer as if the bytes had been returned by [Buffer.read]. - If there are fewer than n bytes in the buffer, next returns the entire buffer. - The slice is only valid until the next call to a read or write method. - - Args: - number_of_bytes: The number of bytes to read from the buffer. - - Returns: - A slice containing the next n bytes from the buffer. - """ - self[].last_read = OP_INVALID - var bytes_remaining = len(self[]) - var bytes_to_read = number_of_bytes - if bytes_to_read > bytes_remaining: - bytes_to_read = bytes_remaining - - var data = self[].as_bytes_slice()[self[].offset : self[].offset + bytes_to_read] - - self[].offset += bytes_to_read - if bytes_to_read > 0: - self[].last_read = OP_READ - - return data - - # fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int, Error): - # """Writes data to w until the buffer is drained or an error occurs. - # The return value n is the number of bytes written; Any error - # encountered during the write is also returned. - - # Args: - # writer: The writer to write to. - - # Returns: - # The number of bytes written to the writer. - # """ - # self.last_read = OP_INVALID - # var bytes_to_write = len(self) - # var total_bytes_written: Int = 0 - - # if bytes_to_write > 0: - # var bytes_written: Int - # var err: Error - # bytes_written, err = writer.write(self.as_bytes_slice()[self.offset :]) - # if bytes_written > bytes_to_write: - # panic("bytes.Buffer.write_to: invalid write count") - - # self.offset += bytes_written - # total_bytes_written = bytes_written - # if err: - # return total_bytes_written, err - - # # all bytes should have been written, by definition of write method in io.Writer - # if bytes_written != bytes_to_write: - # return total_bytes_written, Error(ERR_SHORT_WRITE) - - # # Buffer is now empty; reset. - # self.reset() - # return total_bytes_written, Error() - - -fn new_buffer(capacity: Int = io.BUFFER_SIZE) -> Buffer: - """Creates and initializes a new [Buffer] using buf as its` - initial contents. The new [Buffer] takes ownership of buf, and the - caller should not use buf after this call. new_buffer is intended to - prepare a [Buffer] to read existing data. It can also be used to set - the initial size of the internal buffer for writing. To do that, - buf should have the desired capacity but a length of zero. - - In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is - sufficient to initialize a [Buffer]. - """ - var b = List[Byte](capacity=capacity) - return Buffer(b^) - - -fn new_buffer(owned buf: List[Byte]) -> Buffer: - """Creates and initializes a new [Buffer] using buf as its` - initial contents. The new [Buffer] takes ownership of buf, and the - caller should not use buf after this call. new_buffer is intended to - prepare a [Buffer] to read existing data. It can also be used to set - the initial size of the internal buffer for writing. To do that, - buf should have the desired capacity but a length of zero. - - In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is - sufficient to initialize a [Buffer]. - - Args: - buf: The bytes to use as the initial contents of the buffer. 
- - Returns: - A new [Buffer] initialized with the provided bytes. - """ - return Buffer(buf^) - - -fn new_buffer(owned s: String) -> Buffer: - """Creates and initializes a new [Buffer] using string s as its - initial contents. It is intended to prepare a buffer to read an existing - string. - - In most cases, new([Buffer]) (or just declaring a [Buffer] variable) is - sufficient to initialize a [Buffer]. - - Args: - s: The string to use as the initial contents of the buffer. - - Returns: - A new [Buffer] initialized with the provided string. - """ - return Buffer(s.as_bytes()) diff --git a/gojo/bytes/reader.mojo b/gojo/bytes/reader.mojo deleted file mode 100644 index 8948b9a..0000000 --- a/gojo/bytes/reader.mojo +++ /dev/null @@ -1,294 +0,0 @@ -from ..builtins import copy, panic -import ..io - - -# TODO: Maybe try a non owning reader, but I'm concerned about the lifetime of the buffer. -# Is making it unsafe a good idea? The source data would need to be ensured to outlive the reader by the user. -struct Reader( - Sized, - io.Reader, - io.ReaderAt, - io.WriterTo, - io.Seeker, - io.ByteReader, - io.ByteScanner, -): - """A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker, - io.ByteScanner, and io.RuneScanner Interfaces by reading from - a byte slice. - Unlike a [Buffer], a Reader is read-only and supports seeking. - The zero value for Reader operates like a Reader of an empty slice. - """ - - var data: UnsafePointer[UInt8] # contents are the bytes buf[index : size] - var size: Int - var capacity: Int - var index: Int # current reading index - var prev_rune: Int # index of previous rune; or < 0 - - @always_inline - fn __init__(inout self, owned buffer: List[UInt8]): - """Initializes a new [Reader.Reader] struct.""" - self.capacity = buffer.capacity - self.size = buffer.size - self.data = buffer.steal_data() - self.index = 0 - self.prev_rune = -1 - - @always_inline - fn __moveinit__(inout self, owned other: Reader): - """Initializes a new [Reader.Reader] struct by moving the data from another [Reader.Reader] struct.""" - self.capacity = other.capacity - self.size = other.size - self.data = other.data - self.index = other.index - self.prev_rune = other.prev_rune - - other.data = UnsafePointer[UInt8]() - other.size = 0 - other.capacity = 0 - other.index = 0 - other.prev_rune = -1 - - @always_inline - fn __len__(self) -> Int: - """len returns the number of bytes of the unread portion of the - slice.""" - return self.size - int(self.index) - - @always_inline - fn __del__(owned self): - if self.data: - self.data.free() - - @always_inline - fn as_bytes_slice(self: Reference[Self]) -> Span[UInt8, self.is_mutable, self.lifetime]: - """Returns the internal data as a Span[UInt8].""" - return Span[UInt8, self.is_mutable, self.lifetime](unsafe_ptr=self[].data, len=self[].size) - - @always_inline - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): - """Reads from the internal buffer into the dest List[UInt8] struct. - Implements the [io.Reader] Interface. - - Args: - dest: The destination Span[UInt8] struct to read into. - capacity: The capacity of the destination buffer. - - Returns: - Int: The number of bytes read into dest.""" - - if self.index >= self.size: - return 0, io.EOF - - # Copy the data of the internal buffer from offset to len(buf) into the destination buffer at the given index. 
- self.prev_rune = -1 - var bytes_written = copy(dest, self.as_bytes_slice()[self.index : self.size], len(dest)) - dest._len += bytes_written - self.index += bytes_written - - return bytes_written, Error() - - @always_inline - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): - """Reads from the internal buffer into the dest List[UInt8] struct. - Implements the [io.Reader] Interface. - - Args: - dest: The destination List[UInt8] struct to read into. - - Returns: - Int: The number of bytes read into dest.""" - var span = Span(dest) - - var bytes_read: Int - var err: Error - bytes_read, err = self._read(span, dest.capacity) - dest.size += bytes_read - - return bytes_read, err - - @always_inline - fn _read_at(self, inout dest: Span[UInt8, True], off: Int, capacity: Int) -> (Int, Error): - """Reads len(dest) bytes into dest beginning at byte offset off. - Implements the [io.ReaderAt] Interface. - - Args: - dest: The destination List[UInt8] struct to read into. - off: The offset to start reading from. - - Returns: - Int: The number of bytes read into dest. - """ - # cannot modify state - see io.ReaderAt - if off < 0: - return 0, Error("bytes.Reader.read_at: negative offset") - - if off >= Int(self.size): - return 0, io.EOF - - var unread_bytes = self.as_bytes_slice()[off : self.size] - var bytes_written = copy(dest, unread_bytes) - if bytes_written < len(dest): - return 0, io.EOF - - return bytes_written, Error() - - @always_inline - fn read_at(self, inout dest: List[UInt8], off: Int) -> (Int, Error): - """Reads len(dest) bytes into dest beginning at byte offset off. - Implements the [io.ReaderAt] Interface. - - Args: - dest: The destination List[UInt8] struct to read into. - off: The offset to start reading from. - - Returns: - Int: The number of bytes read into dest. - """ - var span = Span(dest) - - var bytes_read: Int - var err: Error - bytes_read, err = self._read_at(span, off, dest.capacity) - dest.size += bytes_read - - return bytes_read, err - - @always_inline - fn read_byte(inout self) -> (UInt8, Error): - """Reads and returns a single byte from the internal buffer. Implements the [io.ByteReader] Interface.""" - self.prev_rune = -1 - if self.index >= self.size: - return UInt8(0), io.EOF - - var byte = self.data[self.index] - self.index += 1 - return byte, Error() - - @always_inline - fn unread_byte(inout self) -> Error: - """Unreads the last byte read by moving the read position back by one. - Complements [Reader.read_byte] in implementing the [io.ByteScanner] Interface. - """ - if self.index <= 0: - return Error("bytes.Reader.unread_byte: at beginning of slice") - - self.prev_rune = -1 - self.index -= 1 - - return Error() - - # # read_rune implements the [io.RuneReader] Interface. - # fn read_rune(self) (ch rune, size Int, err error): - # if self.index >= Int(self.size): - # self.prev_rune = -1 - # return 0, 0, io.EOF - - # self.prev_rune = Int(self.index) - # if c := self.buffer[self.index]; c < utf8.RuneSelf: - # self.index+= 1 - # return rune(c), 1, nil - - # ch, size = utf8.DecodeRune(self.buffer[self.index:]) - # self.index += Int(size) - # return - - # # unread_rune complements [Reader.read_rune] in implementing the [io.RuneScanner] Interface. 
- # fn unread_rune(self) error: - # if self.index <= 0: - # return errors.New("bytes.Reader.unread_rune: at beginning of slice") - - # if self.prev_rune < 0: - # return errors.New("bytes.Reader.unread_rune: previous operation was not read_rune") - - # self.index = Int(self.prev_rune) - # self.prev_rune = -1 - # return nil - - @always_inline - fn seek(inout self, offset: Int, whence: Int) -> (Int, Error): - """Moves the read position to the specified offset from the specified whence. - - Args: - offset: The offset to move to. - whence: The reference point for offset. - - Returns: - The new position in which the next read will start from. - """ - self.prev_rune = -1 - var position: Int = 0 - - if whence == io.SEEK_START: - position = offset - elif whence == io.SEEK_CURRENT: - position = self.index + offset - elif whence == io.SEEK_END: - position = self.size + offset - else: - return Int(0), Error("bytes.Reader.seek: invalid whence") - - if position < 0: - return Int(0), Error("bytes.Reader.seek: negative position") - - self.index = position - return position, Error() - - @always_inline - fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int, Error): - """Writes data to w until the buffer is drained or an error occurs. - implements the [io.WriterTo] Interface. - - Args: - writer: The writer to write to. - """ - self.prev_rune = -1 - if self.index >= self.size: - return 0, Error() - - var bytes = self.as_bytes_slice()[self.index : self.size] - var write_count: Int - var err: Error - write_count, err = writer.write(bytes) - if write_count > len(bytes): - panic("bytes.Reader.write_to: invalid Write count") - - self.index += write_count - if write_count != len(bytes): - return write_count, io.ERR_SHORT_WRITE - - return write_count, Error() - - @always_inline - fn reset(inout self, owned buffer: List[UInt8]): - """Resets the [Reader.Reader] to be reading from buffer. - - Args: - buffer: The new buffer to read from. - """ - self.capacity = buffer.capacity - self.size = buffer.size - self.data = buffer.steal_data() - self.index = 0 - self.prev_rune = -1 - - -fn new_reader(owned buffer: List[UInt8]) -> Reader: - """Returns a new [Reader.Reader] reading from b. - - Args: - buffer: The new buffer to read from. - - """ - return Reader(buffer) - - -fn new_reader(owned buffer: String) -> Reader: - """Returns a new [Reader.Reader] reading from b. - - Args: - buffer: The new buffer to read from. - - """ - return Reader(buffer.as_bytes()) diff --git a/gojo/io/__init__.mojo b/gojo/io/__init__.mojo deleted file mode 100644 index fb4e9b6..0000000 --- a/gojo/io/__init__.mojo +++ /dev/null @@ -1,334 +0,0 @@ -from .io import write_string, read_at_least, read_full, read_all, BUFFER_SIZE -from .file import FileWrapper -from .std import STDWriter - - -alias Rune = Int32 - -# Package io provides basic interfaces to I/O primitives. -# Its primary job is to wrap existing implementations of such primitives, -# such as those in package os, into shared public interfaces that -# abstract the fntionality, plus some other related primitives. -# -# Because these interfaces and primitives wrap lower-level operations with -# various implementations, unless otherwise informed clients should not -# assume they are safe for parallel execution. -# Seek whence values. 
-alias SEEK_START = 0 # seek relative to the origin of the file -alias SEEK_CURRENT = 1 # seek relative to the current offset -alias SEEK_END = 2 # seek relative to the end - -# ERR_SHORT_WRITE means that a write accepted fewer bytes than requested -# but failed to return an explicit error. -alias ERR_SHORT_WRITE = Error("short write") - -# ERR_INVALID_WRITE means that a write returned an impossible count. -alias ERR_INVALID_WRITE = Error("invalid write result") - -# ERR_SHORT_BUFFER means that a read required a longer buffer than was provided. -alias ERR_SHORT_BUFFER = Error("short buffer") - -# EOF is the error returned by Read when no more input is available. -# (Read must return EOF itself, not an error wrapping EOF, -# because callers will test for EOF using ==.) -# fntions should return EOF only to signal a graceful end of input. -# If the EOF occurs unexpectedly in a structured data stream, -# the appropriate error is either [ERR_UNEXPECTED_EOF] or some other error -# giving more detail. -alias EOF = Error("EOF") - -# ERR_UNEXPECTED_EOF means that EOF was encountered in the -# middle of reading a fixed-size block or data structure. -alias ERR_UNEXPECTED_EOF = Error("unexpected EOF") - -# ERR_NO_PROGRESS is returned by some clients of a [Reader] when -# many calls to Read have failed to return any data or error, -# usually the sign of a broken [Reader] implementation. -alias ERR_NO_PROGRESS = Error("multiple Read calls return no data or error") - - -trait Reader(Movable): - """Reader is the trait that wraps the basic Read method. - - Read reads up to len(p) bytes into p. It returns the number of bytes - read (0 <= n <= len(p)) and any error encountered. Even if Read - returns n < len(p), it may use all of p as scratch space during the call. - If some data is available but not len(p) bytes, Read conventionally - returns what is available instead of waiting for more. - - When Read encounters an error or end-of-file condition after - successfully reading n > 0 bytes, it returns the number of - bytes read. It may return the (non-nil) error from the same call - or return the error (and n == 0) from a subsequent call. - An instance of this general case is that a Reader returning - a non-zero number of bytes at the end of the input stream may - return either err == EOF or err == nil. The next Read should - return 0, EOF. - - Callers should always process the n > 0 bytes returned before - considering the error err. Doing so correctly handles I/O errors - that happen after reading some bytes and also both of the - allowed EOF behaviors. - - If len(p) == 0, Read should always return n == 0. It may return a - non-nil error if some error condition is known, such as EOF. - - Implementations of Read are discouraged from returning a - zero byte count with a nil error, except when len(p) == 0. - Callers should treat a return of 0 and nil as indicating that - nothing happened; in particular it does not indicate EOF. - - Implementations must not retain p.""" - - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): - ... - - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): - ... - - -trait Writer(Movable): - """Writer is the trait that wraps the basic Write method. - - Write writes len(p) bytes from p to the underlying data stream. - It returns the number of bytes written from p (0 <= n <= len(p)) - and any error encountered that caused the write to stop early. - Write must return a non-nil error if it returns n < len(p). 
- Write must not modify the slice data, even temporarily. - - Implementations must not retain p. - """ - - # fn _write(inout self, src: Span[UInt8]) -> (Int, Error): - # ... - - fn write(inout self, src: List[UInt8]) -> (Int, Error): - ... - - -trait Closer(Movable): - """ - Closer is the trait that wraps the basic Close method. - - The behavior of Close after the first call is undefined. - Specific implementations may document their own behavior. - """ - - fn close(inout self) -> Error: - ... - - -trait Seeker(Movable): - """ - Seeker is the trait that wraps the basic Seek method. - - Seek sets the offset for the next Read or Write to offset, - interpreted according to whence: - [SEEK_START] means relative to the start of the file, - [SEEK_CURRENT] means relative to the current offset, and - [SEEK_END] means relative to the end - (for example, offset = -2 specifies the penultimate byte of the file). - Seek returns the new offset relative to the start of the - file or an error, if any. - - Seeking to an offset before the start of the file is an error. - Seeking to any positive offset may be allowed, but if the new offset exceeds - the size of the underlying object the behavior of subsequent I/O operations - is implementation-dependent. - """ - - fn seek(inout self, offset: Int, whence: Int) -> (Int, Error): - ... - - -trait ReadWriter(Reader, Writer): - ... - - -trait ReadCloser(Reader, Closer): - ... - - -trait WriteCloser(Writer, Closer): - ... - - -trait ReadWriteCloser(Reader, Writer, Closer): - ... - - -trait ReadSeeker(Reader, Seeker): - ... - - -trait ReadSeekCloser(Reader, Seeker, Closer): - ... - - -trait WriteSeeker(Writer, Seeker): - ... - - -trait ReadWriteSeeker(Reader, Writer, Seeker): - ... - - -trait ReaderFrom: - """ReaderFrom is the trait that wraps the ReadFrom method. - - ReadFrom reads data from r until EOF or error. - The return value n is the number of bytes read. - Any error except EOF encountered during the read is also returned. - - The [copy] function uses [ReaderFrom] if available.""" - - fn read_from[R: Reader](inout self, inout reader: R) -> (Int, Error): - ... - - -trait WriterReadFrom(Writer, ReaderFrom): - ... - - -trait WriterTo: - """WriterTo is the trait that wraps the WriteTo method. - - WriteTo writes data to w until there's no more data to write or - when an error occurs. The return value n is the number of bytes - written. Any error encountered during the write is also returned. - - The copy function uses WriterTo if available.""" - - fn write_to[W: Writer](inout self, inout writer: W) -> (Int, Error): - ... - - -trait ReaderWriteTo(Reader, WriterTo): - ... - - -trait ReaderAt: - """ReaderAt is the trait that wraps the basic ReadAt method. - - ReadAt reads len(p) bytes into p starting at offset off in the - underlying input source. It returns the number of bytes - read (0 <= n <= len(p)) and any error encountered. - - When ReadAt returns n < len(p), it returns a non-nil error - explaining why more bytes were not returned. In this respect, - ReadAt is stricter than Read. - - Even if ReadAt returns n < len(p), it may use all of p as scratch - space during the call. If some data is available but not len(p) bytes, - ReadAt blocks until either all the data is available or an error occurs. - In this respect ReadAt is different from Read. - - If the n = len(p) bytes returned by ReadAt are at the end of the - input source, ReadAt may return either err == EOF or err == nil. 
- - If ReadAt is reading from an input source with a seek offset, - ReadAt should not affect nor be affected by the underlying - seek offset. - - Clients of ReadAt can execute parallel ReadAt calls on the - same input source. - - Implementations must not retain p.""" - - fn read_at(self, inout dest: List[UInt8], off: Int) -> (Int, Error): - ... - - fn _read_at(self, inout dest: Span[UInt8, True], off: Int, capacity: Int) -> (Int, Error): - ... - - -trait WriterAt: - """WriterAt is the trait that wraps the basic WriteAt method. - - WriteAt writes len(p) bytes from p to the underlying data stream - at offset off. It returns the number of bytes written from p (0 <= n <= len(p)) - and any error encountered that caused the write to stop early. - WriteAt must return a non-nil error if it returns n < len(p). - - If WriteAt is writing to a destination with a seek offset, - WriteAt should not affect nor be affected by the underlying - seek offset. - - Clients of WriteAt can execute parallel WriteAt calls on the same - destination if the ranges do not overlap. - - Implementations must not retain p.""" - - fn _write_at(self, src: Span[UInt8], off: Int) -> (Int, Error): - ... - - fn write_at(self, src: List[UInt8], off: Int) -> (Int, Error): - ... - - -trait ByteReader: - """ByteReader is the trait that wraps the read_byte method. - - read_byte reads and returns the next byte from the input or - any error encountered. If read_byte returns an error, no input - byte was consumed, and the returned byte value is undefined. - - read_byte provides an efficient trait for byte-at-time - processing. A [Reader] that does not implement ByteReader - can be wrapped using bufio.NewReader to add this method.""" - - fn read_byte(inout self) -> (UInt8, Error): - ... - - -trait ByteScanner(ByteReader): - """ByteScanner is the trait that adds the unread_byte method to the - basic read_byte method. - - unread_byte causes the next call to read_byte to return the last byte read. - If the last operation was not a successful call to read_byte, unread_byte may - return an error, unread the last byte read (or the byte prior to the - last-unread byte), or (in implementations that support the [Seeker] trait) - seek to one byte before the current offset.""" - - fn unread_byte(inout self) -> Error: - ... - - -trait ByteWriter: - """ByteWriter is the trait that wraps the write_byte method.""" - - fn write_byte(inout self, byte: UInt8) -> (Int, Error): - ... - - -trait RuneReader: - """RuneReader is the trait that wraps the read_rune method. - - read_rune reads a single encoded Unicode character - and returns the rune and its size in bytes. If no character is - available, err will be set.""" - - fn read_rune(inout self) -> (Rune, Int): - ... - - -trait RuneScanner(RuneReader): - """RuneScanner is the trait that adds the unread_rune method to the - basic read_rune method. - - unread_rune causes the next call to read_rune to return the last rune read. - If the last operation was not a successful call to read_rune, unread_rune may - return an error, unread the last rune read (or the rune prior to the - last-unread rune), or (in implementations that support the [Seeker] trait) - seek to the start of the rune before the current offset.""" - - fn unread_rune(inout self) -> Rune: - ... - - -trait StringWriter: - """StringWriter is the trait that wraps the WriteString method.""" - - fn write_string(inout self, src: String) -> (Int, Error): - ... 
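The trait definitions above (from the removed `gojo/io/__init__.mojo`) follow Go's convention of returning an `(Int, Error)` tuple instead of raising. As a point of reference for readers of this diff, a minimal conforming writer could look roughly like the sketch below. This is illustrative only: the `CountingWriter` type is hypothetical, the `gojo.io` import path is assumed, and the syntax follows the Mojo 24.x conventions used in the deleted code.

```mojo
from gojo import io  # assumed import path; the traits above lived in gojo/io/__init__.mojo


struct CountingWriter(io.Writer):
    """Hypothetical example: a Writer that discards its input and only counts bytes."""

    var bytes_written: Int

    fn __init__(inout self):
        self.bytes_written = 0

    fn __moveinit__(inout self, owned existing: Self):
        # Writer refines Movable, so a move constructor is required.
        self.bytes_written = existing.bytes_written

    fn write(inout self, src: List[UInt8]) -> (Int, Error):
        # A real writer would forward src to a file, socket, or buffer here.
        self.bytes_written += len(src)
        return len(src), Error()


fn main():
    var writer = CountingWriter()
    var bytes_written: Int
    var err: Error
    bytes_written, err = writer.write(String("hello").as_bytes())
    if err:
        print("write failed:", str(err))
    print("wrote", bytes_written, "bytes")
```

The same tuple-returning pattern is what the buffered `Writer.flush`, `Buffer.read`, and the helpers such as `read_at_least` in the deleted files elsewhere in this diff rely on: callers check the returned `Error` (including `io.EOF`) rather than wrapping calls in `try`/`except`.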
diff --git a/gojo/io/file.mojo b/gojo/io/file.mojo deleted file mode 100644 index 443ae93..0000000 --- a/gojo/io/file.mojo +++ /dev/null @@ -1,148 +0,0 @@ -import ..io -from ..builtins import copy -from ..syscall import FileDescriptorBase - - -struct FileWrapper(FileDescriptorBase, io.ByteReader): - var handle: FileHandle - - @always_inline - fn __init__(inout self, path: String, mode: String) raises: - self.handle = open(path, mode) - - @always_inline - fn __moveinit__(inout self, owned existing: Self): - self.handle = existing.handle^ - - @always_inline - fn __del__(owned self): - var err = self.close() - if err: - # TODO: __del__ can't raise, but there should be some fallback. - print(str(err)) - - @always_inline - fn close(inout self) -> Error: - try: - self.handle.close() - except e: - return e - - return Error() - - @always_inline - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): - """Read from the file handle into dest's pointer. - Pretty hacky way to force the filehandle read into the defined trait, and it's unsafe since we're - reading directly into the pointer. - """ - # var bytes_to_read = dest.capacity - len(dest) - var bytes_read: Int - var result: List[UInt8] - try: - result = self.handle.read_bytes() - bytes_read = len(result) - # TODO: Need to raise an Issue for this. Reading with pointer does not return an accurate count of bytes_read :( - # bytes_read = int(self.handle.read(DTypePointer[DType.uint8](dest.unsafe_ptr()) + dest.size)) - except e: - return 0, e - - _ = copy(dest, Span(result), len(dest)) - - if bytes_read == 0: - return bytes_read, io.EOF - - return bytes_read, Error() - - @always_inline - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): - """Read from the file handle into dest's pointer. - Pretty hacky way to force the filehandle read into the defined trait, and it's unsafe since we're - reading directly into the pointer. - """ - # var bytes_to_read = dest.capacity - len(dest) - var bytes_read: Int - var result: List[UInt8] - try: - result = self.handle.read_bytes() - bytes_read = len(result) - # TODO: Need to raise an Issue for this. Reading with pointer does not return an accurate count of bytes_read :( - # bytes_read = int(self.handle.read(DTypePointer[DType.uint8](dest.unsafe_ptr()) + dest.size)) - except e: - return 0, e - - _ = copy(dest, result, len(dest)) - - if bytes_read == 0: - return bytes_read, io.EOF - - return bytes_read, Error() - - @always_inline - fn read_all(inout self) -> (List[UInt8], Error): - var bytes = List[UInt8](capacity=io.BUFFER_SIZE) - while True: - var temp = List[UInt8](capacity=io.BUFFER_SIZE) - _ = self.read(temp) - - # If new bytes will overflow the result, resize it. 
- if len(bytes) + len(temp) > bytes.capacity: - bytes.reserve(bytes.capacity * 2) - bytes.extend(temp) - - if len(temp) < io.BUFFER_SIZE: - return bytes, io.EOF - - @always_inline - fn read_byte(inout self) -> (UInt8, Error): - try: - var bytes: List[UInt8] - var err: Error - bytes, err = self.read_bytes(1) - return bytes[0], Error() - except e: - return UInt8(0), e - - @always_inline - fn read_bytes(inout self, size: Int = -1) raises -> (List[UInt8], Error): - try: - return self.handle.read_bytes(size), Error() - except e: - return List[UInt8](), e - - @always_inline - fn stream_until_delimiter(inout self, inout dest: List[UInt8], delimiter: UInt8, max_size: Int) -> Error: - var byte: UInt8 - var err = Error() - for _ in range(max_size): - byte, err = self.read_byte() - if err: - return err - - if byte == delimiter: - return err - dest.append(byte) - return Error("Stream too long") - - @always_inline - fn seek(inout self, offset: Int, whence: Int = 0) -> (Int, Error): - try: - var position = self.handle.seek(UInt64(offset), whence) - return int(position), Error() - except e: - return 0, e - - @always_inline - fn _write(inout self, src: Span[UInt8]) -> (Int, Error): - if len(src) == 0: - return 0, Error("No data to write") - - try: - self.handle.write(src.unsafe_ptr()) - return len(src), io.EOF - except e: - return 0, Error(str(e)) - - @always_inline - fn write(inout self, src: List[UInt8]) -> (Int, Error): - return self._write(Span(src)) diff --git a/gojo/io/io.mojo b/gojo/io/io.mojo deleted file mode 100644 index 87b7da2..0000000 --- a/gojo/io/io.mojo +++ /dev/null @@ -1,440 +0,0 @@ -from ..builtins import copy, Byte, panic - -alias BUFFER_SIZE = 4096 - - -fn write_string[W: Writer](inout writer: W, string: String) -> (Int, Error): - """Writes the contents of the string s to w, which accepts a slice of bytes. - If w implements [StringWriter], [StringWriter.write_string] is invoked directly. - Otherwise, [Writer.write] is called exactly once. - - Args: - writer: The writer to write to. - string: The string to write. - - Returns: - The number of bytes written and an error, if any. - """ - return writer.write(string.as_bytes()) - - -fn write_string[W: StringWriter](inout writer: W, string: String) -> (Int, Error): - """Writes the contents of the string s to w, which accepts a slice of bytes. - If w implements [StringWriter], [StringWriter.write_string] is invoked directly. - Otherwise, [Writer.write] is called exactly once. - - Args: - writer: The writer to write to. - string: The string to write. - - Returns: - The number of bytes written and an error, if any.""" - return writer.write_string(string) - - -fn read_at_least[R: Reader](inout reader: R, inout dest: List[Byte], min: Int) -> (Int, Error): - """Reads from r into buf until it has read at least min bytes. - It returns the number of bytes copied and an error if fewer bytes were read. - The error is EOF only if no bytes were read. - If an EOF happens after reading fewer than min bytes, - read_at_least returns [ERR_UNEXPECTED_EOF]. - If min is greater than the length of buf, read_at_least returns [ERR_SHORT_BUFFER]. - On return, n >= min if and only if err == nil. - If r returns an error having read at least min bytes, the error is dropped. - - Args: - reader: The reader to read from. - dest: The buffer to read into. - min: The minimum number of bytes to read. 
- - Returns: - The number of bytes read.""" - var error = Error() - if len(dest) < min: - return 0, io.ERR_SHORT_BUFFER - - var total_bytes_read: Int = 0 - while total_bytes_read < min and not error: - var bytes_read: Int - bytes_read, error = reader.read(dest) - total_bytes_read += bytes_read - - if total_bytes_read >= min: - error = Error() - - elif total_bytes_read > 0 and str(error): - error = ERR_UNEXPECTED_EOF - - return total_bytes_read, error - - -fn read_full[R: Reader](inout reader: R, inout dest: List[Byte]) -> (Int, Error): - """Reads exactly len(buf) bytes from r into buf. - It returns the number of bytes copied and an error if fewer bytes were read. - The error is EOF only if no bytes were read. - If an EOF happens after reading some but not all the bytes, - read_full returns [ERR_UNEXPECTED_EOF]. - On return, n == len(buf) if and only if err == nil. - If r returns an error having read at least len(buf) bytes, the error is dropped. - """ - return read_at_least(reader, dest, len(dest)) - - -# fn copy_n[W: Writer, R: Reader](dst: W, src: R, n: Int) raises -> Int: -# """Copies n bytes (or until an error) from src to dst. -# It returns the number of bytes copied and the earliest -# error encountered while copying. -# On return, written == n if and only if err == nil. - -# If dst implements [ReaderFrom], the copy is implemented using it. -# """ -# var written = copy(dst, LimitReader(src, n)) -# if written == n: -# return n - -# if written < n: -# # src stopped early; must have been EOF. -# raise Error(ERR_UNEXPECTED_EOF) - -# return written - - -# fn copy[W: Writer, R: Reader](dst: W, src: R, n: Int) -> Int: -# """copy copies from src to dst until either EOF is reached -# on src or an error occurs. It returns the number of bytes -# copied and the first error encountered while copying, if any. - -# A successful copy returns err == nil, not err == EOF. -# Because copy is defined to read from src until EOF, it does -# not treat an EOF from Read as an error to be reported. - -# If src implements [WriterTo], -# the copy is implemented by calling src.WriteTo(dst). -# Otherwise, if dst implements [ReaderFrom], -# the copy is implemented by calling dst.ReadFrom(src). -# """ -# return copy_buffer(dst, src, nil) - -# # CopyBuffer is identical to copy except that it stages through the -# # provided buffer (if one is required) rather than allocating a -# # temporary one. If buf is nil, one is allocated; otherwise if it has -# # zero length, CopyBuffer panics. -# # -# # If either src implements [WriterTo] or dst implements [ReaderFrom], -# # buf will not be used to perform the copy. -# fn CopyBuffer(dst Writer, src Reader, buf bytes) (written Int, err error) { -# if buf != nil and len(buf) == 0 { -# panic("empty buffer in CopyBuffer") -# } -# return copy_buffer(dst, src, buf) -# } - - -# fn copy_buffer[W: Writer, R: Reader](dst: W, src: R, buf: Span[Byte]) raises -> Int: -# """Actual implementation of copy and CopyBuffer. -# if buf is nil, one is allocated. 
-# """ -# var nr: Int -# nr = src.read(buf) -# while True: -# if nr > 0: -# var nw: Int -# nw = dst.write(get_slice(buf, 0, nr)) -# if nw < 0 or nr < nw: -# nw = 0 - -# var written = Int(nw) -# if nr != nw: -# raise Error(ERR_SHORT_WRITE) - -# return written - - -# fn copy_buffer[W: Writer, R: ReaderWriteTo](dst: W, src: R, buf: Span[Byte]) -> Int: -# return src.write_to(dst) - - -# fn copy_buffer[W: WriterReadFrom, R: Reader](dst: W, src: R, buf: Span[Byte]) -> Int: -# return dst.read_from(src) - -# # LimitReader returns a Reader that reads from r -# # but stops with EOF after n bytes. -# # The underlying implementation is a *LimitedReader. -# fn LimitReader(r Reader, n Int) Reader { return &LimitedReader{r, n} } - -# # A LimitedReader reads from R but limits the amount of -# # data returned to just N bytes. Each call to Read -# # updates N to reflect the new amount remaining. -# # Read returns EOF when N <= 0 or when the underlying R returns EOF. -# struct LimitedReader(): -# var R: Reader # underlying reader -# N Int # max bytes remaining - -# fn (l *LimitedReader) Read(p bytes) (n Int, err error) { -# if l.N <= 0 { -# return 0, EOF -# } -# if Int(len(p)) > l.N { -# p = p[0:l.N] -# } -# n, err = l.R.Read(p) -# l.N -= Int(n) -# return -# } - -# # NewSectionReader returns a [SectionReader] that reads from r -# # starting at offset off and stops with EOF after n bytes. -# fn NewSectionReader(r ReaderAt, off Int, n Int) *SectionReader { -# var remaining Int -# const maxInt = 1<<63 - 1 -# if off <= maxInt-n { -# remaining = n + off -# } else { -# # Overflow, with no way to return error. -# # Assume we can read up to an offset of 1<<63 - 1. -# remaining = maxInt -# } -# return &SectionReader{r, off, off, remaining, n} -# } - -# # SectionReader implements Read, Seek, and ReadAt on a section -# # of an underlying [ReaderAt]. -# type SectionReader struct { -# r ReaderAt # constant after creation -# base Int # constant after creation -# off Int -# limit Int # constant after creation -# n Int # constant after creation -# } - -# fn (s *SectionReader) Read(p bytes) (n Int, err error) { -# if s.off >= s.limit { -# return 0, EOF -# } -# if max := s.limit - s.off; Int(len(p)) > max { -# p = p[0:max] -# } -# n, err = s.r.ReadAt(p, s.off) -# s.off += Int(n) -# return -# } - -# alias errWhence = "Seek: invalid whence" -# alias errOffset = "Seek: invalid offset" - -# fn (s *SectionReader) Seek(offset Int, whence Int) (Int, error) { -# switch whence { -# default: -# return 0, errWhence -# case SEEK_START: -# offset += s.base -# case SEEK_CURRENT: -# offset += s.off -# case SEEK_END: -# offset += s.limit -# } -# if offset < s.base { -# return 0, errOffset -# } -# s.off = offset -# return offset - s.base, nil -# } - -# fn (s *SectionReader) ReadAt(p bytes, off Int) (n Int, err error) { -# if off < 0 or off >= s.capacity { -# return 0, EOF -# } -# off += s.base -# if max := s.limit - off; Int(len(p)) > max { -# p = p[0:max] -# n, err = s.r.ReadAt(p, off) -# if err == nil { -# err = EOF -# } -# return n, err -# } -# return s.r.ReadAt(p, off) -# } - -# # Size returns the size of the section in bytes. -# fn (s *SectionReader) Size() Int { return s.limit - s.base } - -# # Outer returns the underlying [ReaderAt] and offsets for the section. -# # -# # The returned values are the same that were passed to [NewSectionReader] -# # when the [SectionReader] was created. 
-# fn (s *SectionReader) Outer() (r ReaderAt, off Int, n Int) { -# return s.r, s.base, s.n -# } - -# # An OffsetWriter maps writes at offset base to offset base+off in the underlying writer. -# type OffsetWriter struct { -# w WriterAt -# base Int # the original offset -# off Int # the current offset -# } - -# # NewOffsetWriter returns an [OffsetWriter] that writes to w -# # starting at offset off. -# fn NewOffsetWriter(w WriterAt, off Int) *OffsetWriter { -# return &OffsetWriter{w, off, off} -# } - -# fn (o *OffsetWriter) Write(p bytes) (n Int, err error) { -# n, err = o.w.WriteAt(p, o.off) -# o.off += Int(n) -# return -# } - -# fn (o *OffsetWriter) WriteAt(p bytes, off Int) (n Int, err error) { -# if off < 0 { -# return 0, errOffset -# } - -# off += o.base -# return o.w.WriteAt(p, off) -# } - -# fn (o *OffsetWriter) Seek(offset Int, whence Int) (Int, error) { -# switch whence { -# default: -# return 0, errWhence -# case SEEK_START: -# offset += o.base -# case SEEK_CURRENT: -# offset += o.off -# } -# if offset < o.base { -# return 0, errOffset -# } -# o.off = offset -# return offset - o.base, nil -# } - -# # TeeReader returns a [Reader] that writes to w what it reads from r. -# # All reads from r performed through it are matched with -# # corresponding writes to w. There is no internal buffering - -# # the write must complete before the read completes. -# # Any error encountered while writing is reported as a read error. -# fn TeeReader(r Reader, w Writer) Reader { -# return &teeReader{r, w} -# } - -# type teeReader struct { -# r Reader -# w Writer -# } - -# fn (t *teeReader) Read(p bytes) (n Int, err error) { -# n, err = t.r.Read(p) -# if n > 0 { -# if n, err := t.w.Write(p[:n]); err != nil { -# return n, err -# } -# } -# return -# } - -# # Discard is a [Writer] on which all Write calls succeed -# # without doing anything. -# var Discard Writer = discard{} - -# type discard struct{} - -# # discard implements ReaderFrom as an optimization so copy to -# # io.Discard can avoid doing unnecessary work. -# var _ ReaderFrom = discard{} - -# fn (discard) Write(p bytes) (Int, error) { -# return len(p), nil -# } - -# fn (discard) write_string(s string) (Int, error) { -# return len(s), nil -# } - -# var blackHolePool = sync.Pool{ -# New: fn() any { -# b := make(bytes, 8192) -# return &b -# }, -# } - -# fn (discard) ReadFrom(r Reader) (n Int, err error) { -# bufp := blackHolePool.Get().(*bytes) -# readSize := 0 -# for { -# readSize, err = r.Read(*bufp) -# n += Int(readSize) -# if err != nil { -# blackHolePool.Put(bufp) -# if err == EOF { -# return n, nil -# } -# return -# } -# } -# } - -# # NopCloser returns a [ReadCloser] with a no-op Close method wrapping -# # the provided [Reader] r. -# # If r implements [WriterTo], the returned [ReadCloser] will implement [WriterTo] -# # by forwarding calls to r. -# fn NopCloser(r Reader) ReadCloser { -# if _, ok := r.(WriterTo); ok { -# return nopCloserWriterTo{r} -# } -# return nopCloser{r} -# } - -# type nopCloser struct { -# Reader -# } - -# fn (nopCloser) Close() error { return nil } - -# type nopCloserWriterTo struct { -# Reader -# } - -# fn (nopCloserWriterTo) Close() error { return nil } - -# fn (c nopCloserWriterTo) WriteTo(w Writer) (n Int, err error) { -# return c.Reader.(WriterTo).WriteTo(w) -# } - - -# TODO: read directly into dest -fn read_all[R: Reader](inout reader: R) -> (List[Byte], Error): - """Reads from r until an error or EOF and returns the data it read. - A successful call returns err == nil, not err == EOF. 
Because ReadAll is - defined to read from src until EOF, it does not treat an EOF from Read - as an error to be reported. - - Args: - reader: The reader to read from. - - Returns: - The data read.""" - var dest = List[Byte](capacity=BUFFER_SIZE) - var at_eof: Bool = False - - while True: - var temp = List[Byte](capacity=BUFFER_SIZE) - var bytes_read: Int - var err: Error - bytes_read, err = reader.read(temp) - if str(err) != "": - if str(err) != str(EOF): - return dest, err - - at_eof = True - - # If new bytes will overflow the result, resize it. - # if some bytes were written, how do I append before returning result on the last one? - if len(dest) + len(temp) > dest.capacity: - dest.reserve(dest.capacity * 2) - dest.extend(temp) - - if at_eof: - return dest, err diff --git a/gojo/strings/__init__.mojo b/gojo/strings/__init__.mojo deleted file mode 100644 index fbcb44e..0000000 --- a/gojo/strings/__init__.mojo +++ /dev/null @@ -1,2 +0,0 @@ -from .builder import StringBuilder -from .reader import Reader, new_reader diff --git a/gojo/unicode/__init__.mojo b/gojo/unicode/__init__.mojo deleted file mode 100644 index b300770..0000000 --- a/gojo/unicode/__init__.mojo +++ /dev/null @@ -1 +0,0 @@ -from .utf8 import rune_count_in_string, UnicodeString, rune_width, string_width, Condition, DEFAULT_CONDITION diff --git a/gojo/unicode/utf8/string.mojo b/gojo/unicode/utf8/string.mojo deleted file mode 100644 index f0c852d..0000000 --- a/gojo/unicode/utf8/string.mojo +++ /dev/null @@ -1,109 +0,0 @@ -from bit import countl_zero -from algorithm.functional import vectorize -from sys.info import simdwidthof - - -alias simd_width_u8 = simdwidthof[DType.uint8]() - - -@value -struct UnicodeString(Stringable, Sized): - """A string that supports Unicode characters. - - The algorithms to handle UTF-8 are from @maxim on the Mojo Discord. Thanks! - """ - - var inner: String - - @always_inline - fn __init__(inout self, owned s: String): - self.inner = s^ - - @always_inline - fn __init__(inout self, owned bytes: List[UInt8]): - if bytes[-1] != 0: - bytes.append(0) - self.inner = String(bytes^) - - @always_inline - fn __len__(self) -> Int: - """Count the number of runes in a string. - - Returns: - The number of runes in the string. 
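A brief aside on the rune-counting trick that the vectorized `__len__` implementation below relies on: in UTF-8, every code point contributes exactly one byte that is not a continuation byte (continuation bytes always match `0b10xxxxxx`), so counting non-continuation bytes yields the rune count. The following is a simplified scalar sketch of that same idea, not part of the diff; the helper name is hypothetical and it assumes the same `unsafe_uint8_ptr` accessor and byte-length `len(String)` behavior used by the surrounding code.

```mojo
fn rune_count_scalar(s: String) -> Int:
    """Counts UTF-8 runes by counting bytes whose top two bits are not 0b10."""
    var ptr = s.unsafe_uint8_ptr()
    var count = 0
    for i in range(len(s)):
        # Continuation bytes look like 0b10xxxxxx; every other byte starts a new rune.
        if (int(ptr[i]) >> 6) != 0b10:
            count += 1
    return count


fn main():
    print(rune_count_scalar("hello"))  # 5 runes, 5 bytes
    print(rune_count_scalar("héllo"))  # 5 runes, 6 bytes (é encodes as 2 bytes)
```

The SIMD version that follows computes the same predicate over `simd_width_u8` bytes at a time and sums the matches, which is why the two should agree on any valid UTF-8 input.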
- """ - var data = DTypePointer[DType.uint8](self.inner.unsafe_uint8_ptr()) - var byte_count = len(self.inner) - var result = 0 - - @parameter - fn count[simd_width: Int](offset: Int): - result += int(((data.load[width=simd_width](offset) >> 6) != 0b10).cast[DType.uint8]().reduce_add()) - - vectorize[count, simd_width_u8](byte_count) - return result - - @always_inline - fn __str__(self) -> String: - return self.inner - - @always_inline - fn __getitem__(self: Reference[Self], slice: Slice) -> StringSlice[self.is_mutable, self.lifetime]: - """TODO: Doesn't handle negative indices.""" - var bytes_left = len(self[].inner) - var total_char_length: Int = 0 - for _ in range(slice.start, slice.end): - # Number of bytes of the current character - var char_length = int( - (DTypePointer[DType.uint8](self[].inner.unsafe_uint8_ptr() + total_char_length).load() >> 7 == 0).cast[ - DType.uint8 - ]() - * 1 - + countl_zero(~DTypePointer[DType.uint8](self[].inner.unsafe_uint8_ptr() + total_char_length).load()) - ) - - # Move iterator forward - bytes_left -= char_length - total_char_length += char_length - - return StringSlice[self.is_mutable, self.lifetime]( - unsafe_from_utf8_ptr=self[].inner.unsafe_uint8_ptr(), len=total_char_length - ) - - @always_inline - fn bytecount(self) -> Int: - return len(self.inner) - - @always_inline - fn __iter__( - self: Reference[Self], - ) -> _StringIter[self.is_mutable, self.lifetime]: - return _StringIter(self[].inner) - - -@value -struct _StringIter[mutability: Bool, lifetime: AnyLifetime[mutability].type](): - var bytes_left: Int - var ptr: UnsafePointer[UInt8] - - @always_inline - fn __init__(inout self, src: Reference[String, mutability, lifetime]): - self.bytes_left = len(src[]) - self.ptr = src[].unsafe_uint8_ptr() - - fn __next__(inout self) -> StringSlice[mutability, lifetime]: - # Number of bytes of the current character - var char_length = int( - (DTypePointer[DType.uint8](self.ptr).load() >> 7 == 0).cast[DType.uint8]() * 1 - + countl_zero(~DTypePointer[DType.uint8](self.ptr).load()) - ) - - # Move iterator forward - self.bytes_left -= char_length - self.ptr += char_length - - return StringSlice[mutability, lifetime](unsafe_from_utf8_ptr=self.ptr - char_length, len=char_length) - - @always_inline - fn __len__(self) -> Int: - return self.bytes_left diff --git a/magic.lock b/magic.lock new file mode 100644 index 0000000..6c8a15d --- /dev/null +++ b/magic.lock @@ -0,0 +1,1577 @@ +version: 5 +environments: + default: + channels: + - url: https://conda.anaconda.org/conda-forge/ + - url: https://conda.modular.com/max/ + packages: + linux-64: + - conda: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-h4bc722e_7.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.8.30-hbcca054_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/click-8.1.7-unix_pyh707e725_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.5.0-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-8.5.0-hd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.6.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/jupyter_core-5.7.2-py312h7900ff3_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.1-h166bdaf_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-hf3520f5_7.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-23_linux64_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-23_linux64_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20191231-he28a2e2_2.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.6.3-h5888daf_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.1.0-h77fa898_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-14.1.0-h69a702a_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-14.1.0-h69a702a_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-ng-14.1.0-h69a702a_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-14.1.0-hc5f4f2c_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.1.0-h77fa898_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-23_linux64_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.27-pthreads_hac2b453_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.20-h4ab18f5_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.46.1-hadc24fc_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-14.1.0-hc0a3c3a_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-14.1.0-h4852527_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-h4ab18f5_1.conda + - conda: https://conda.modular.com/max/noarch/max-24.5.0-release.conda + - conda: https://conda.modular.com/max/linux-64/max-core-24.5.0-release.conda + - conda: https://conda.modular.com/max/linux-64/max-python-24.5.0-3.12release.conda + - conda: https://conda.modular.com/max/noarch/mblack-24.5.0-release.conda + - conda: https://conda.modular.com/max/noarch/mojo-jupyter-24.5.0-release.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-he02047a_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/numpy-1.26.4-py312heda63a1_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.3.2-hb9d3cd8_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-24.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pathspec-0.12.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.3.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.12.5-h2ad013b_0_cpython.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0-pyhd8ed1ab_0.conda + - conda: 
https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.12-5_cp312.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-26.2.0-py312hbf22597_2.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.16.0-pyh6c4a22f_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.4.1-py312h66e93f0_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h8827d51_1.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/linux-64/zeromq-4.3.5-ha4adb4c_5.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.20.2-pyhd8ed1ab_0.conda + osx-arm64: + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h99b78c6_7.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.8.30-hf0a4a13_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/click-8.1.7-unix_pyh707e725_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.5.0-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-8.5.0-hd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.6.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/jupyter_core-5.7.2-py312h81bd7bf_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.9.0-23_osxarm64_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.9.0-23_osxarm64_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-18.1.8-h3ed4263_7.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20191231-hc8eb9b7_2.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.6.3-hf9b8971_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran-5.0.0-13_2_0_hd922786_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran5-13.2.0-hf226fd6_3.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.9.0-23_osxarm64_openblas.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.27-openmp_h517c56d_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.20-h99b78c6_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.46.1-hc14010f_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-hfb2fe0b_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-18.1.8-hde57baf_1.conda + - conda: https://conda.modular.com/max/noarch/max-24.5.0-release.conda + - conda: https://conda.modular.com/max/osx-arm64/max-core-24.5.0-release.conda + - conda: https://conda.modular.com/max/osx-arm64/max-python-24.5.0-3.12release.conda + - conda: https://conda.modular.com/max/noarch/mblack-24.5.0-release.conda + - conda: https://conda.modular.com/max/noarch/mojo-jupyter-24.5.0-release.conda + - conda: 
https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h7bae524_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-1.26.4-py312h8442bc7_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.3.2-h8359307_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/packaging-24.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/pathspec-0.12.1-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.3.2-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.12.6-h739c21a_0_cpython.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.12-5_cp312.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-26.2.0-py312hc6335d2_2.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.16.0-pyh6c4a22f_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.4.1-py312h024a12e_1.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_0.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h8827d51_1.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/zeromq-4.3.5-h64debc3_5.conda + - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.20.2-pyhd8ed1ab_0.conda +packages: +- kind: conda + name: _libgcc_mutex + version: '0.1' + build: conda_forge + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 + sha256: fe51de6107f9edc7aa4f786a70f4a883943bc9d39b3bb7307c04c41410990726 + md5: d7c89558ba9fa0495403155b64376d81 + license: None + size: 2562 + timestamp: 1578324546067 +- kind: conda + name: _openmp_mutex + version: '4.5' + build: 2_gnu + build_number: 16 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_gnu.tar.bz2 + sha256: fbe2c5e56a653bebb982eda4876a9178aedfc2b545f25d0ce9c4c0b508253d22 + md5: 73aaf86a425cc6e73fcf236a5a46396d + depends: + - _libgcc_mutex 0.1 conda_forge + - libgomp >=7.5.0 + constrains: + - openmp_impl 9999 + license: BSD-3-Clause + license_family: BSD + size: 23621 + timestamp: 1650670423406 +- kind: conda + name: bzip2 + version: 1.0.8 + build: h4bc722e_7 + build_number: 7 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-h4bc722e_7.conda + sha256: 5ced96500d945fb286c9c838e54fa759aa04a7129c59800f0846b4335cee770d + md5: 62ee74e96c5ebb0af99386de58cf9553 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc-ng >=12 + license: bzip2-1.0.6 + license_family: BSD + size: 252783 + timestamp: 1720974456583 +- kind: conda + name: bzip2 + version: 1.0.8 + build: h99b78c6_7 + build_number: 7 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/bzip2-1.0.8-h99b78c6_7.conda + sha256: adfa71f158cbd872a36394c56c3568e6034aa55c623634b37a4836bd036e6b91 + md5: fc6948412dbbbe9a4c9ddbbcfe0a79ab + depends: + - __osx >=11.0 + license: bzip2-1.0.6 + license_family: BSD + size: 122909 + timestamp: 
1720974522888 +- kind: conda + name: ca-certificates + version: 2024.8.30 + build: hbcca054_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2024.8.30-hbcca054_0.conda + sha256: afee721baa6d988e27fef1832f68d6f32ac8cc99cdf6015732224c2841a09cea + md5: c27d1c142233b5bc9ca570c6e2e0c244 + license: ISC + size: 159003 + timestamp: 1725018903918 +- kind: conda + name: ca-certificates + version: 2024.8.30 + build: hf0a4a13_0 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/ca-certificates-2024.8.30-hf0a4a13_0.conda + sha256: 2db1733f4b644575dbbdd7994a8f338e6ef937f5ebdb74acd557e9dda0211709 + md5: 40dec13fd8348dbe303e57be74bd3d35 + license: ISC + size: 158482 + timestamp: 1725019034582 +- kind: conda + name: click + version: 8.1.7 + build: unix_pyh707e725_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/click-8.1.7-unix_pyh707e725_0.conda + sha256: f0016cbab6ac4138a429e28dbcb904a90305b34b3fe41a9b89d697c90401caec + md5: f3ad426304898027fc619827ff428eca + depends: + - __unix + - python >=3.8 + license: BSD-3-Clause + license_family: BSD + size: 84437 + timestamp: 1692311973840 +- kind: conda + name: importlib-metadata + version: 8.5.0 + build: pyha770c72_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-8.5.0-pyha770c72_0.conda + sha256: 7194700ce1a5ad2621fd68e894dd8c1ceaff9a38723e6e0e5298fdef13017b1c + md5: 54198435fce4d64d8a89af22573012a8 + depends: + - python >=3.8 + - zipp >=0.5 + license: Apache-2.0 + license_family: APACHE + size: 28646 + timestamp: 1726082927916 +- kind: conda + name: importlib_metadata + version: 8.5.0 + build: hd8ed1ab_0 + subdir: noarch + noarch: generic + url: https://conda.anaconda.org/conda-forge/noarch/importlib_metadata-8.5.0-hd8ed1ab_0.conda + sha256: 313b8a05211bacd6b15ab2621cb73d7f41ea5c6cae98db53367d47833f03fef1 + md5: 2a92e152208121afadf85a5e1f3a5f4d + depends: + - importlib-metadata >=8.5.0,<8.5.1.0a0 + license: Apache-2.0 + license_family: APACHE + size: 9385 + timestamp: 1726082930346 +- kind: conda + name: jupyter_client + version: 8.6.2 + build: pyhd8ed1ab_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/jupyter_client-8.6.2-pyhd8ed1ab_0.conda + sha256: 634f065cdd1d0aacd4bb6848ebf240dcebc8578135d65f4ad4aa42b2276c4e0c + md5: 3cdbb2fa84490e5fd44c9f9806c0d292 + depends: + - importlib_metadata >=4.8.3 + - jupyter_core >=4.12,!=5.0.* + - python >=3.8 + - python-dateutil >=2.8.2 + - pyzmq >=23.0 + - tornado >=6.2 + - traitlets >=5.3 + license: BSD-3-Clause + license_family: BSD + size: 106248 + timestamp: 1716472312833 +- kind: conda + name: jupyter_core + version: 5.7.2 + build: py312h7900ff3_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/jupyter_core-5.7.2-py312h7900ff3_0.conda + sha256: 22a6259c2b139191c76ed7633d1865757b3c15007989f6c74304a80f28e5a262 + md5: eee5a2e3465220ed87196bbb5665f420 + depends: + - platformdirs >=2.5 + - python >=3.12,<3.13.0a0 + - python_abi 3.12.* *_cp312 + - traitlets >=5.3 + license: BSD-3-Clause + license_family: BSD + size: 92843 + timestamp: 1710257533875 +- kind: conda + name: jupyter_core + version: 5.7.2 + build: py312h81bd7bf_0 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/jupyter_core-5.7.2-py312h81bd7bf_0.conda + sha256: 5ab0e75a30915d34ae27b4a76f1241c2f4cc4419b6b1c838cc1160b9ec8bfaf5 + md5: 209b9cb7159212afce5e16d7a3ee3b47 + depends: + - platformdirs >=2.5 + - 
python >=3.12,<3.13.0a0 + - python >=3.12,<3.13.0a0 *_cpython + - python_abi 3.12.* *_cp312 + - traitlets >=5.3 + license: BSD-3-Clause + license_family: BSD + size: 93829 + timestamp: 1710257916303 +- kind: conda + name: keyutils + version: 1.6.1 + build: h166bdaf_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.1-h166bdaf_0.tar.bz2 + sha256: 150c05a6e538610ca7c43beb3a40d65c90537497a4f6a5f4d15ec0451b6f5ebb + md5: 30186d27e2c9fa62b45fb1476b7200e3 + depends: + - libgcc-ng >=10.3.0 + license: LGPL-2.1-or-later + size: 117831 + timestamp: 1646151697040 +- kind: conda + name: krb5 + version: 1.21.3 + build: h237132a_0 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/krb5-1.21.3-h237132a_0.conda + sha256: 4442f957c3c77d69d9da3521268cad5d54c9033f1a73f99cde0a3658937b159b + md5: c6dc8a0fdec13a0565936655c33069a1 + depends: + - __osx >=11.0 + - libcxx >=16 + - libedit >=3.1.20191231,<3.2.0a0 + - libedit >=3.1.20191231,<4.0a0 + - openssl >=3.3.1,<4.0a0 + license: MIT + license_family: MIT + size: 1155530 + timestamp: 1719463474401 +- kind: conda + name: krb5 + version: 1.21.3 + build: h659f571_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/krb5-1.21.3-h659f571_0.conda + sha256: 99df692f7a8a5c27cd14b5fb1374ee55e756631b9c3d659ed3ee60830249b238 + md5: 3f43953b7d3fb3aaa1d0d0723d91e368 + depends: + - keyutils >=1.6.1,<2.0a0 + - libedit >=3.1.20191231,<3.2.0a0 + - libedit >=3.1.20191231,<4.0a0 + - libgcc-ng >=12 + - libstdcxx-ng >=12 + - openssl >=3.3.1,<4.0a0 + license: MIT + license_family: MIT + size: 1370023 + timestamp: 1719463201255 +- kind: conda + name: ld_impl_linux-64 + version: '2.40' + build: hf3520f5_7 + build_number: 7 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.40-hf3520f5_7.conda + sha256: 764b6950aceaaad0c67ef925417594dd14cd2e22fff864aeef455ac259263d15 + md5: b80f2f396ca2c28b8c14c437a4ed1e74 + constrains: + - binutils_impl_linux-64 2.40 + license: GPL-3.0-only + license_family: GPL + size: 707602 + timestamp: 1718625640445 +- kind: conda + name: libblas + version: 3.9.0 + build: 23_linux64_openblas + build_number: 23 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-23_linux64_openblas.conda + sha256: edb1cee5da3ac4936940052dcab6969673ba3874564f90f5110f8c11eed789c2 + md5: 96c8450a40aa2b9733073a9460de972c + depends: + - libopenblas >=0.3.27,<0.3.28.0a0 + - libopenblas >=0.3.27,<1.0a0 + constrains: + - liblapacke 3.9.0 23_linux64_openblas + - libcblas 3.9.0 23_linux64_openblas + - liblapack 3.9.0 23_linux64_openblas + - blas * openblas + license: BSD-3-Clause + license_family: BSD + size: 14880 + timestamp: 1721688759937 +- kind: conda + name: libblas + version: 3.9.0 + build: 23_osxarm64_openblas + build_number: 23 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libblas-3.9.0-23_osxarm64_openblas.conda + sha256: 1c30da861e306a25fac8cd30ce0c1b31c9238d04e7768c381cf4d431b4361e6c + md5: acae9191e8772f5aff48ab5232d4d2a3 + depends: + - libopenblas >=0.3.27,<0.3.28.0a0 + - libopenblas >=0.3.27,<1.0a0 + constrains: + - liblapack 3.9.0 23_osxarm64_openblas + - blas * openblas + - liblapacke 3.9.0 23_osxarm64_openblas + - libcblas 3.9.0 23_osxarm64_openblas + license: BSD-3-Clause + license_family: BSD + size: 15103 + timestamp: 1721688997980 +- kind: conda + name: libcblas + version: 3.9.0 + build: 23_linux64_openblas + build_number: 23 + subdir: linux-64 + url: 
https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-23_linux64_openblas.conda + sha256: 3e7a3236e7e03e308e1667d91d0aa70edd0cba96b4b5563ef4adde088e0881a5 + md5: eede29b40efa878cbe5bdcb767e97310 + depends: + - libblas 3.9.0 23_linux64_openblas + constrains: + - liblapacke 3.9.0 23_linux64_openblas + - liblapack 3.9.0 23_linux64_openblas + - blas * openblas + license: BSD-3-Clause + license_family: BSD + size: 14798 + timestamp: 1721688767584 +- kind: conda + name: libcblas + version: 3.9.0 + build: 23_osxarm64_openblas + build_number: 23 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libcblas-3.9.0-23_osxarm64_openblas.conda + sha256: c39d944909d0608bd0333398be5e0051045c9451bfd6cc6320732d33375569c8 + md5: bad6ee9b7d5584efc2bc5266137b5f0d + depends: + - libblas 3.9.0 23_osxarm64_openblas + constrains: + - liblapack 3.9.0 23_osxarm64_openblas + - liblapacke 3.9.0 23_osxarm64_openblas + - blas * openblas + license: BSD-3-Clause + license_family: BSD + size: 14991 + timestamp: 1721689017803 +- kind: conda + name: libcxx + version: 18.1.8 + build: h3ed4263_7 + build_number: 7 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-18.1.8-h3ed4263_7.conda + sha256: 15b4abaa249f0965ce42aeb4a1a2b1b5df9a1f402e7c5bd8156272fd6cad2878 + md5: e0e7d9a2ec0f9509ffdfd5f48da522fb + depends: + - __osx >=11.0 + license: Apache-2.0 WITH LLVM-exception + license_family: Apache + size: 436921 + timestamp: 1725403628507 +- kind: conda + name: libedit + version: 3.1.20191231 + build: hc8eb9b7_2 + build_number: 2 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20191231-hc8eb9b7_2.tar.bz2 + sha256: 3912636197933ecfe4692634119e8644904b41a58f30cad9d1fc02f6ba4d9fca + md5: 30e4362988a2623e9eb34337b83e01f9 + depends: + - ncurses >=6.2,<7.0.0a0 + license: BSD-2-Clause + license_family: BSD + size: 96607 + timestamp: 1597616630749 +- kind: conda + name: libedit + version: 3.1.20191231 + build: he28a2e2_2 + build_number: 2 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20191231-he28a2e2_2.tar.bz2 + sha256: a57d37c236d8f7c886e01656f4949d9dcca131d2a0728609c6f7fa338b65f1cf + md5: 4d331e44109e3f0e19b4cb8f9b82f3e1 + depends: + - libgcc-ng >=7.5.0 + - ncurses >=6.2,<7.0.0a0 + license: BSD-2-Clause + license_family: BSD + size: 123878 + timestamp: 1597616541093 +- kind: conda + name: libexpat + version: 2.6.3 + build: h5888daf_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.6.3-h5888daf_0.conda + sha256: 4bb47bb2cd09898737a5211e2992d63c555d63715a07ba56eae0aff31fb89c22 + md5: 59f4c43bb1b5ef1c71946ff2cbf59524 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + constrains: + - expat 2.6.3.* + license: MIT + license_family: MIT + size: 73616 + timestamp: 1725568742634 +- kind: conda + name: libexpat + version: 2.6.3 + build: hf9b8971_0 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.6.3-hf9b8971_0.conda + sha256: 5cbe5a199fba14ade55457a468ce663aac0b54832c39aa54470b3889b4c75c4a + md5: 5f22f07c2ab2dea8c66fe9585a062c96 + depends: + - __osx >=11.0 + constrains: + - expat 2.6.3.* + license: MIT + license_family: MIT + size: 63895 + timestamp: 1725568783033 +- kind: conda + name: libffi + version: 3.4.2 + build: h3422bc3_5 + build_number: 5 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 + sha256: 
41b3d13efb775e340e4dba549ab5c029611ea6918703096b2eaa9c015c0750ca + md5: 086914b672be056eb70fd4285b6783b6 + license: MIT + license_family: MIT + size: 39020 + timestamp: 1636488587153 +- kind: conda + name: libffi + version: 3.4.2 + build: h7f98852_5 + build_number: 5 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 + sha256: ab6e9856c21709b7b517e940ae7028ae0737546122f83c2aa5d692860c3b149e + md5: d645c6d2ac96843a2bfaccd2d62b3ac3 + depends: + - libgcc-ng >=9.4.0 + license: MIT + license_family: MIT + size: 58292 + timestamp: 1636488182923 +- kind: conda + name: libgcc + version: 14.1.0 + build: h77fa898_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.1.0-h77fa898_1.conda + sha256: 10fa74b69266a2be7b96db881e18fa62cfa03082b65231e8d652e897c4b335a3 + md5: 002ef4463dd1e2b44a94a4ace468f5d2 + depends: + - _libgcc_mutex 0.1 conda_forge + - _openmp_mutex >=4.5 + constrains: + - libgomp 14.1.0 h77fa898_1 + - libgcc-ng ==14.1.0=*_1 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 846380 + timestamp: 1724801836552 +- kind: conda + name: libgcc-ng + version: 14.1.0 + build: h69a702a_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-14.1.0-h69a702a_1.conda + sha256: b91f7021e14c3d5c840fbf0dc75370d6e1f7c7ff4482220940eaafb9c64613b7 + md5: 1efc0ad219877a73ef977af7dbb51f17 + depends: + - libgcc 14.1.0 h77fa898_1 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 52170 + timestamp: 1724801842101 +- kind: conda + name: libgfortran + version: 5.0.0 + build: 13_2_0_hd922786_3 + build_number: 3 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran-5.0.0-13_2_0_hd922786_3.conda + sha256: 44e541b4821c96b28b27fef5630883a60ce4fee91fd9c79f25a199f8f73f337b + md5: 4a55d9e169114b2b90d3ec4604cd7bbf + depends: + - libgfortran5 13.2.0 hf226fd6_3 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 110233 + timestamp: 1707330749033 +- kind: conda + name: libgfortran + version: 14.1.0 + build: h69a702a_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-14.1.0-h69a702a_1.conda + sha256: ed77f04f873e43a26e24d443dd090631eedc7d0ace3141baaefd96a123e47535 + md5: 591e631bc1ae62c64f2ab4f66178c097 + depends: + - libgfortran5 14.1.0 hc5f4f2c_1 + constrains: + - libgfortran-ng ==14.1.0=*_1 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 52142 + timestamp: 1724801872472 +- kind: conda + name: libgfortran-ng + version: 14.1.0 + build: h69a702a_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libgfortran-ng-14.1.0-h69a702a_1.conda + sha256: a2dc35cb7f87bb5beebf102d4085574c6a740e1df58e743185d4434cc5e4e0ae + md5: 16cec94c5992d7f42ae3f9fa8b25df8d + depends: + - libgfortran 14.1.0 h69a702a_1 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 52212 + timestamp: 1724802086021 +- kind: conda + name: libgfortran5 + version: 13.2.0 + build: hf226fd6_3 + build_number: 3 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libgfortran5-13.2.0-hf226fd6_3.conda + sha256: bafc679eedb468a86aa4636061c55966186399ee0a04b605920d208d97ac579a + md5: 66ac81d54e95c534ae488726c1f698ea + depends: + - llvm-openmp >=8.0.0 + constrains: + - libgfortran 5.0.0 13_2_0_*_3 + license: GPL-3.0-only WITH GCC-exception-3.1 + 
license_family: GPL + size: 997381 + timestamp: 1707330687590 +- kind: conda + name: libgfortran5 + version: 14.1.0 + build: hc5f4f2c_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-14.1.0-hc5f4f2c_1.conda + sha256: c40d7db760296bf9c776de12597d2f379f30e890b9ae70c1de962ff2aa1999f6 + md5: 10a0cef64b784d6ab6da50ebca4e984d + depends: + - libgcc >=14.1.0 + constrains: + - libgfortran 14.1.0 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 1459939 + timestamp: 1724801851300 +- kind: conda + name: libgomp + version: 14.1.0 + build: h77fa898_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.1.0-h77fa898_1.conda + sha256: c96724c8ae4ee61af7674c5d9e5a3fbcf6cd887a40ad5a52c99aa36f1d4f9680 + md5: 23c255b008c4f2ae008f81edcabaca89 + depends: + - _libgcc_mutex 0.1 conda_forge + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 460218 + timestamp: 1724801743478 +- kind: conda + name: liblapack + version: 3.9.0 + build: 23_linux64_openblas + build_number: 23 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-23_linux64_openblas.conda + sha256: 25c7aef86c8a1d9db0e8ee61aa7462ba3b46b482027a65d66eb83e3e6f949043 + md5: 2af0879961951987e464722fd00ec1e0 + depends: + - libblas 3.9.0 23_linux64_openblas + constrains: + - liblapacke 3.9.0 23_linux64_openblas + - libcblas 3.9.0 23_linux64_openblas + - blas * openblas + license: BSD-3-Clause + license_family: BSD + size: 14823 + timestamp: 1721688775172 +- kind: conda + name: liblapack + version: 3.9.0 + build: 23_osxarm64_openblas + build_number: 23 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/liblapack-3.9.0-23_osxarm64_openblas.conda + sha256: 13799a137ffc80786725e7e2820d37d4c0d59dbb76013a14c21771415b0a4263 + md5: 754ef44f72ab80fd14eaa789ac393a27 + depends: + - libblas 3.9.0 23_osxarm64_openblas + constrains: + - blas * openblas + - liblapacke 3.9.0 23_osxarm64_openblas + - libcblas 3.9.0 23_osxarm64_openblas + license: BSD-3-Clause + license_family: BSD + size: 14999 + timestamp: 1721689026268 +- kind: conda + name: libnsl + version: 2.0.1 + build: hd590300_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.1-hd590300_0.conda + sha256: 26d77a3bb4dceeedc2a41bd688564fe71bf2d149fdcf117049970bc02ff1add6 + md5: 30fd6e37fe21f86f4bd26d6ee73eeec7 + depends: + - libgcc-ng >=12 + license: LGPL-2.1-only + license_family: GPL + size: 33408 + timestamp: 1697359010159 +- kind: conda + name: libopenblas + version: 0.3.27 + build: openmp_h517c56d_1 + build_number: 1 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libopenblas-0.3.27-openmp_h517c56d_1.conda + sha256: 46cfcc592b5255262f567cd098be3c61da6bca6c24d640e878dc8342b0f6d069 + md5: 71b8a34d70aa567a990162f327e81505 + depends: + - __osx >=11.0 + - libgfortran 5.* + - libgfortran5 >=12.3.0 + - llvm-openmp >=16.0.6 + constrains: + - openblas >=0.3.27,<0.3.28.0a0 + license: BSD-3-Clause + license_family: BSD + size: 2925328 + timestamp: 1720425811743 +- kind: conda + name: libopenblas + version: 0.3.27 + build: pthreads_hac2b453_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.27-pthreads_hac2b453_1.conda + sha256: 714cb82d7c4620ea2635a92d3df263ab841676c9b183d0c01992767bb2451c39 + md5: ae05ece66d3924ac3d48b4aa3fa96cec + depends: + - libgcc-ng >=12 + - libgfortran-ng + - 
libgfortran5 >=12.3.0 + constrains: + - openblas >=0.3.27,<0.3.28.0a0 + license: BSD-3-Clause + license_family: BSD + size: 5563053 + timestamp: 1720426334043 +- kind: conda + name: libsodium + version: 1.0.20 + build: h4ab18f5_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.20-h4ab18f5_0.conda + sha256: 0105bd108f19ea8e6a78d2d994a6d4a8db16d19a41212070d2d1d48a63c34161 + md5: a587892d3c13b6621a6091be690dbca2 + depends: + - libgcc-ng >=12 + license: ISC + size: 205978 + timestamp: 1716828628198 +- kind: conda + name: libsodium + version: 1.0.20 + build: h99b78c6_0 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.20-h99b78c6_0.conda + sha256: fade8223e1e1004367d7101dd17261003b60aa576df6d7802191f8972f7470b1 + md5: a7ce36e284c5faaf93c220dfc39e3abd + depends: + - __osx >=11.0 + license: ISC + size: 164972 + timestamp: 1716828607917 +- kind: conda + name: libsqlite + version: 3.46.1 + build: hadc24fc_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.46.1-hadc24fc_0.conda + sha256: 9851c049abafed3ee329d6c7c2033407e2fc269d33a75c071110ab52300002b0 + md5: 36f79405ab16bf271edb55b213836dac + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + - libzlib >=1.3.1,<2.0a0 + license: Unlicense + size: 865214 + timestamp: 1725353659783 +- kind: conda + name: libsqlite + version: 3.46.1 + build: hc14010f_0 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libsqlite-3.46.1-hc14010f_0.conda + sha256: 3725f962f490c5d44dae326d5f5b2e3c97f71a6322d914ccc85b5ddc2e50d120 + md5: 58050ec1724e58668d0126a1615553fa + depends: + - __osx >=11.0 + - libzlib >=1.3.1,<2.0a0 + license: Unlicense + size: 829500 + timestamp: 1725353720793 +- kind: conda + name: libstdcxx + version: 14.1.0 + build: hc0a3c3a_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-14.1.0-hc0a3c3a_1.conda + sha256: 44decb3d23abacf1c6dd59f3c152a7101b7ca565b4ef8872804ceaedcc53a9cd + md5: 9dbb9699ea467983ba8a4ba89b08b066 + depends: + - libgcc 14.1.0 h77fa898_1 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 3892781 + timestamp: 1724801863728 +- kind: conda + name: libstdcxx-ng + version: 14.1.0 + build: h4852527_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-14.1.0-h4852527_1.conda + sha256: a2dc44f97290740cc187bfe94ce543e6eb3c2ea8964d99f189a1d8c97b419b8c + md5: bd2598399a70bb86d8218e95548d735e + depends: + - libstdcxx 14.1.0 hc0a3c3a_1 + license: GPL-3.0-only WITH GCC-exception-3.1 + license_family: GPL + size: 52219 + timestamp: 1724801897766 +- kind: conda + name: libuuid + version: 2.38.1 + build: h0b41bf4_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.38.1-h0b41bf4_0.conda + sha256: 787eb542f055a2b3de553614b25f09eefb0a0931b0c87dbcce6efdfd92f04f18 + md5: 40b61aab5c7ba9ff276c41cfffe6b80b + depends: + - libgcc-ng >=12 + license: BSD-3-Clause + license_family: BSD + size: 33601 + timestamp: 1680112270483 +- kind: conda + name: libxcrypt + version: 4.4.36 + build: hd590300_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libxcrypt-4.4.36-hd590300_1.conda + sha256: 6ae68e0b86423ef188196fff6207ed0c8195dd84273cb5623b85aa08033a410c + md5: 5aa797f8787fe7a17d1b0821485b5adc + depends: + - libgcc-ng >=12 + license: LGPL-2.1-or-later + size: 100393 + timestamp: 1702724383534 +- kind: conda + name: 
libzlib + version: 1.3.1 + build: h4ab18f5_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.3.1-h4ab18f5_1.conda + sha256: adf6096f98b537a11ae3729eaa642b0811478f0ea0402ca67b5108fe2cb0010d + md5: 57d7dc60e9325e3de37ff8dffd18e814 + depends: + - libgcc-ng >=12 + constrains: + - zlib 1.3.1 *_1 + license: Zlib + license_family: Other + size: 61574 + timestamp: 1716874187109 +- kind: conda + name: libzlib + version: 1.3.1 + build: hfb2fe0b_1 + build_number: 1 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/libzlib-1.3.1-hfb2fe0b_1.conda + sha256: c34365dd37b0eab27b9693af32a1f7f284955517c2cc91f1b88a7ef4738ff03e + md5: 636077128927cf79fd933276dc3aed47 + depends: + - __osx >=11.0 + constrains: + - zlib 1.3.1 *_1 + license: Zlib + license_family: Other + size: 46921 + timestamp: 1716874262512 +- kind: conda + name: llvm-openmp + version: 18.1.8 + build: hde57baf_1 + build_number: 1 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-18.1.8-hde57baf_1.conda + sha256: 7a76e2932ac77e6314bfa1c4ff83f617c8260313bfed1b8401b508ed3e9d70ba + md5: fe89757e3cd14bb1c6ebd68dac591363 + depends: + - __osx >=11.0 + constrains: + - openmp 18.1.8|18.1.8.* + license: Apache-2.0 WITH LLVM-exception + license_family: APACHE + size: 276263 + timestamp: 1723605341828 +- kind: conda + name: max + version: 24.5.0 + build: release + subdir: noarch + noarch: python + url: https://conda.modular.com/max/noarch/max-24.5.0-release.conda + sha256: 3050d7885a304944afbf93ca9786e56e6df20f0685e1705f88fab045fb5aae70 + md5: 662a61803cd141e857d3b9f821c7bd66 + depends: + - max-core ==24.5.0 release + - max-python >=24.5.0,<25.0a0 + - mojo-jupyter ==24.5.0 release + - mblack ==24.5.0 release + size: 9642 + timestamp: 1726172475909 +- kind: conda + name: max-core + version: 24.5.0 + build: release + subdir: linux-64 + url: https://conda.modular.com/max/linux-64/max-core-24.5.0-release.conda + sha256: 4cd4ab217863a500e9df8112d5e4c335192baa4f527aaaacb925b7818dd2bbe1 + md5: a9b3f9d69310032f687789c475c029f5 + depends: + - mblack ==24.5.0 release + arch: x86_64 + platform: linux + size: 284994357 + timestamp: 1726172475907 +- kind: conda + name: max-core + version: 24.5.0 + build: release + subdir: osx-arm64 + url: https://conda.modular.com/max/osx-arm64/max-core-24.5.0-release.conda + sha256: 8848071dde1f98a4da8e39c90f9210098e7c3c4aaddd0e2255fd9fe1f01df0b7 + md5: fba502bf5142da57735a593ccf35a255 + depends: + - mblack ==24.5.0 release + arch: arm64 + platform: osx + size: 244231803 + timestamp: 1726175523753 +- kind: conda + name: max-python + version: 24.5.0 + build: 3.12release + subdir: linux-64 + url: https://conda.modular.com/max/linux-64/max-python-24.5.0-3.12release.conda + sha256: b5b0f36bb4c91bdff229fc680d7d2e4dd183e9dc90808869408e5883d95199ba + md5: e8dbea1cf138f97c022103a4b41c77bd + depends: + - max-core ==24.5.0 release + - python 3.12.* + - numpy >=1.18,<2.0 + - python_abi 3.12.* *_cp312 + arch: x86_64 + platform: linux + size: 138310039 + timestamp: 1726172475912 +- kind: conda + name: max-python + version: 24.5.0 + build: 3.12release + subdir: osx-arm64 + url: https://conda.modular.com/max/osx-arm64/max-python-24.5.0-3.12release.conda + sha256: e6cdd0477236d49d4f6586d4a66ffe1c5e5cb188535a8ec09ed742eda12cbf5f + md5: f33d8f4cc5c17d893fdb5d6e162c08c6 + depends: + - max-core ==24.5.0 release + - python 3.12.* + - numpy >=1.18,<2.0 + - python_abi 3.12.* *_cp312 + arch: arm64 + platform: osx + size: 125388933 + 
timestamp: 1726175523755 +- kind: conda + name: mblack + version: 24.5.0 + build: release + subdir: noarch + noarch: python + url: https://conda.modular.com/max/noarch/mblack-24.5.0-release.conda + sha256: 913881fc3aa19db447ed82e898f261a413be9129dc43b9ea600e06030f76dbd5 + md5: 2bc6ce9f257235686dc1b2509cc7198d + depends: + - python >=3.9,<3.13 + - click >=8.0.0 + - mypy_extensions >=0.4.3 + - packaging >=22.0 + - pathspec >=0.9.0 + - platformdirs >=2 + - python + license: MIT + size: 130435 + timestamp: 1726172475910 +- kind: conda + name: mojo-jupyter + version: 24.5.0 + build: release + subdir: noarch + noarch: python + url: https://conda.modular.com/max/noarch/mojo-jupyter-24.5.0-release.conda + sha256: dff2e857eae32ce92fde12a712756d647f0aa312aeb5d79b350b2acbc71a2f96 + md5: 3b7be5cbff5b8015b095e950506be4b3 + depends: + - max-core ==24.5.0 release + - python >=3.9,<3.13 + - jupyter_client >=8.6.2,<8.7 + - python + size: 21595 + timestamp: 1726172475911 +- kind: conda + name: mypy_extensions + version: 1.0.0 + build: pyha770c72_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/mypy_extensions-1.0.0-pyha770c72_0.conda + sha256: f240217476e148e825420c6bc3a0c0efb08c0718b7042fae960400c02af858a3 + md5: 4eccaeba205f0aed9ac3a9ea58568ca3 + depends: + - python >=3.5 + license: MIT + license_family: MIT + size: 10492 + timestamp: 1675543414256 +- kind: conda + name: ncurses + version: '6.5' + build: h7bae524_1 + build_number: 1 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/ncurses-6.5-h7bae524_1.conda + sha256: 27d0b9ff78ad46e1f3a6c96c479ab44beda5f96def88e2fe626e0a49429d8afc + md5: cb2b0ea909b97b3d70cd3921d1445e1a + depends: + - __osx >=11.0 + license: X11 AND BSD-3-Clause + size: 802321 + timestamp: 1724658775723 +- kind: conda + name: ncurses + version: '6.5' + build: he02047a_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.5-he02047a_1.conda + sha256: 6a1d5d8634c1a07913f1c525db6455918cbc589d745fac46d9d6e30340c8731a + md5: 70caf8bb6cf39a0b6b7efc885f51c0fe + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc-ng >=12 + license: X11 AND BSD-3-Clause + size: 889086 + timestamp: 1724658547447 +- kind: conda + name: numpy + version: 1.26.4 + build: py312h8442bc7_0 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/numpy-1.26.4-py312h8442bc7_0.conda + sha256: c8841d6d6f61fd70ca80682efbab6bdb8606dc77c68d8acabfbd7c222054f518 + md5: d83fc83d589e2625a3451c9a7e21047c + depends: + - libblas >=3.9.0,<4.0a0 + - libcblas >=3.9.0,<4.0a0 + - libcxx >=16 + - liblapack >=3.9.0,<4.0a0 + - python >=3.12,<3.13.0a0 + - python >=3.12,<3.13.0a0 *_cpython + - python_abi 3.12.* *_cp312 + constrains: + - numpy-base <0a0 + license: BSD-3-Clause + license_family: BSD + size: 6073136 + timestamp: 1707226249608 +- kind: conda + name: numpy + version: 1.26.4 + build: py312heda63a1_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/numpy-1.26.4-py312heda63a1_0.conda + sha256: fe3459c75cf84dcef6ef14efcc4adb0ade66038ddd27cadb894f34f4797687d8 + md5: d8285bea2a350f63fab23bf460221f3f + depends: + - libblas >=3.9.0,<4.0a0 + - libcblas >=3.9.0,<4.0a0 + - libgcc-ng >=12 + - liblapack >=3.9.0,<4.0a0 + - libstdcxx-ng >=12 + - python >=3.12,<3.13.0a0 + - python_abi 3.12.* *_cp312 + constrains: + - numpy-base <0a0 + license: BSD-3-Clause + license_family: BSD + size: 7484186 + timestamp: 1707225809722 +- kind: conda + name: openssl + version: 3.3.2 + build: h8359307_0 + 
subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/openssl-3.3.2-h8359307_0.conda + sha256: 940fa01c4dc6152158fe8943e05e55a1544cab639df0994e3b35937839e4f4d1 + md5: 1773ebccdc13ec603356e8ff1db9e958 + depends: + - __osx >=11.0 + - ca-certificates + license: Apache-2.0 + license_family: Apache + size: 2882450 + timestamp: 1725410638874 +- kind: conda + name: openssl + version: 3.3.2 + build: hb9d3cd8_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/openssl-3.3.2-hb9d3cd8_0.conda + sha256: cee91036686419f6dd6086902acf7142b4916e1c4ba042e9ca23e151da012b6d + md5: 4d638782050ab6faa27275bed57e9b4e + depends: + - __glibc >=2.17,<3.0.a0 + - ca-certificates + - libgcc >=13 + license: Apache-2.0 + license_family: Apache + size: 2891789 + timestamp: 1725410790053 +- kind: conda + name: packaging + version: '24.1' + build: pyhd8ed1ab_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/packaging-24.1-pyhd8ed1ab_0.conda + sha256: 36aca948219e2c9fdd6d80728bcc657519e02f06c2703d8db3446aec67f51d81 + md5: cbe1bb1f21567018ce595d9c2be0f0db + depends: + - python >=3.8 + license: Apache-2.0 + license_family: APACHE + size: 50290 + timestamp: 1718189540074 +- kind: conda + name: pathspec + version: 0.12.1 + build: pyhd8ed1ab_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/pathspec-0.12.1-pyhd8ed1ab_0.conda + sha256: 4e534e66bfe8b1e035d2169d0e5b185450546b17e36764272863e22e0370be4d + md5: 17064acba08d3686f1135b5ec1b32b12 + depends: + - python >=3.7 + license: MPL-2.0 + license_family: MOZILLA + size: 41173 + timestamp: 1702250135032 +- kind: conda + name: platformdirs + version: 4.3.2 + build: pyhd8ed1ab_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/platformdirs-4.3.2-pyhd8ed1ab_0.conda + sha256: 3aef5bb863a2db94e47272fd5ec5a5e4b240eafba79ebb9df7a162797cf035a3 + md5: e1a2dfcd5695f0744f1bcd3bbfe02523 + depends: + - python >=3.8 + license: MIT + license_family: MIT + size: 20623 + timestamp: 1725821846879 +- kind: conda + name: python + version: 3.12.5 + build: h2ad013b_0_cpython + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/python-3.12.5-h2ad013b_0_cpython.conda + sha256: e2aad83838988725d4ffba4e9717b9328054fd18a668cff3377e0c50f109e8bd + md5: 9c56c4df45f6571b13111d8df2448692 + depends: + - __glibc >=2.17,<3.0.a0 + - bzip2 >=1.0.8,<2.0a0 + - ld_impl_linux-64 >=2.36.1 + - libexpat >=2.6.2,<3.0a0 + - libffi >=3.4,<4.0a0 + - libgcc-ng >=12 + - libnsl >=2.0.1,<2.1.0a0 + - libsqlite >=3.46.0,<4.0a0 + - libuuid >=2.38.1,<3.0a0 + - libxcrypt >=4.4.36 + - libzlib >=1.3.1,<2.0a0 + - ncurses >=6.5,<7.0a0 + - openssl >=3.3.1,<4.0a0 + - readline >=8.2,<9.0a0 + - tk >=8.6.13,<8.7.0a0 + - tzdata + - xz >=5.2.6,<6.0a0 + constrains: + - python_abi 3.12.* *_cp312 + license: Python-2.0 + size: 31663253 + timestamp: 1723143721353 +- kind: conda + name: python + version: 3.12.6 + build: h739c21a_0_cpython + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.12.6-h739c21a_0_cpython.conda + sha256: 7dc75f4a7f800426e39ba219a1202c00b002cd0c792e34e077d3d7c145ef0199 + md5: 1d0f564edfc8121b35a4dc2d25b62863 + depends: + - __osx >=11.0 + - bzip2 >=1.0.8,<2.0a0 + - libexpat >=2.6.3,<3.0a0 + - libffi >=3.4,<4.0a0 + - libsqlite >=3.46.1,<4.0a0 + - libzlib >=1.3.1,<2.0a0 + - ncurses >=6.5,<7.0a0 + - openssl >=3.3.2,<4.0a0 + - readline >=8.2,<9.0a0 + - tk >=8.6.13,<8.7.0a0 + - tzdata + - xz >=5.2.6,<6.0a0 + constrains: + - 
python_abi 3.12.* *_cp312 + license: Python-2.0 + size: 12877861 + timestamp: 1726030796871 +- kind: conda + name: python-dateutil + version: 2.9.0 + build: pyhd8ed1ab_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0-pyhd8ed1ab_0.conda + sha256: f3ceef02ac164a8d3a080d0d32f8e2ebe10dd29e3a685d240e38b3599e146320 + md5: 2cf4264fffb9e6eff6031c5b6884d61c + depends: + - python >=3.7 + - six >=1.5 + license: Apache-2.0 + license_family: APACHE + size: 222742 + timestamp: 1709299922152 +- kind: conda + name: python_abi + version: '3.12' + build: 5_cp312 + build_number: 5 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.12-5_cp312.conda + sha256: d10e93d759931ffb6372b45d65ff34d95c6000c61a07e298d162a3bc2accebb0 + md5: 0424ae29b104430108f5218a66db7260 + constrains: + - python 3.12.* *_cpython + license: BSD-3-Clause + license_family: BSD + size: 6238 + timestamp: 1723823388266 +- kind: conda + name: python_abi + version: '3.12' + build: 5_cp312 + build_number: 5 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.12-5_cp312.conda + sha256: 49d624e4b809c799d2bf257b22c23cf3fc4460f5570d9a58e7ad86350aeaa1f4 + md5: b76f9b1c862128e56ac7aa8cd2333de9 + constrains: + - python 3.12.* *_cpython + license: BSD-3-Clause + license_family: BSD + size: 6278 + timestamp: 1723823099686 +- kind: conda + name: pyzmq + version: 26.2.0 + build: py312hbf22597_2 + build_number: 2 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-26.2.0-py312hbf22597_2.conda + sha256: a2431644cdef4111f7120565090114f52897e687e83c991bd76a3baef8de77c4 + md5: 44f46ddfdd01d242d2fff2d69a0d7cba + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + - libsodium >=1.0.20,<1.0.21.0a0 + - libstdcxx >=13 + - python >=3.12,<3.13.0a0 + - python_abi 3.12.* *_cp312 + - zeromq >=4.3.5,<4.4.0a0 + license: BSD-3-Clause + license_family: BSD + size: 378667 + timestamp: 1725449078945 +- kind: conda + name: pyzmq + version: 26.2.0 + build: py312hc6335d2_2 + build_number: 2 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-26.2.0-py312hc6335d2_2.conda + sha256: 8d46c0f1af50989f308b9da68e6123bc3560f3a3a741b4e7cb8867c603b5a9f1 + md5: ca61d76f24d66c2938af62e882c9a02d + depends: + - __osx >=11.0 + - libcxx >=17 + - libsodium >=1.0.20,<1.0.21.0a0 + - python >=3.12,<3.13.0a0 + - python >=3.12,<3.13.0a0 *_cpython + - python_abi 3.12.* *_cp312 + - zeromq >=4.3.5,<4.4.0a0 + license: BSD-3-Clause + license_family: BSD + size: 359594 + timestamp: 1725449428595 +- kind: conda + name: readline + version: '8.2' + build: h8228510_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8228510_1.conda + sha256: 5435cf39d039387fbdc977b0a762357ea909a7694d9528ab40f005e9208744d7 + md5: 47d31b792659ce70f470b5c82fdfb7a4 + depends: + - libgcc-ng >=12 + - ncurses >=6.3,<7.0a0 + license: GPL-3.0-only + license_family: GPL + size: 281456 + timestamp: 1679532220005 +- kind: conda + name: readline + version: '8.2' + build: h92ec313_1 + build_number: 1 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h92ec313_1.conda + sha256: a1dfa679ac3f6007362386576a704ad2d0d7a02e98f5d0b115f207a2da63e884 + md5: 8cbb776a2f641b943d413b3e19df71f4 + depends: + - ncurses >=6.3,<7.0a0 + license: GPL-3.0-only + license_family: GPL + size: 250351 + timestamp: 1679532511311 +- kind: conda + name: six + version: 1.16.0 + build: 
pyh6c4a22f_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/six-1.16.0-pyh6c4a22f_0.tar.bz2 + sha256: a85c38227b446f42c5b90d9b642f2c0567880c15d72492d8da074a59c8f91dd6 + md5: e5f25f8dbc060e9a8d912e432202afc2 + depends: + - python + license: MIT + license_family: MIT + size: 14259 + timestamp: 1620240338595 +- kind: conda + name: tk + version: 8.6.13 + build: h5083fa2_1 + build_number: 1 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/tk-8.6.13-h5083fa2_1.conda + sha256: 72457ad031b4c048e5891f3f6cb27a53cb479db68a52d965f796910e71a403a8 + md5: b50a57ba89c32b62428b71a875291c9b + depends: + - libzlib >=1.2.13,<2.0.0a0 + license: TCL + license_family: BSD + size: 3145523 + timestamp: 1699202432999 +- kind: conda + name: tk + version: 8.6.13 + build: noxft_h4845f30_101 + build_number: 101 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.13-noxft_h4845f30_101.conda + sha256: e0569c9caa68bf476bead1bed3d79650bb080b532c64a4af7d8ca286c08dea4e + md5: d453b98d9c83e71da0741bb0ff4d76bc + depends: + - libgcc-ng >=12 + - libzlib >=1.2.13,<2.0.0a0 + license: TCL + license_family: BSD + size: 3318875 + timestamp: 1699202167581 +- kind: conda + name: tornado + version: 6.4.1 + build: py312h024a12e_1 + build_number: 1 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/tornado-6.4.1-py312h024a12e_1.conda + sha256: 5eefede1d8a2f55892bc582dbcb574b1806f19bc1e3939ce56b79721b9406db7 + md5: 967bc97bb9e258993289546479af971f + depends: + - __osx >=11.0 + - python >=3.12,<3.13.0a0 + - python >=3.12,<3.13.0a0 *_cpython + - python_abi 3.12.* *_cp312 + license: Apache-2.0 + license_family: Apache + size: 841722 + timestamp: 1724956439106 +- kind: conda + name: tornado + version: 6.4.1 + build: py312h66e93f0_1 + build_number: 1 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/tornado-6.4.1-py312h66e93f0_1.conda + sha256: c0c9cc7834e8f43702956afaa5af7b0639c4835c285108a43e6b91687ce53ab8 + md5: af648b62462794649066366af4ecd5b0 + depends: + - __glibc >=2.17,<3.0.a0 + - libgcc >=13 + - python >=3.12,<3.13.0a0 + - python_abi 3.12.* *_cp312 + license: Apache-2.0 + license_family: Apache + size: 837665 + timestamp: 1724956252424 +- kind: conda + name: traitlets + version: 5.14.3 + build: pyhd8ed1ab_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/traitlets-5.14.3-pyhd8ed1ab_0.conda + sha256: 8a64fa0f19022828513667c2c7176cfd125001f3f4b9bc00d33732e627dd2592 + md5: 3df84416a021220d8b5700c613af2dc5 + depends: + - python >=3.8 + license: BSD-3-Clause + license_family: BSD + size: 110187 + timestamp: 1713535244513 +- kind: conda + name: tzdata + version: 2024a + build: h8827d51_1 + build_number: 1 + subdir: noarch + noarch: generic + url: https://conda.anaconda.org/conda-forge/noarch/tzdata-2024a-h8827d51_1.conda + sha256: 7d21c95f61319dba9209ca17d1935e6128af4235a67ee4e57a00908a1450081e + md5: 8bfdead4e0fff0383ae4c9c50d0531bd + license: LicenseRef-Public-Domain + size: 124164 + timestamp: 1724736371498 +- kind: conda + name: xz + version: 5.2.6 + build: h166bdaf_0 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 + sha256: 03a6d28ded42af8a347345f82f3eebdd6807a08526d47899a42d62d319609162 + md5: 2161070d867d1b1204ea749c8eec4ef0 + depends: + - libgcc-ng >=12 + license: LGPL-2.1 and GPL-2.0 + size: 418368 + timestamp: 1660346797927 +- kind: conda + name: xz + version: 5.2.6 + build: h57fd34a_0 + subdir: 
osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/xz-5.2.6-h57fd34a_0.tar.bz2 + sha256: 59d78af0c3e071021cfe82dc40134c19dab8cdf804324b62940f5c8cd71803ec + md5: 39c6b54e94014701dd157f4f576ed211 + license: LGPL-2.1 and GPL-2.0 + size: 235693 + timestamp: 1660346961024 +- kind: conda + name: zeromq + version: 4.3.5 + build: h64debc3_5 + build_number: 5 + subdir: osx-arm64 + url: https://conda.anaconda.org/conda-forge/osx-arm64/zeromq-4.3.5-h64debc3_5.conda + sha256: b4ba544a04129472651a5df3b8906ed68e7f43bf23e724fd0e368218083c920c + md5: c29dbe9343a0b55b027fa645644c59d9 + depends: + - __osx >=11.0 + - krb5 >=1.21.3,<1.22.0a0 + - libcxx >=17 + - libsodium >=1.0.20,<1.0.21.0a0 + license: MPL-2.0 + license_family: MOZILLA + size: 296355 + timestamp: 1725430145243 +- kind: conda + name: zeromq + version: 4.3.5 + build: ha4adb4c_5 + build_number: 5 + subdir: linux-64 + url: https://conda.anaconda.org/conda-forge/linux-64/zeromq-4.3.5-ha4adb4c_5.conda + sha256: dd48adc07fcd029c86fbf82e68d0e4818c7744b768e08139379920b56b582814 + md5: e8372041ebb377237db9d0d24c7b5962 + depends: + - __glibc >=2.17,<3.0.a0 + - krb5 >=1.21.3,<1.22.0a0 + - libgcc >=13 + - libsodium >=1.0.20,<1.0.21.0a0 + - libstdcxx >=13 + license: MPL-2.0 + license_family: MOZILLA + size: 353159 + timestamp: 1725429777124 +- kind: conda + name: zipp + version: 3.20.2 + build: pyhd8ed1ab_0 + subdir: noarch + noarch: python + url: https://conda.anaconda.org/conda-forge/noarch/zipp-3.20.2-pyhd8ed1ab_0.conda + sha256: 1e84fcfa41e0afdd87ff41e6fbb719c96a0e098c1f79be342293ab0bd8dea322 + md5: 4daaed111c05672ae669f7036ee5bba3 + depends: + - python >=3.8 + license: MIT + license_family: MIT + size: 21409 + timestamp: 1726248679175 diff --git a/mojoproject.toml b/mojoproject.toml new file mode 100644 index 0000000..467afc6 --- /dev/null +++ b/mojoproject.toml @@ -0,0 +1,16 @@ +[project] +authors = ["Mikhail Tavarez "] +channels = ["conda-forge", "https://conda.modular.com/max"] +description = "Experiments in porting over Golang stdlib into Mojo." +name = "gojo" +platforms = ["osx-arm64", "linux-64"] +version = "0.1.8" + +[tasks] +tests = "bash scripts/tests.sh" +benchmarks = "bash scripts/benchmarks.sh" +build = { cmd = "rattler-build build -r src -c https://conda.modular.com/max -c conda-forge --skip-existing=all", env = {MODULAR_MOJO_IMPORT_PATH = "$CONDA_PREFIX/lib/mojo"} } +publish = { cmd = "bash scripts/publish.sh", env = { PREFIX_API_KEY = "$PREFIX_API_KEY" } } + +[dependencies] +max = ">=24.5.0,<25" diff --git a/scripts/benchmarks.sh b/scripts/benchmarks.sh new file mode 100755 index 0000000..e114177 --- /dev/null +++ b/scripts/benchmarks.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +TEMP_DIR=~/tmp +PACKAGE_NAME=gojo +mkdir -p $TEMP_DIR + +echo "[INFO] Building $PACKAGE_NAME package and running benchmarks." +cp -R benchmarks/ $TEMP_DIR +magic run mojo package src/$PACKAGE_NAME -o $TEMP_DIR/$PACKAGE_NAME.mojopkg + +echo "[INFO] Running benchmarks..." +magic run mojo $TEMP_DIR/scanner.mojo +magic run mojo $TEMP_DIR/string_builder.mojo + +echo "[INFO] Cleaning up the benchmarks directory." 
+rm -R $TEMP_DIR diff --git a/scripts/publish.sh b/scripts/publish.sh new file mode 100644 index 0000000..7aea890 --- /dev/null +++ b/scripts/publish.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# ignore errors because we want to ignore duplicate packages +for file in $CONDA_BLD_PATH/**/*.conda; do + magic run rattler-build upload prefix -c "mojo-community" "$file" || true +done + +rm $CONDA_BLD_PATH/**/*.conda diff --git a/scripts/tests.sh b/scripts/tests.sh new file mode 100755 index 0000000..87e3822 --- /dev/null +++ b/scripts/tests.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +TEMP_DIR=~/tmp +PACKAGE_NAME=gojo +mkdir -p $TEMP_DIR + +echo "[INFO] Building $PACKAGE_NAME package and copying tests." +cp -R test/ $TEMP_DIR +magic run mojo package src/$PACKAGE_NAME -o $TEMP_DIR/$PACKAGE_NAME.mojopkg + +echo "[INFO] Running tests..." +magic run mojo test $TEMP_DIR + +echo "[INFO] Cleaning up the test directory." +rm -R $TEMP_DIR diff --git a/benchmarks/__init__.mojo b/src/gojo/__init__.mojo similarity index 100% rename from benchmarks/__init__.mojo rename to src/gojo/__init__.mojo diff --git a/src/gojo/bufio/__init__.mojo b/src/gojo/bufio/__init__.mojo new file mode 100644 index 0000000..e18dc17 --- /dev/null +++ b/src/gojo/bufio/__init__.mojo @@ -0,0 +1,14 @@ +from .reader import Reader +from .writer import Writer +from .scan import Scanner, scan_words, scan_bytes, scan_lines, scan_runes + + +alias MIN_READ_BUFFER_SIZE = 16 +alias MAX_CONSECUTIVE_EMPTY_READS = 100 + +alias ERR_INVALID_UNREAD_BYTE = "bufio: invalid use of unread_byte" +alias ERR_INVALID_UNREAD_RUNE = "bufio: invalid use of unread_rune" +alias ERR_BUFFER_FULL = "bufio: buffer full" +alias ERR_NEGATIVE_COUNT = "bufio: negative count" +alias ERR_NEGATIVE_READ = "bufio: reader returned negative count from Read" +alias ERR_NEGATIVE_WRITE = "bufio: writer returned negative count from write" diff --git a/src/gojo/bufio/reader.mojo b/src/gojo/bufio/reader.mojo new file mode 100644 index 0000000..5345db0 --- /dev/null +++ b/src/gojo/bufio/reader.mojo @@ -0,0 +1,678 @@ +from utils import Span +from os import abort +from algorithm.memory import parallel_memcpy +import ..io +from ..bytes import index_byte +from ..strings import StringBuilder + + +fn copy[ + T: CollectionElement, is_trivial: Bool +](inout target: List[T, is_trivial], source: List[T, is_trivial], start: Int = 0) -> Int: + """Copies the contents of source into target at the same index. + + Args: + target: The buffer to copy into. + source: The buffer to copy from. + start: The index to start copying into. + + Returns: + The number of bytes copied. + """ + var count = 0 + + for i in range(len(source)): + if i + start > len(target): + target[i + start] = source[i] + else: + target.append(source[i]) + count += 1 + + return count + + +# buffered input +struct Reader[R: io.Reader, //](Sized, io.Reader, io.ByteReader, io.ByteScanner, io.WriterTo): + """Implements buffering for an io.Reader object. + + Examples: + ```mojo + import gojo.bytes + import gojo.bufio + var buf = bytes.Buffer(capacity=16) + _ = buf.write_string("Hello, World!") + var reader = bufio.Reader(buf^) + + var dest = List[UInt8, True](capacity=16) + _ = reader.read(dest) + dest.append(0) + print(String(dest)) # Output: Hello, World! 
+ ``` + """ + + var buf: List[UInt8, True] + """Internal buffer.""" + var reader: R + """Reader provided by the client.""" + var read_pos: Int + """Buffer read position.""" + var write_pos: Int + """Buffer write position.""" + var last_byte: Int + """Last byte read for unread_byte; -1 means invalid.""" + var last_rune_size: Int + """Size of last rune read for unread_rune; -1 means invalid.""" + var err: Error + """Error encountered during reading.""" + + fn __init__( + inout self, + owned reader: R, + *, + capacity: Int = io.BUFFER_SIZE, + ): + """Initializes a new buffered reader with the provided reader and buffer capacity. + + Args: + reader: The reader to buffer. + capacity: The initial buffer capacity. + """ + self.buf = List[UInt8, True](capacity=capacity) + self.reader = reader^ + self.read_pos = 0 + self.write_pos = 0 + self.last_byte = -1 + self.last_rune_size = -1 + self.err = Error() + + fn __moveinit__(inout self, owned existing: Self): + self.buf = existing.buf^ + self.reader = existing.reader^ + self.read_pos = existing.read_pos + self.write_pos = existing.write_pos + self.last_byte = existing.last_byte + self.last_rune_size = existing.last_rune_size + self.err = existing.err^ + + fn __len__(self) -> Int: + """Returns the size of the underlying buffer in bytes.""" + return len(self.buf) + + # reset discards any buffered data, resets all state, and switches + # the buffered reader to read from r. + # Calling reset on the zero value of [Reader] initializes the internal buffer + # to the default size. + # Calling self.reset(b) (that is, resetting a [Reader] to itself) does nothing. + # fn reset[R: io.Reader](self, reader: R): + # # If a Reader r is passed to NewReader, NewReader will return r. + # # Different layers of code may do that, and then later pass r + # # to reset. Avoid infinite recursion in that case. + # if self == reader: + # return + + # # if self.buf == nil: + # # self.buf = make(InlineList[UInt8, io.BUFFER_SIZE], io.BUFFER_SIZE) + + # self.reset(self.buf, r) + + fn as_bytes_slice(ref [_]self) -> Span[UInt8, __lifetime_of(self)]: + """Returns the internal data as a Span[UInt8].""" + return Span[UInt8, __lifetime_of(self)](unsafe_ptr=self.buf.unsafe_ptr(), len=self.buf.size) + + fn reset(inout self, owned reader: R) -> None: + """Discards any buffered data, resets all state, and switches + the buffered reader to read from `reader`. Calling reset on the `Reader` returns the internal buffer to the default size. + + Args: + reader: The reader to buffer. + """ + self = Reader(reader^) + + fn fill(inout self) -> None: + """Reads a new chunk into the internal buffer from the reader.""" + # Slide existing data to beginning. + if self.read_pos > 0: + var data_to_slide = self.as_bytes_slice()[self.read_pos : self.write_pos] + for i in range(len(data_to_slide)): + self.buf[i] = data_to_slide[i] + + self.write_pos -= self.read_pos + self.read_pos = 0 + + # Compares to the capacity of the internal buffer. + # IE. var b = List[UInt8, True](capacity=4096), then trying to write at b[4096] and onwards will fail. + if self.write_pos >= self.buf.capacity: + abort("bufio.Reader: tried to fill full buffer") + + # Read new data: try a limited number of times. 
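As a quick, hedged illustration of the constructor and `reset` defined above (the buffer contents and variable names are invented for the example, and `bytes.Buffer` is used as the underlying reader):

```mojo
import gojo.bytes
import gojo.bufio


fn main():
    # Buffer the first source, giving the Reader a custom internal capacity.
    var first = bytes.Buffer(capacity=32)
    _ = first.write_string("first source")
    var reader = bufio.Reader(first^, capacity=64)

    # Later, drop any buffered state and point the same Reader at a new source.
    var second = bytes.Buffer(capacity=32)
    _ = second.write_string("second source")
    reader.reset(second^)
```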
+ var i: Int = MAX_CONSECUTIVE_EMPTY_READS + while i > 0: + var dest_ptr = self.buf.unsafe_ptr().offset(self.buf.size) + var bytes_read: Int + var err: Error + bytes_read, err = self.reader._read(dest_ptr, self.buf.capacity - self.buf.size) + if bytes_read < 0: + abort(ERR_NEGATIVE_READ) + + self.buf.size += bytes_read + self.write_pos += bytes_read + + if err: + self.err = err + return + + if bytes_read > 0: + return + + i -= 1 + + self.err = Error(str(io.ERR_NO_PROGRESS)) + + fn read_error(inout self) -> Error: + """Returns the error encountered during reading.""" + if not self.err: + return Error() + + var err = self.err + self.err = Error() + return err + + fn peek(inout self, number_of_bytes: Int) -> (Span[UInt8, __lifetime_of(self)], Error): + """Returns the next `number_of_bytes` bytes without advancing the reader. The bytes stop + being valid at the next read call. If `peek` returns fewer than `number_of_bytes` bytes, it + also returns an error explaining why the read is short. The error is + `ERR_BUFFER_FULL` if `number_of_bytes` is larger than the internal buffer's capacity. + + Calling `peek` prevents a `Reader.unread_byte` or `Reader.unread_rune` call from succeeding + until the next read operation. + + Args: + number_of_bytes: The number of bytes to peek. + + Returns: + A reference to the bytes in the internal buffer, and an error if one occurred. + """ + if number_of_bytes < 0: + return self.as_bytes_slice()[0:0], Error(ERR_NEGATIVE_COUNT) + + self.last_byte = -1 + self.last_rune_size = -1 + + while self.write_pos - self.read_pos < number_of_bytes and self.write_pos - self.read_pos < self.buf.capacity: + self.fill() # self.write_pos-self.read_pos < self.capacity => buffer is not full + + if number_of_bytes > self.buf.size: + return self.as_bytes_slice()[self.read_pos : self.write_pos], Error(ERR_BUFFER_FULL) + + # 0 <= n <= self.buf.size + var err = Error() + var available_space = self.write_pos - self.read_pos + if available_space < number_of_bytes: + # not enough data in buffer + err = self.read_error() + if not err: + err = Error(ERR_BUFFER_FULL) + + return self.as_bytes_slice()[self.read_pos : self.read_pos + number_of_bytes], err + + fn discard(inout self, number_of_bytes: Int) -> (Int, Error): + """Skips the next `number_of_bytes` bytes. + + If fewer than `number_of_bytes` bytes are skipped, `discard` returns an error. + If 0 <= `number_of_bytes` <= `self.buffered()`, `discard` is guaranteed to succeed without + reading from the underlying `io.Reader`. + + Args: + number_of_bytes: The number of bytes to skip. + + Returns: + The number of bytes skipped, and an error if one occurred. + """ + if number_of_bytes < 0: + return 0, Error(ERR_NEGATIVE_COUNT) + + if number_of_bytes == 0: + return 0, Error() + + self.last_byte = -1 + self.last_rune_size = -1 + + var remain = number_of_bytes + while True: + var skip = self.buffered() + if skip == 0: + self.fill() + skip = self.buffered() + + if skip > remain: + skip = remain + + self.read_pos += skip + remain -= skip + if remain == 0: + return number_of_bytes, Error() + + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + """Reads data into `dest`. + + The bytes are taken from at most one `read` on the underlying `io.Reader`, + hence n may be less than `len(src`). + + To read exactly `len(src)` bytes, use `io.read_full(b, src)`. + If the underlying `io.Reader` can return a non-zero count with `io.EOF`, + then this `read` method can do so as well; see the `io.Reader` docs. 
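A hedged sketch of how `peek`, `discard`, and `buffered` (the latter appears a little further down) might be combined; the request text and sizes below are invented:

```mojo
import gojo.bytes
import gojo.bufio


fn main():
    var src = bytes.Buffer(capacity=32)
    _ = src.write_string("GET /index HTTP/1.1")
    var reader = bufio.Reader(src^)

    # Look at the first three bytes without consuming them.
    var peeked = reader.peek(3)
    if peeked[1]:
        print("peek error:", str(peeked[1]))

    # Skip those three bytes, then report how much is still buffered.
    _ = reader.discard(3)
    print(reader.buffered())
```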
+ + Args: + dest: The buffer to read data into. + capacity: The capacity of the destination buffer. + + Returns: + The number of bytes read into dest. + """ + if capacity == 0: + if self.buffered() > 0: + return 0, Error() + return 0, self.read_error() + + var bytes_read: Int = 0 + if self.read_pos == self.write_pos: + if capacity >= len(self.buf): + # Large read, empty buffer. + # Read directly into dest to avoid copy. + var bytes_read: Int + bytes_read, self.err = self.reader._read(dest, capacity) + + if bytes_read < 0: + abort(ERR_NEGATIVE_READ) + + if bytes_read > 0: + self.last_byte = int(dest[bytes_read - 1]) + self.last_rune_size = -1 + + return bytes_read, self.read_error() + + # One read. + # Do not use self.fill, which will loop. + self.read_pos = 0 + self.write_pos = 0 + var buf = self.buf.unsafe_ptr().offset(self.buf.size) + var bytes_read: Int + bytes_read, self.err = self.reader._read(buf, self.buf.capacity - self.buf.size) + + if bytes_read < 0: + abort(ERR_NEGATIVE_READ) + + if bytes_read == 0: + return 0, self.read_error() + + self.write_pos += bytes_read + + # copy as much as we can + var source = self.as_bytes_slice()[self.read_pos : self.write_pos] + var bytes_to_write = min(capacity, len(source)) + parallel_memcpy(dest, source.unsafe_ptr(), bytes_to_write) + self.read_pos += bytes_to_write + self.last_byte = int(self.buf[self.read_pos - 1]) + self.last_rune_size = -1 + return bytes_to_write, Error() + + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + """Reads data into `dest`. + + The bytes are taken from at most one `read` on the underlying `io.Reader`, + hence n may be less than `len(src`). + + To read exactly `len(src)` bytes, use `io.read_full(b, src)`. + If the underlying `io.Reader` can return a non-zero count with `io.EOF`, + then this `read` method can do so as well; see the `io.Reader` docs. + + Args: + dest: The buffer to read data into. + + Returns: + The number of bytes read into dest. + """ + var dest_ptr = dest.unsafe_ptr().offset(dest.size) + var bytes_read: Int + var err: Error + bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) + dest.size += bytes_read + + return bytes_read, err + + fn read_byte(inout self) -> (UInt8, Error): + """Reads and returns a single byte from the internal buffer. + + Returns: + The byte read from the internal buffer. If no byte is available, returns an error. + """ + self.last_rune_size = -1 + while self.read_pos == self.write_pos: + if self.err: + return UInt8(0), self.read_error() + self.fill() # buffer is empty + + var c = self.as_bytes_slice()[self.read_pos] + self.read_pos += 1 + self.last_byte = int(c) + return c, Error() + + fn unread_byte(inout self) -> Error: + """Unreads the last byte. Only the most recently read byte can be unread. + + Returns: + `unread_byte` returns an error if the most recent method called on the + `Reader` was not a read operation. Notably, `Reader.peek`, `Reader.discard`, and `Reader.write_to` are not + considered read operations. + """ + if self.last_byte < 0 or self.read_pos == 0 and self.write_pos > 0: + return Error(ERR_INVALID_UNREAD_BYTE) + + # self.read_pos > 0 or self.write_pos == 0 + if self.read_pos > 0: + self.read_pos -= 1 + else: + # self.read_pos == 0 and self.write_pos == 0 + self.write_pos = 1 + + self.as_bytes_slice()[self.read_pos] = self.last_byte + self.last_byte = -1 + self.last_rune_size = -1 + return Error() + + # # read_rune reads a single UTF-8 encoded Unicode character and returns the + # # rune and its size in bytes. 
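And a small sketch of the byte-level API (`read_byte` / `unread_byte`) shown above; again, the source text is invented:

```mojo
import gojo.bytes
import gojo.bufio


fn main():
    var src = bytes.Buffer(capacity=16)
    _ = src.write_string("abc")
    var reader = bufio.Reader(src^)

    # Read one byte, then push it back so the next read sees it again.
    var result = reader.read_byte()
    if not result[1]:
        print(result[0])  # 97, the byte value of 'a'

    var err = reader.unread_byte()
    if err:
        print("unread failed:", str(err))
```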
If the encoded rune is invalid, it consumes one byte + # # and returns unicode.ReplacementChar (U+FFFD) with a size of 1. + # fn read_rune(inout self) (r rune, size int, err error): + # for self.read_pos+utf8.UTFMax > self.write_pos and !utf8.FullRune(self.as_bytes_slice()[self.read_pos:self.write_pos]) and self.err == nil and self.write_pos-self.read_pos < self.buf.capacity: + # self.fill() # self.write_pos-self.read_pos < len(buf) => buffer is not full + + # self.last_rune_size = -1 + # if self.read_pos == self.write_pos: + # return 0, 0, self.read_poseadErr() + + # r, size = rune(self.as_bytes_slice()[self.read_pos]), 1 + # if r >= utf8.RuneSelf: + # r, size = utf8.DecodeRune(self.as_bytes_slice()[self.read_pos:self.write_pos]) + + # self.read_pos += size + # self.last_byte = int(self.as_bytes_slice()[self.read_pos-1]) + # self.last_rune_size = size + # return r, size, nil + + # # unread_rune unreads the last rune. If the most recent method called on + # # the [Reader] was not a [Reader.read_rune], [Reader.unread_rune] returns an error. (In this + # # regard it is stricter than [Reader.unread_byte], which will unread the last byte + # # from any read operation.) + # fn unread_rune() error: + # if self.last_rune_size < 0 or self.read_pos < self.last_rune_size: + # return ERR_INVALID_UNREAD_RUNE + + # self.read_pos -= self.last_rune_size + # self.last_byte = -1 + # self.last_rune_size = -1 + # return nil + + fn buffered(self) -> Int: + """Returns the number of bytes that can be read from the current buffer. + + Returns: + The number of bytes that can be read from the current buffer. + """ + return self.write_pos - self.read_pos + + fn _search_buffer(inout self, delim: UInt8) -> (Span[UInt8, __lifetime_of(self)], Error): + var start = 0 # search start index + while True: + # Search buffer. + var i = index_byte(self.as_bytes_slice()[self.read_pos + start : self.write_pos], delim) + if i >= 0: + i += start + line = self.as_bytes_slice()[self.read_pos : self.read_pos + i + 1] + self.read_pos += i + 1 + return line, Error() + + # Pending error? + if self.err: + line = self.as_bytes_slice()[self.read_pos : self.write_pos] + self.read_pos = self.write_pos + err = self.read_error() + return line, err + + # Buffer full? + if self.buffered() >= self.buf.capacity: + self.read_pos = self.write_pos + line = self.as_bytes_slice() + err = Error(ERR_BUFFER_FULL) + return line, err + + start = self.write_pos - self.read_pos # do not rescan area we scanned before + self.fill() # buffer is not full + + fn read_slice(inout self, delim: UInt8) -> (Span[UInt8, __lifetime_of(self)], Error): + """Reads until the first occurrence of `delim` in the input, returning a slice pointing at the bytes in the buffer. + It includes the first occurrence of the delimiter. The bytes stop being valid at the next read. + + If `read_slice` encounters an error before finding a delimiter, it returns all the data in the buffer and the error itself (often `io.EOF`). + `read_slice` fails with error `ERR_BUFFER_FULL` if the buffer fills without a `delim`. + Because the data returned from `read_slice` will be overwritten by the next I/O operation, + most clients should use `Reader.read_bytes` or `Reader.read_string` instead. + `read_slice` returns an error if and only if line does not end in delim. + + Args: + delim: The delimiter to search for. + + Returns: + A reference to a Span of bytes from the internal buffer. + """ + var result = self._search_buffer(delim) + + # Handle last byte, if any. 
+ var i = len(result[0]) - 1 + if i >= 0: + self.last_byte = int(result[0][i]) + self.last_rune_size = -1 + + return result[0], result[1] + + fn read_line(inout self) -> (List[UInt8, True], Bool): + """Low-level line-reading primitive. Most callers should use + `Reader.read_bytes('\\n')` or `Reader.read_string]('\\n')` instead or use a `Scanner`. + + `read_line` tries to return a single line, not including the end-of-line bytes. + + The text returned from `read_line` does not include the line end ("\\r\\n" or "\\n"). + No indication or error is given if the input ends without a final line end. + Calling `Reader.unread_byte` after `read_line` will always unread the last byte read + (possibly a character belonging to the line end) even if that byte is not + part of the line returned by `read_line`. + """ + var line: Span[UInt8, __lifetime_of(self)] + var err: Error + line, err = self.read_slice(ord("\n")) + + if err and str(err) == ERR_BUFFER_FULL: + # Handle the case where "\r\n" straddles the buffer. + if len(line) > 0 and line[len(line) - 1] == ord("\r"): + # Put the '\r' back on buf and drop it from line. + # Let the next call to read_line check for "\r\n". + if self.read_pos == 0: + # should be unreachable + abort("bufio: tried to rewind past start of buffer") + + self.read_pos -= 1 + line = line[: len(line) - 1] + return List[UInt8, True](line), True + + if len(line) == 0: + return List[UInt8, True](line), False + + if line[len(line) - 1] == ord("\n"): + var drop = 1 + if len(line) > 1 and line[len(line) - 2] == ord("\r"): + drop = 2 + + line = line[: len(line) - drop] + + return List[UInt8, True](line), False + + fn collect_fragments( + inout self, delim: UInt8 + ) -> (List[List[UInt8, True]], Span[UInt8, __lifetime_of(self)], Int, Error): + """Reads until the first occurrence of `delim` in the input. It + returns (list of full buffers, remaining bytes before `delim`, total number + of bytes in the combined first two elements, error). + + Args: + delim: The delimiter to search for. + + Returns: + List of full buffers, the remaining bytes before `delim`, the total number of bytes in the combined first two elements, and an error if one occurred. + """ + # Use read_slice to look for delim, accumulating full buffers. + var err = Error() + var full_buffers = List[List[UInt8, True]]() + var total_len = 0 + var frag: Span[UInt8, __lifetime_of(self)] + while True: + frag, err = self.read_slice(delim) + if not err: + break + + var read_slice_error = err + if str(read_slice_error) != ERR_BUFFER_FULL: + err = read_slice_error + break + + # Make a copy of the buffer Span. + var buf = List[UInt8, True](frag) + full_buffers.append(buf) + total_len += len(buf) + + total_len += len(frag) + return full_buffers, frag, total_len, err + + fn read_bytes(inout self, delim: UInt8) -> (List[UInt8, True], Error): + """Reads until the first occurrence of `delim` in the input, + returning a List containing the data up to and including the delimiter. + + If `read_bytes` encounters an error before finding a delimiter, + it returns the data read before the error and the error itself (often `io.EOF`). + `read_bytes` returns an error if and only if the returned data does not end in + `delim`. For simple uses, a `Scanner` may be more convenient. + + Args: + delim: The delimiter to search for. + + Returns: + The a copy of the bytes from the internal buffer as a list. 
+ """ + var full: List[List[UInt8, True]] + var frag: Span[UInt8, __lifetime_of(self)] + var n: Int + var err: Error + full, frag, n, err = self.collect_fragments(delim) + + # Allocate new buffer to hold the full pieces and the fragment. + var buf = List[UInt8, True](capacity=n) + n = 0 + + # copy full pieces and fragment in. + for i in range(len(full)): + n += copy(buf, full[i], n) + + _ = copy(buf, frag, n) + return buf, err + + fn read_string(inout self, delim: UInt8) -> (String, Error): + """Reads until the first occurrence of `delim` in the input, + returning a string containing the data up to and including the delimiter. + + If `read_string` encounters an error before finding a delimiter, + it returns the data read before the error and the error itself (often `io.EOF`). + read_string returns an error if and only if the returned data does not end in + `delim`. For simple uses, a `Scanner` may be more convenient. + + Args: + delim: The delimiter to search for. + + Returns: + A copy of the data from the internal buffer as a String. + """ + var full: List[List[UInt8, True]] + var frag: Span[UInt8, __lifetime_of(self)] + var n: Int + var err: Error + full, frag, n, err = self.collect_fragments(delim) + + # Allocate new buffer to hold the full pieces and the fragment. + var buf = StringBuilder(capacity=n) + + # copy full pieces and fragment in. + for i in range(len(full)): + var buffer = full[i] + _ = buf.write(Span(buffer)) + + _ = buf.write(frag) + return str(buf), err + + fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int, Error): + """Writes the internal buffer to the writer. + This may make multiple calls to the `Reader.read` method of the underlying `Reader`. + + Args: + writer: The writer to write to. + + Returns: + The number of bytes written. + """ + self.last_byte = -1 + self.last_rune_size = -1 + + var bytes_written: Int + var err: Error + bytes_written, err = self.write_buf(writer) + if err: + return bytes_written, err + + # internal buffer not full, fill before writing to writer + if (self.write_pos - self.read_pos) < self.buf.capacity: + self.fill() + + while self.read_pos < self.write_pos: + # self.read_pos < self.write_pos => buffer is not empty + var bw: Int + var err: Error + bw, err = self.write_buf(writer) + bytes_written += bw + + self.fill() # buffer is empty + + return bytes_written, Error() + + fn write_buf[W: io.Writer](inout self, inout writer: W) -> (Int, Error): + """Writes the `Reader`'s buffer to the `writer`. + + Args: + writer: The writer to write to. + + Returns: + The number of bytes written. + """ + # Nothing to write + if self.read_pos == self.write_pos: + return Int(0), Error() + + # Write the buffer to the writer, if we hit EOF it's fine. That's not a failure condition. 
+ var bytes_written: Int + var err: Error + var buf_to_write = self.as_bytes_slice()[self.read_pos : self.write_pos] + bytes_written, err = writer.write(List[UInt8, True](buf_to_write)) + if err: + return bytes_written, err + + if bytes_written < 0: + abort(ERR_NEGATIVE_WRITE) + + self.read_pos += bytes_written + return Int(bytes_written), Error() diff --git a/gojo/bufio/scan.mojo b/src/gojo/bufio/scan.mojo similarity index 55% rename from gojo/bufio/scan.mojo rename to src/gojo/bufio/scan.mojo index 64a7cd3..54325de 100644 --- a/gojo/bufio/scan.mojo +++ b/src/gojo/bufio/scan.mojo @@ -1,91 +1,130 @@ +from utils import StringSlice, Span +from os import abort +from algorithm.memory import parallel_memcpy +from bit import count_leading_zeros import ..io -from ..builtins import copy, panic -from ..builtins.bytes import index_byte -from .bufio import MAX_CONSECUTIVE_EMPTY_READS +from ..bytes import index_byte alias MAX_INT: Int = 2147483647 -struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The function to split the tokens. - """Scanner provides a convenient Interface for reading data such as +struct Scanner[R: io.Reader, //, split: SplitFunction = scan_lines](): + """`Scanner` provides a convenient interface for reading data such as a file of newline-delimited lines of text. Successive calls to - the [Scanner.Scan] method will step through the 'tokens' of a file, skipping + the `Scanner.scan` method will step through the 'tokens' of a file, skipping the bytes between the tokens. The specification of a token is - defined by a split function of type [SplitFunction]; the default split - function breaks the input Into lines with line termination stripped. [Scanner.split] - fntions are defined in this package for scanning a file Into + defined by a split function of type `SplitFunction`. + + The default split function breaks the input int lines with line termination stripped. + `Scanner.split` functions are defined in this package for scanning a file into lines, bytes, UTF-8-encoded runes, and space-delimited words. The client may instead provide a custom split function. Scanning stops unrecoverably at EOF, the first I/O error, or a token too - large to fit in the [Scanner.buffer]. When a scan stops, the reader may have + large to fit in the `Scanner.buffer`. When a scan stops, the reader may have advanced arbitrarily far past the last token. Programs that need more control over error handling or large tokens, or must run sequential scans - on a reader, should use [bufio.Reader] instead.""" - - var reader: R # The reader provided by the client. - var max_token_size: Int # Maximum size of a token; modified by tests. - var token: List[UInt8] # Last token returned by split. - var buf: List[UInt8] # buffer used as argument to split. - var start: Int # First non-processed byte in buf. - var end: Int # End of data in buf. - var empties: Int # Count of successive empty tokens. - var scan_called: Bool # Scan has been called; buffer is in use. - var done: Bool # Scan has finished. + on a reader, should use `bufio.Reader` instead. 
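To make the description above concrete, here is a hedged usage sketch of `Scanner` with the default `scan_lines` split function (the input text is invented):

```mojo
import gojo.bytes
import gojo.bufio


fn main():
    var src = bytes.Buffer("first line\nsecond line\n")
    var scanner = bufio.Scanner(src^)

    # Each call to scan() advances to the next newline-delimited token.
    while scanner.scan():
        print(scanner.current_token())
```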
+ """ + + var reader: R + """The reader provided by the client.""" + var max_token_size: Int + """Maximum size of a token; modified by tests.""" + var token: List[UInt8, True] + """Last token returned by split.""" + var buf: List[UInt8, True] + """Internal buffer used as argument to split.""" + var start: Int + """First non-processed byte in buf.""" + var end: Int + """End of data in buf.""" + var empties: Int + """Count of successive empty tokens.""" + var scan_called: Bool + """Scan has been called; buffer is in use.""" + var done: Bool + """Scan has finished.""" var err: Error + """Error encountered during scanning.""" fn __init__( inout self, owned reader: R, + *, + capacity: Int = START_BUF_SIZE, max_token_size: Int = MAX_SCAN_TOKEN_SIZE, - token: List[UInt8] = List[UInt8](capacity=io.BUFFER_SIZE), - buf: List[UInt8] = List[UInt8](capacity=io.BUFFER_SIZE), - start: Int = 0, - end: Int = 0, - empties: Int = 0, - scan_called: Bool = False, - done: Bool = False, ): + """Initializes a new Scanner. + + Params: + R: The type of io.Reader. + split: The split function to use. + + Args: + reader: The reader to scan. + capacity: The initial capacity of the internal buffer. + max_token_size: The maximum size of a token. + """ self.reader = reader^ self.max_token_size = max_token_size - self.token = token - self.buf = buf - self.start = start - self.end = end - self.empties = empties - self.scan_called = scan_called - self.done = done + self.token = List[UInt8, True](capacity=capacity) + self.buf = List[UInt8, True](capacity=capacity) + self.start = 0 + self.end = 0 + self.empties = 0 + self.scan_called = False + self.done = False self.err = Error() - @always_inline - fn as_bytes_slice(self: Reference[Self]) -> Span[UInt8, self.is_mutable, self.lifetime]: - """Returns the internal buffer data as a Span[UInt8].""" - return Span[UInt8, self.is_mutable, self.lifetime](self[].buf) - - fn current_token_as_bytes(self) -> List[UInt8]: - """Returns the most recent token generated by a call to [Scanner.Scan]. - The underlying array may point to data that will be overwritten - by a subsequent call to Scan. It does no allocation. 
- """ + fn __moveinit__(inout self, owned other: Self) -> None: + self.reader = other.reader^ + self.max_token_size = other.max_token_size + self.token = other.token^ + self.buf = other.buf^ + self.start = other.start + self.end = other.end + self.empties = other.empties + self.scan_called = other.scan_called + self.done = other.done + self.err = other.err^ + + fn as_bytes_slice(ref [_]self) -> Span[UInt8, __lifetime_of(self)]: + """Returns the internal data as a Span[UInt8].""" + return Span[UInt8, __lifetime_of(self)](unsafe_ptr=self.buf.unsafe_ptr(), len=self.buf.size) + + fn current_token_as_bytes_slice(ref [_]self) -> Span[UInt8, __lifetime_of(self)]: + """Returns the most recent token generated by a call to `Scanner.scan`.""" + return Span[UInt8, __lifetime_of(self)](unsafe_ptr=self.token.unsafe_ptr(), len=self.token.size) + + fn current_token_as_string_slice(ref [_]self) -> StringSlice[__lifetime_of(self)]: + """Returns the most recent token generated by a call to `Scanner.scan`.""" + return StringSlice[__lifetime_of(self)](unsafe_from_utf8_ptr=self.token.unsafe_ptr(), len=self.token.size) + + fn current_token_as_bytes(self) -> List[UInt8, True]: + """Returns the most recent token generated by a call to `Scanner.scan`.""" return self.token fn current_token(self) -> String: - """Returns the most recent token generated by a call to [Scanner.Scan] - as a newly allocated string holding its bytes.""" - var copy = self.token - copy.append(0) - return String(copy^) + """Returns the most recent token generated by a call to `Scanner.scan`.""" + return self.current_token_as_string_slice() fn scan(inout self) -> Bool: - """Advances the [Scanner] to the next token, which will then be - available through the [Scanner.current_token_as_bytes] or [Scanner.current_token] method. + """Advances the `Scanner` to the next token, which will then be + available through the `Scanner.current_token_as_bytes`, `Scanner.current_token`, + `Scanner.current_token_as_bytes_slice`, and `Scanner.current_token_as_string_slice` methods. + It returns False when there are no more tokens, either by reaching the end of the input or an error. - After Scan returns False, the [Scanner.Err] method will return any error that - occurred during scanning, except if it was [io.EOF], [Scanner.Err]. - Scan raises an Error if the split function returns too many empty + After Scan returns False, the `Scanner.set_err` method will return any error that + occurred during scanning, except if it was `io.EOF` or `Scanner.set_err`. + + `scan` raises an Error if the split function returns too many empty tokens without advancing the input. This is a common error mode for scanners. + + Returns: + True if a token was found, False otherwise. """ if self.done: return False @@ -98,8 +137,8 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi # a chance to recover any remaining, possibly empty token. if (self.end > self.start) or self.err: var advance: Int - var token = List[UInt8](capacity=io.BUFFER_SIZE) - var err = Error() + var token: List[UInt8, True] + var err: Error var at_eof = False if self.err: at_eof = True @@ -108,7 +147,7 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi if str(err) == str(ERR_FINAL_TOKEN): self.token = token self.done = True - # When token is not nil, it means the scanning stops + # When token is not empty, it means the scanning stops # with a trailing token, and thus the return value # should be True to indicate the existence of the token. 
return len(token) != 0 @@ -127,7 +166,7 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi # Returning tokens not advancing input at EOF. self.empties += 1 if self.empties > MAX_CONSECUTIVE_EMPTY_READS: - panic("bufio.Scan: too many empty tokens without progressing") + abort("bufio.Scan: too many empty tokens without progressing") return True @@ -143,9 +182,10 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi # First, shift data to beginning of buffer if there's lots of empty space # or space is needed. if self.start > 0 and (self.end == len(self.buf) or self.start > int(len(self.buf) / 2)): - _ = copy(self.buf, self.as_bytes_slice()[self.start : self.end]) + parallel_memcpy(self.buf.unsafe_ptr(), self.buf.unsafe_ptr().offset(self.start), self.end - self.start) self.end -= self.start self.start = 0 + self.buf.size = self.end # Is the buffer full? If so, resize. if self.end == len(self.buf): @@ -158,10 +198,10 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi if new_size == 0: new_size = START_BUF_SIZE - # Make a new List[UInt8] buffer and copy the elements in + # Make a new List[UInt8, True] buffer and copy the elements in new_size = min(new_size, self.max_token_size) - var new_buf = List[UInt8](capacity=new_size) - _ = copy(new_buf, self.buf[self.start : self.end]) + var new_buf = self.buf[self.start : self.end] # slicing returns a new list + new_buf.reserve(new_size) self.buf = new_buf self.end -= self.start self.start = 0 @@ -171,13 +211,13 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi # be extra careful: Scanner is for safe, simple jobs. var loop = 0 while True: - var buf = self.as_bytes_slice()[self.end :] - # Catch any reader errors and set the internal error field to that err instead of bubbling it up. + var dest_ptr = self.buf.unsafe_ptr().offset(self.end) var bytes_read: Int var err: Error - bytes_read, err = self.reader._read(buf, self.buf.capacity - self.end) - if bytes_read < 0 or len(buf) - self.end < bytes_read: + bytes_read, err = self.reader._read(dest_ptr, self.buf.capacity - self.buf.size) + self.buf.size += bytes_read + if bytes_read < 0 or self.buf.size - self.end < bytes_read: self.set_err(ERR_BAD_READ_COUNT) break @@ -195,7 +235,7 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi self.set_err(io.ERR_NO_PROGRESS) break - fn set_err(inout self, err: Error): + fn set_err(inout self, err: Error) -> None: """Set the internal error field to the provided error. Args: @@ -229,35 +269,35 @@ struct Scanner[R: io.Reader, split: SplitFunction = scan_lines](): # The functi return True -# SplitFunction is the signature of the split function used to tokenize the -# input. The arguments are an initial substring of the remaining unprocessed -# data and a flag, at_eof, that reports whether the [Reader] has no more data -# to give. The return values are the number of bytes to advance the input -# and the next token to return to the user, if any, plus an error, if any. -# -# Scanning stops if the function returns an error, in which case some of -# the input may be discarded. If that error is [ERR_FINAL_TOKEN], scanning -# stops with no error. A non-nil token delivered with [ERR_FINAL_TOKEN] -# will be the last token, and a nil token with [ERR_FINAL_TOKEN] -# immediately stops the scanning. -# -# Otherwise, the [Scanner] advances the input. If the token is not nil, -# the [Scanner] returns it to the user. 
If the token is nil, the -# Scanner reads more data and continues scanning; if there is no more -# data--if at_eof was True--the [Scanner] returns. If the data does not -# yet hold a complete token, for instance if it has no newline while -# scanning lines, a [SplitFunction] can return (0, nil, nil) to signal the -# [Scanner] to read more data Into the slice and try again with a -# longer slice starting at the same poInt in the input. -# -# The function is never called with an empty data slice unless at_eof -# is True. If at_eof is True, however, data may be non-empty and, -# as always, holds unprocessed text. alias SplitFunction = fn (data: Span[UInt8], at_eof: Bool) -> ( Int, - List[UInt8], + List[UInt8, True], Error, ) +"""Signature of the split function used to tokenize the +input. The arguments are an initial substring of the remaining unprocessed +data and a flag, at_eof, that reports whether the `Reader` has no more data +to give. The return values are the number of bytes to advance the input +and the next token to return to the user, if any, plus an error, if any. + +Scanning stops if the function returns an error, in which case some of +the input may be discarded. If that error is `ERR_FINAL_TOKEN`, scanning +stops with no error. A token delivered with `ERR_FINAL_TOKEN` +will be the last token, and an empty token with `ERR_FINAL_TOKEN` +immediately stops the scanning. + +Otherwise, the `Scanner` advances the input. If the token is not nil, +the `Scanner` returns it to the user. If the token is nil, the +Scanner reads more data and continues scanning; if there is no more +data--if `at_eof` was True--the `Scanner` returns. If the data does not +yet hold a complete token, for instance if it has no newline while +scanning lines, a `SplitFunction` can return (0, List[UInt8, True](), Error()) to signal the +`Scanner` to read more data Into the slice and try again with a +longer slice starting at the same poInt in the input. + +The function is never called with an empty data slice unless at_eof +is True. If `at_eof` is True, however, data may be non-empty and, +as always, holds unprocessed text.""" # Errors returned by Scanner. alias ERR_TOO_LONG = Error("bufio.Scanner: token too long") @@ -265,35 +305,31 @@ alias ERR_NEGATIVE_ADVANCE = Error("bufio.Scanner: SplitFunction returns negativ alias ERR_ADVANCE_TOO_FAR = Error("bufio.Scanner: SplitFunction returns advance count beyond input") alias ERR_BAD_READ_COUNT = Error("bufio.Scanner: Read returned impossible count") -# ERR_FINAL_TOKEN is a special sentinel error value. It is Intended to be -# returned by a split function to indicate that the scanning should stop -# with no error. If the token being delivered with this error is not nil, -# the token is the last token. -# -# The value is useful to stop processing early or when it is necessary to -# deliver a final empty token (which is different from a nil token). -# One could achieve the same behavior with a custom error value but -# providing one here is tidier. -# See the emptyFinalToken example for a use of this value. + alias ERR_FINAL_TOKEN = Error("final token") +"""Special sentinel error value. It is Intended to be +returned by a split function to indicate that the scanning should stop +with no error. If the token being delivered with this error is not nil, +the token is the last token. +The value is useful to stop processing early or when it is necessary to +deliver a final empty token (which is different from a nil token). 
+One could achieve the same behavior with a custom error value but +providing one here is tidier.""" -# MAX_SCAN_TOKEN_SIZE is the maximum size used to buffer a token -# unless the user provides an explicit buffer with [Scanner.buffer]. -# The actual maximum token size may be smaller as the buffer -# may need to include, for instance, a newline. -alias MAX_SCAN_TOKEN_SIZE = 64 * 1024 -alias START_BUF_SIZE = 4096 # Size of initial allocation for buffer. +alias MAX_SCAN_TOKEN_SIZE = 64 * 1024 +"""Maximum size used to buffer a token +unless the user provides an explicit buffer with `Scanner.buffer`. +The actual maximum token size may be smaller as the buffer +may need to include, for instance, a newline.""" -fn new_scanner[R: io.Reader](owned reader: R) -> Scanner[R]: - """Returns a new [Scanner] to read from r. - The split function defaults to [scan_lines].""" - return Scanner(reader^) +alias START_BUF_SIZE = 4096 +"""Size of initial allocation for buffer.""" ###### split functions ###### -fn scan_bytes(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): +fn scan_bytes(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8, True], Error): """Returns each byte as a token. Args: @@ -304,13 +340,13 @@ fn scan_bytes(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): The number of bytes to advance the input, token in bytes, and an error if one occurred. """ if at_eof and len(data) == 0: - return 0, List[UInt8](), Error() + return 0, List[UInt8, True](), Error() - return 1, List[UInt8](data[0:1]), Error() + return 1, List[UInt8, True](data[0:1]), Error() -fn scan_runes(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): - """Returns each UTF-8-encoded rune as a token. +fn scan_runes(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8, True], Error): + """Returns each UTF-8 encoded rune as a token. Args: data: The data to split. @@ -320,24 +356,23 @@ fn scan_runes(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): The number of bytes to advance the input, token in bytes, and an error if one occurred. """ if at_eof and len(data) == 0: - return 0, List[UInt8](), Error() + return 0, List[UInt8, True](), Error() # Number of bytes of the current character - var char_length = int( - (DTypePointer[DType.uint8](data.unsafe_ptr()).load() >> 7 == 0).cast[DType.uint8]() * 1 - + countl_zero(~DTypePointer[DType.uint8](data.unsafe_ptr()).load()) - ) + var lhs = (((UnsafePointer[Scalar[DType.uint8]].load(data.unsafe_ptr()) >> 7) == 0) * 1).cast[DType.uint8]() + var rhs = count_leading_zeros(~UnsafePointer[Scalar[DType.uint8]].load(data.unsafe_ptr())) + var char_length = int(lhs + rhs) # Copy N bytes into new pointer and construct List. var sp = UnsafePointer[UInt8].alloc(char_length) - memcpy(sp, data.unsafe_ptr(), char_length) - var result = List[UInt8](unsafe_pointer=sp, size=char_length, capacity=char_length) + parallel_memcpy(sp, data.unsafe_ptr(), char_length) + var result = List[UInt8, True](unsafe_pointer=sp, size=char_length, capacity=char_length) return char_length, result, Error() -fn drop_carriage_return(data: Span[UInt8]) -> List[UInt8]: - """Drops a terminal \r from the data. +fn drop_carriage_return(data: Span[UInt8]) -> List[UInt8, True]: + """Drops a terminal \\r from the data. Args: data: The data to strip. 
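As a sketch of selecting a non-default split function, the snippet below passes `scan_words` as the `split` parameter; the keyword-parameter syntax is my assumption and is not confirmed by this diff:

```mojo
import gojo.bytes
import gojo.bufio


fn main():
    var src = bytes.Buffer("foo bar baz")
    # Assumed syntax: hand one of this module's split functions to the `split` parameter.
    var scanner = bufio.Scanner[split = bufio.scan_words](src^)
    while scanner.scan():
        print(scanner.current_token())  # foo, bar, baz on separate lines
```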
@@ -352,7 +387,7 @@ fn drop_carriage_return(data: Span[UInt8]) -> List[UInt8]: return data -fn scan_lines(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): +fn scan_lines(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8, True], Error): """Returns each line of text, stripped of any trailing end-of-line marker. The returned line may be empty. The end-of-line marker is one optional carriage return followed by one mandatory newline. The last non-empty line of input will be returned even if it has no @@ -366,7 +401,7 @@ fn scan_lines(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): The number of bytes to advance the input. """ if at_eof and len(data) == 0: - return 0, List[UInt8](), Error() + return 0, List[UInt8, True](), Error() var i = index_byte(data, ord("\n")) if i >= 0: @@ -389,7 +424,7 @@ fn is_space(r: UInt8) -> Bool: # TODO: Handle runes and utf8 decoding. For now, just assuming single byte length. -fn scan_words(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): +fn scan_words(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8, True], Error): """Returns each space-separated word of text, with surrounding spaces deleted. It will never return an empty string. @@ -417,13 +452,13 @@ fn scan_words(data: Span[UInt8], at_eof: Bool) -> (Int, List[UInt8], Error): while i < len(data): width = len(data[i]) if is_space(data[i]): - return i + width, List[UInt8](data[start:i]), Error() + return i + width, List[UInt8, True](data[start:i]), Error() i += width # If we're at EOF, we have a final, non-empty, non-terminated word. Return it. if at_eof and len(data) > start: - return len(data), List[UInt8](data[start:]), Error() + return len(data), List[UInt8, True](data[start:]), Error() # Request more data. - return start, List[UInt8](), Error() + return start, List[UInt8, True](), Error() diff --git a/src/gojo/bufio/writer.mojo b/src/gojo/bufio/writer.mojo new file mode 100644 index 0000000..3fd48cf --- /dev/null +++ b/src/gojo/bufio/writer.mojo @@ -0,0 +1,285 @@ +from utils import Span +import ..io +from algorithm.memory import parallel_memcpy + + +# buffered output +struct Writer[W: io.Writer, //](Sized, io.Writer, io.ByteWriter, io.StringWriter, io.ReaderFrom): + """Implements buffering for an `io.Writer` object. + If an error occurs writing to a `Writer`, no more data will be + accepted and all subsequent writes, and `Writer.flush`, will return the error. + + After all data has been written, the client should call the + `Writer.flush` method to guarantee all data has been forwarded to + the underlying `io.Writer`. + + Examples: + ```mojo + import gojo.bytes + import gojo.bufio + var buf = bytes.Buffer(capacity=16) + var writer = bufio.Writer(buf^) + + var dest = List[UInt8, True](capacity=16) + var src = String("Hello, World!") + _ = writer.write_string(dest) + ``` + . + """ + + var buf: List[UInt8, True] + """Internal buffer of bytes.""" + var bytes_written: Int + """Number of bytes written to the buffer.""" + var writer: W + """Writer provided by the client.""" + var err: Error + """Error encountered during writing.""" + + fn __init__( + inout self, + owned writer: W, + *, + capacity: Int = io.BUFFER_SIZE, + ): + """Initializes a new buffered writer with the provided writer and buffer capacity. + + Args: + writer: The writer to buffer. + capacity: The initial buffer capacity. 
+ """ + self.buf = List[UInt8, True](capacity=capacity) + self.bytes_written = 0 + self.writer = writer^ + self.err = Error() + + fn __moveinit__(inout self, owned existing: Self): + self.buf = existing.buf^ + self.bytes_written = existing.bytes_written + self.writer = existing.writer^ + self.err = existing.err^ + + fn __len__(self) -> Int: + """Returns the size of the underlying buffer in bytes.""" + return len(self.buf) + + fn as_bytes_slice(ref [_]self) -> Span[UInt8, __lifetime_of(self.buf)]: + """Returns the internal data as a Span[UInt8].""" + return Span[UInt8, __lifetime_of(self.buf)](unsafe_ptr=self.buf.unsafe_ptr(), len=self.buf.size) + + fn reset(inout self, owned writer: W) -> None: + """Discards any unflushed buffered data, clears any error, and + resets the internal buffer to write its output to `writer`. + Calling `reset` initializes the internal buffer to the default size. + + Args: + writer: The writer to write to. + """ + self.err = Error() + self.bytes_written = 0 + self.writer = writer^ + + fn flush(inout self) -> Error: + """Writes any buffered data to the underlying io.Writer`. + + Returns: + An error if one occurred during writing. + """ + # Prior to attempting to flush, check if there's a pre-existing error or if there's nothing to flush. + var err = Error() + if self.err: + return self.err + if self.bytes_written == 0: + return err + + var bytes_written: Int = 0 + bytes_written, err = self.writer.write(self.as_bytes_slice()[0 : self.bytes_written]) + + # If the write was short, set a short write error and try to shift up the remaining bytes. + if bytes_written < self.bytes_written and not err: + err = Error(str(io.ERR_SHORT_WRITE)) + + if err: + if bytes_written > 0 and bytes_written < self.bytes_written: + var temp = self.as_bytes_slice()[bytes_written : self.bytes_written] + parallel_memcpy(self.buf.unsafe_ptr(), temp.unsafe_ptr(), len(temp)) + self.buf.size += len(temp) + + self.bytes_written -= bytes_written + self.err = err + return err + + # Reset the buffer + self.buf.resize(0) + self.bytes_written = 0 + return err + + fn available(self) -> Int: + """Returns how many bytes are unused in the buffer.""" + return self.buf.capacity - len(self.buf) + + fn buffered(self) -> Int: + """Returns the number of bytes that have been written into the current buffer. + + Returns: + The number of bytes that have been written into the current buffer. + """ + return self.bytes_written + + fn write(inout self, src: Span[UInt8]) -> (Int, Error): + """Writes the contents of `src` into the internal buffer. + If `total_bytes_written` < `len(src)`, it also returns an error explaining + why the write is short. + + Args: + src: The bytes to write. + + Returns: + The number of bytes written. + """ + var total_bytes_written: Int = 0 + var src_copy = src # TODO: Make a copy, maybe try a non owning Span + var err = Error() + + # When writing more than the available buffer. + while len(src_copy) > self.available() and not self.err: + var bytes_written: Int = 0 + # Large write, empty buffer. Write directly from p to avoid copy. + if self.buffered() == 0: + bytes_written, err = self.writer.write(src_copy) + self.err = err + + # Write whatever we can to fill the internal buffer, then flush it to the underlying writer. 
+ else: + var bytes_to_write = min(len(src_copy), self.buf.capacity - self.buf.size) + parallel_memcpy(self.buf.unsafe_ptr().offset(self.buf.size), src_copy.unsafe_ptr(), bytes_to_write) + bytes_written += bytes_to_write + self.buf.size += bytes_to_write + self.bytes_written += bytes_to_write + _ = self.flush() + + total_bytes_written += bytes_written + src_copy = src_copy[bytes_written : len(src_copy)] + + if self.err: + return total_bytes_written, self.err + + # Write up to the remaining buffer capacity to the internal buffer, starting from the first available position. + parallel_memcpy(self.buf.unsafe_ptr().offset(self.buf.size), src_copy.unsafe_ptr(), len(src_copy)) + self.buf.size += len(src_copy) + self.bytes_written += len(src_copy) + total_bytes_written += len(src_copy) + return total_bytes_written, err + + fn write_byte(inout self, src: UInt8) -> (Int, Error): + """Writes a single byte to the internal buffer. + + Args: + src: The byte to write. + + Returns: + The number of bytes written, and an error if one occurred. + """ + if self.err: + return 0, self.err + # If buffer is full, flush to the underlying writer. + var err = self.flush() + if self.available() <= 0 and err: + return 0, self.err + + self.buf.append(src) + self.bytes_written += 1 + + return 1, Error() + + # # WriteRune writes a single Unicode code point, returning + # # the number of bytes written and any error. + # fn WriteRune(r rune) (size int, err error): + # # Compare as uint32 to correctly handle negative runes. + # if uint32(r) < utf8.RuneSelf: + # err = self.write_posriteByte(byte(r)) + # if err != nil: + # return 0, err + + # return 1, nil + + # if self.err != nil: + # return 0, self.err + + # n := self.available() + # if n < utf8.UTFMax: + # if self.flush(); self.err != nil: + # return 0, self.err + + # n = self.available() + # if n < utf8.UTFMax: + # # Can only happen if buffer is silly small. + # return self.write_posriteString(string(r)) + + # size = utf8.EncodeRune(self.as_bytes_slice()[self.bytes_written:], r) + # self.bytes_written += size + # return size, nil + + fn write_string(inout self, src: String) -> (Int, Error): + """Writes a string to the internal buffer. + It returns the number of bytes written. + If the count is less than `len(src)`, it also returns an error explaining + why the write is short. + + Args: + src: The string to write. + + Returns: + The number of bytes written. + """ + return self.write(src.as_bytes_slice()) + + fn read_from[R: io.Reader](inout self, inout reader: R) -> (Int, Error): + """If there is buffered data and an underlying `read_from`, this fills + the buffer and writes it before calling `read_from`. + + Args: + reader: The reader to read from. + + Returns: + The number of bytes read. + """ + if self.err: + return 0, self.err + + var bytes_read: Int = 0 + var total_bytes_written: Int = 0 + var err = Error() + while True: + if self.available() == 0: + var err = self.flush() + if err: + return total_bytes_written, err + + var nr = 0 + while nr < MAX_CONSECUTIVE_EMPTY_READS: + # Read into remaining unused space in the buffer. 
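A hedged sketch of the buffered-writer flow: write into the internal buffer, then `flush` to forward everything to the underlying writer (names below are illustrative):

```mojo
import gojo.bytes
import gojo.bufio


fn main():
    var dest = bytes.Buffer(capacity=16)
    var writer = bufio.Writer(dest^)

    _ = writer.write_string("Hello, ")
    _ = writer.write_string("World!")

    # Nothing is guaranteed to reach the underlying writer until flush is called.
    var err = writer.flush()
    if err:
        print("flush failed:", str(err))
```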
+ var buf = self.buf.unsafe_ptr().offset(self.buf.size) + bytes_read, err = reader._read(buf, self.buf.capacity - self.buf.size) + self.buf.size += bytes_read + + if bytes_read != 0 or err: + break + nr += 1 + + if nr == MAX_CONSECUTIVE_EMPTY_READS: + return bytes_read, io.ERR_NO_PROGRESS + + self.bytes_written += bytes_read + total_bytes_written += bytes_read + if err: + break + + if err and str(err) == str(io.EOF): + # If we filled the buffer exactly, flush preemptively. + if self.available() == 0: + err = self.flush() + else: + err = Error() + + return total_bytes_written, Error() diff --git a/src/gojo/bytes/__init__.mojo b/src/gojo/bytes/__init__.mojo new file mode 100644 index 0000000..6aa836d --- /dev/null +++ b/src/gojo/bytes/__init__.mojo @@ -0,0 +1,3 @@ +from .buffer import Buffer +from .reader import Reader +from .util import index_byte, has_suffix, has_prefix, to_string diff --git a/src/gojo/bytes/buffer.mojo b/src/gojo/bytes/buffer.mojo new file mode 100644 index 0000000..ae5dfe1 --- /dev/null +++ b/src/gojo/bytes/buffer.mojo @@ -0,0 +1,498 @@ +from utils import StringSlice, Span +from algorithm.memory import parallel_memcpy +from os import abort +import ..io +from ..bytes import index_byte + + +alias SMALL_BUFFER_SIZE: Int = 64 +"""Initial allocation minimal capacity.""" + +alias ReadOp = Int8 +"""The ReadOp constants describe the last action performed on +the buffer, so that unread_rune and unread_byte can check for +invalid usage. op_read_runeX constants are chosen such that +converted to Int they correspond to the rune size that was read.""" + +alias OP_READ: ReadOp = -1 +"""Any other read operation.""" +alias OP_INVALID: ReadOp = 0 +"""Non-read operation.""" +alias OP_READ_RUNE1: ReadOp = 1 +"""Read rune of size 1.""" +alias OP_READ_RUNE2: ReadOp = 2 +"""Read rune of size 2.""" +alias OP_READ_RUNE3: ReadOp = 3 +"""Read rune of size 3.""" +alias OP_READ_RUNE4: ReadOp = 4 +"""Read rune of size 4.""" + +alias MAX_INT: Int = 2147483647 +alias MIN_READ: Int = 512 +"""MIN_READ is the minimum slice size passed to a read call by +[Buffer.read_from]. As long as the [Buffer] has at least MIN_READ bytes beyond +what is required to hold the contents of r, read_from will not grow the +underlying buffer.""" + +alias ERR_TOO_LARGE = "buffer.Buffer: too large" +"""ERR_TOO_LARGE is passed to panic if memory cannot be allocated to store data in a buffer.""" +alias ERR_NEGATIVE_READ = "buffer.Buffer: reader returned negative count from read" +alias ERR_SHORT_WRITE = "short write" + + +struct Buffer( + Stringable, + Sized, + io.Reader, + io.Writer, + io.StringWriter, + io.ByteWriter, + io.ByteReader, +): + """A Buffer is a variable-sized buffer of bytes with Read and Write methods. + + Examples: + ```mojo + from gojo.bytes import buffer + var buf = buffer.Buffer(capacity=16) + _ = buf.write_string("Hello, World!") + + var dest = List[UInt8, True](capacity=16) + _ = buf.read(dest) + dest.append(0) + print(String(dest)) # Output: Hello, World! + ``` + . + """ + + var _data: UnsafePointer[UInt8] + """The contents of the bytes buffer. Active contents are from buf[off : len(buf)].""" + var _size: Int + """The number of bytes stored in the buffer.""" + var _capacity: Int + """The maximum capacity of the buffer, eg the allocation of self._data.""" + var offset: Int # + """The read/writer offset of the buffer. 
read at buf[off], write at buf[len(buf)].""" + var last_read: ReadOp + """Last read operation, so that unread* can work correctly.""" + + fn __init__(inout self, *, capacity: Int = io.BUFFER_SIZE): + """Creates a new buffer with the specified capacity. + + Args: + capacity: The initial capacity of the buffer. + """ + self._capacity = capacity + self._size = 0 + self._data = UnsafePointer[UInt8]().alloc(capacity) + self.offset = 0 + self.last_read = OP_INVALID + + fn __init__(inout self, owned buf: List[UInt8, True]): + """Creates a new buffer with List buffer provided. + + Args: + buf: The List buffer to initialize the buffer with. + """ + self._capacity = buf.capacity + self._size = buf.size + self._data = buf.steal_data() + self.offset = 0 + self.last_read = OP_INVALID + + fn __init__(inout self, buf: String): + """Creates a new buffer with String provided. + + Args: + buf: The String to initialize the buffer with. + """ + var bytes = buf.as_bytes() + self._capacity = bytes.capacity + self._size = bytes.size + self._data = bytes.steal_data() + self.offset = 0 + self.last_read = OP_INVALID + + fn __init__(inout self, *, owned data: UnsafePointer[UInt8], capacity: Int, size: Int): + """Creates a new buffer with UnsafePointer buffer provided. + + Args: + data: The List buffer to initialize the buffer with. + capacity: The initial capacity of the buffer. + size: The number of bytes stored in the buffer. + """ + self._capacity = capacity + self._size = size + self._data = data + self.offset = 0 + self.last_read = OP_INVALID + + fn __moveinit__(inout self, owned other: Self): + self._data = other._data + self._size = other._size + self._capacity = other._capacity + self.offset = other.offset + self.last_read = other.last_read + other._data = UnsafePointer[UInt8]() + other._size = 0 + other._capacity = 0 + other.offset = 0 + other.last_read = OP_INVALID + + fn __del__(owned self): + if self._data: + self._data.free() + + fn __len__(self) -> Int: + """Returns the number of bytes of the unread portion of the buffer. `self._size - self.offset`.""" + return self._size - self.offset + + fn bytes_ptr(self) -> UnsafePointer[UInt8]: + """Returns a pointer to the beginning of the unread portion of the buffer.""" + return self._data.offset(self.offset) + + fn bytes(self) -> List[UInt8, True]: + """Returns a list of bytes holding a copy of the unread portion of the buffer.""" + var copy = UnsafePointer[UInt8]().alloc(self._size) + parallel_memcpy(copy, self._data.offset(self.offset), self._size) + return List[UInt8, True](unsafe_pointer=copy, size=self._size - self.offset, capacity=self._size - self.offset) + + fn as_bytes_slice(ref [_]self) -> Span[UInt8, __lifetime_of(self)]: + """Returns the internal data as a Span[UInt8].""" + return Span[UInt8, __lifetime_of(self)](unsafe_ptr=self._data, len=self._size) + + fn as_string_slice(ref [_]self) -> StringSlice[__lifetime_of(self)]: + """ + Return a StringSlice view of the data owned by the builder. + + Returns: + The string representation of the bytes buffer. Returns an empty string if the bytes buffer is empty. + """ + return StringSlice[__lifetime_of(self)](unsafe_from_utf8_ptr=self._data, len=self._size) + + fn _resize(inout self, capacity: Int) -> None: + """ + Resizes the string builder buffer. + + Args: + capacity: The new capacity of the string builder buffer. 
+ """ + var new_data = UnsafePointer[UInt8]().alloc(capacity) + parallel_memcpy(new_data, self._data, self._size) + self._data.free() + self._data = new_data + self._capacity = capacity + + return None + + fn _resize_if_needed(inout self, bytes_to_add: Int) -> None: + """Resizes the buffer if the number of bytes to add exceeds the buffer's capacity. + + Args: + bytes_to_add: The number of bytes to add to the buffer. + """ + # TODO: Handle the case where new_capacity is greater than MAX_INT. It should panic. + if bytes_to_add > self._capacity - self._size: + var new_capacity = int(self._capacity * 2) + if new_capacity < self._capacity + bytes_to_add: + new_capacity = self._capacity + bytes_to_add + self._resize(new_capacity) + + fn __str__(self) -> String: + """ + Converts the string builder to a string. + + Returns: + The string representation of the string builder. Returns an empty + string if the string builder is empty. + """ + return self.as_string_slice() + + @deprecated("Buffer.render() has been deprecated. Use Buffer.as_string_slice() or call str() instead.") + fn render(self) -> String: + """ + Return a StringSlice view of the data owned by the builder. + + Returns: + The string representation of the string builder. Returns an empty string if the string builder is empty. + """ + return self.as_string_slice() + + fn write(inout self, src: Span[UInt8]) -> (Int, Error): + """ + Appends a byte Span to the buffer. + + Args: + src: The byte array to append. + + Returns: + The number of bytes written to the buffer. + """ + self._resize_if_needed(len(src)) + + parallel_memcpy(self._data.offset(self._size), src._data, len(src)) + self._size += len(src) + + return len(src), Error() + + fn write_string(inout self, src: String) -> (Int, Error): + """ + Appends a string to the buffer. + + Args: + src: The string to append. + + Returns: + The number of bytes written to the buffer. + """ + return self.write(src.as_bytes_slice()) + + fn write_byte(inout self, byte: UInt8) -> (Int, Error): + """Appends a byte to the buffer, growing the buffer as needed. + The returned error is always empty, but is included to match [bufio.Writer]'s + write_byte. If the buffer becomes too large, write_byte will panic with + `ERR_TOO_LARGE`. + + Args: + byte: The byte to write to the buffer. + + Returns: + The number of bytes written to the buffer. + """ + self.last_read = OP_INVALID + self._resize_if_needed(1) + self._data[self._size] = byte + self._size += 1 + + return 1, Error() + + fn empty(self) -> Bool: + """Reports whether the unread portion of the buffer is empty.""" + return self._size <= self.offset + + fn reset(inout self) -> None: + """Resets the buffer to be empty.""" + if self._data: + self._data.free() + self._data = UnsafePointer[UInt8]().alloc(self._capacity) + self._size = 0 + self.offset = 0 + self.last_read = OP_INVALID + + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + """Reads the next len(dest) bytes from the buffer or until the buffer + is drained. The return value `bytes_read` is the number of bytes read. + + If the buffer has no data to return, err is `io.EOF` (unless `len(dest)` is zero); + otherwise it is empty. + + Args: + dest: The buffer to read into. + capacity: The capacity of the destination buffer. + + Returns: + The number of bytes read from the buffer. + """ + self.last_read = OP_INVALID + if self.empty(): + # Buffer is empty, reset to recover space. 
+ self.reset() + if capacity == 0: + return 0, Error() + return 0, io.EOF + + # Copy the data of the internal buffer from offset to len(buf) into the destination buffer at the given index. + var bytes_to_read = self.as_bytes_slice()[self.offset :] + var count = min(capacity, len(bytes_to_read)) + parallel_memcpy(dest, bytes_to_read.unsafe_ptr(), count) + self.offset += count + + if count > 0: + self.last_read = OP_READ + + return count, Error() + + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + """Reads the next len(dest) bytes from the buffer or until the buffer + is drained. The return value `bytes_read` is the number of bytes read. + + If the buffer has no data to return, err is `io.EOF` (unless `len(dest)` is zero); + otherwise it is empty. + + Args: + dest: The buffer to read into. + + Returns: + The number of bytes read from the buffer. + """ + var dest_ptr = dest.unsafe_ptr().offset(dest.size) + var bytes_read: Int + var err: Error + bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) + dest.size += bytes_read + + return bytes_read, err + + fn read_byte(inout self) -> (UInt8, Error): + """Reads and returns the next byte from the buffer. If no byte is available, it returns error `io.EOF`. + + Returns: + The next byte from the buffer. + """ + if self.empty(): + # Buffer is empty, reset to recover space. + self.reset() + return UInt8(0), io.EOF + + var byte = self._data[self.offset] + self.offset += 1 + self.last_read = OP_READ + + return byte, Error() + + fn unread_byte(inout self) -> Error: + """Unreads the last byte returned by the most recent successful read operation that read at least one byte. + + Returns: + If a write has happened since the last read, the last read returned an error, or if the read read zero + bytes, `unread_byte` returns an error. + """ + if self.last_read == OP_INVALID: + return Error("buffer.Buffer: unread_byte: previous operation was not a successful read") + + self.last_read = OP_INVALID + if self.offset > 0: + self.offset -= 1 + + return Error() + + fn read_bytes(inout self, delim: UInt8) -> (List[UInt8, True], Error): + """Reads until the first occurrence of `delim` in the input, + returning a List copy containing the data up to and including the delimiter. + + If `read_bytes` encounters an error before finding a delimiter, + it returns the data read before the error and the error itself (often `io.EOF`). + `read_bytes` returns an error if and only if the returned data does not end in + `delim`. + + Args: + delim: The delimiter to read until. + + Returns: + A list containing the data up to and including the delimiter. + """ + var slice: Span[UInt8, __lifetime_of(self)] + var err: Error + slice, err = self.read_slice(delim) + + var bytes = List[UInt8, True](capacity=len(slice) + 1) + for byte in slice: + bytes.append(byte[]) + + return bytes, err + + fn read_slice(inout self, delim: UInt8) -> (Span[UInt8, __lifetime_of(self)], Error): + """Like `read_bytes` but returns a reference to internal buffer data. + + Args: + delim: The delimiter to read until. + + Returns: + A span containing the data up to and including the delimiter. 
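+
+        Examples:
+        A minimal sketch (illustrative). The returned Span borrows from the buffer's internal
+        storage; `read_bytes` wraps this method when an owned copy is more convenient:
+        ```mojo
+        from gojo.bytes import buffer
+        var buf = buffer.Buffer(String("hello\nworld"))
+        var line: List[UInt8, True]
+        var err: Error
+        line, err = buf.read_bytes(UInt8(ord("\n")))
+        print(len(line))  # 6 -> "hello\n", including the delimiter
+        ```
+        .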
+ """ + var i = index_byte(bytes=self.as_bytes_slice(), delim=delim) + var end = self.offset + i + 1 + + var err = Error() + if i < 0: + end = self._size + err = Error(str(io.EOF)) + + var line = self.as_bytes_slice()[self.offset : end] + self.offset = end + self.last_read = OP_READ + + return line, err + + fn read_string(inout self, delim: UInt8) -> (String, Error): + """Reads until the first occurrence of `delim` in the input, + returning a string containing the data up to and including the delimiter. + + If `read_string` encounters an error before finding a delimiter, + it returns the data read before the error and the error itself (often `io.EOF`). + `read_string` returns an error if and only if the returned data does not end + in `delim`. + + Args: + delim: The delimiter to read until. + + Returns: + A string containing the data up to and including the delimiter. + """ + var bytes: List[UInt8, True] + var err: Error + bytes, err = self.read_bytes(delim) + bytes.append(0) + + return String(bytes^), err + + fn next(inout self, number_of_bytes: Int) -> Span[UInt8, __lifetime_of(self)]: + """Returns a Span containing the next n bytes from the buffer, + advancing the buffer as if the bytes had been returned by `Buffer.read`. + + If there are fewer than n bytes in the buffer, `next` returns the entire buffer. + + Args: + number_of_bytes: The number of bytes to read from the buffer. + + Returns: + A slice containing the next n bytes from the buffer. + """ + self.last_read = OP_INVALID + var bytes_remaining = len(self) + var bytes_to_read = number_of_bytes + if bytes_to_read > bytes_remaining: + bytes_to_read = bytes_remaining + + var data = self.as_bytes_slice()[self.offset : self.offset + bytes_to_read] + + self.offset += bytes_to_read + if bytes_to_read > 0: + self.last_read = OP_READ + + return data + + fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int, Error): + """Writes data to `writer` until the buffer is drained or an error occurs. + The return value `total_bytes_written` is the number of bytes written; Any error + encountered during the write is also returned. + + Args: + writer: The writer to write to. + + Returns: + The number of bytes written to the writer. + """ + self.last_read = OP_INVALID + var bytes_to_write = len(self) + var total_bytes_written: Int = 0 + + if bytes_to_write > 0: + var bytes_written: Int + var err: Error + bytes_written, err = writer.write(self.as_bytes_slice()[self.offset :]) + if bytes_written > bytes_to_write: + abort("bytes.Buffer.write_to: invalid write count") + + self.offset += bytes_written + total_bytes_written = bytes_written + if err: + return total_bytes_written, err + + # all bytes should have been written, by definition of write method in io.Writer + if bytes_written != bytes_to_write: + return total_bytes_written, Error(ERR_SHORT_WRITE) + + # Buffer is now empty; reset. + self.reset() + return total_bytes_written, Error() diff --git a/src/gojo/bytes/reader.mojo b/src/gojo/bytes/reader.mojo new file mode 100644 index 0000000..6a9dd0a --- /dev/null +++ b/src/gojo/bytes/reader.mojo @@ -0,0 +1,294 @@ +from utils import Span +from os import abort +from algorithm.memory import parallel_memcpy +import ..io + + +struct Reader( + Sized, + io.Reader, + io.ReaderAt, + io.WriterTo, + io.Seeker, + io.ByteReader, + io.ByteScanner, +): + """A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker, + io.ByteScanner, and io.RuneScanner Interfaces by reading from + a bytes pointer. 
Unlike a `Buffer`, a `Reader` is read-only and supports seeking. + + Examples: + ```mojo + from gojo.bytes import reader + + var reader = reader.Reader(buffer=String("Hello, World!").as_bytes()) + var dest = List[UInt8, True](capacity=16) + _ = reader.read(dest) + dest.append(0) + print(String(dest)) # Output: Hello, World! + ``` + . + """ + + var _data: UnsafePointer[UInt8] + """The contents of the bytes buffer. Active contents are from buf[off : len(buf)].""" + var _size: Int + """The number of bytes stored in the buffer.""" + var _capacity: Int + """The maximum capacity of the buffer, eg the allocation of self._data.""" + var index: Int + """Current reading index.""" + var prev_rune: Int + """Index of previous rune; or < 0.""" + + fn __init__(inout self, owned buffer: List[UInt8, True]): + """Initializes a new `Reader` with the given List buffer. + + Args: + buffer: The buffer to read from. + """ + self._capacity = buffer.capacity + self._size = buffer.size + self._data = buffer.steal_data() + self.index = 0 + self.prev_rune = -1 + + fn __init__(inout self, text: String): + """Initializes a new `Reader` with the given String. + + Args: + text: The String to initialize the `Reader` with. + """ + var bytes = text.as_bytes() + self._capacity = bytes.capacity + self._size = bytes.size + self._data = bytes.steal_data() + self.index = 0 + self.prev_rune = -1 + + fn __moveinit__(inout self, owned other: Reader): + self._capacity = other._capacity + self._size = other._size + self._data = other._data + self.index = other.index + self.prev_rune = other.prev_rune + + other._data = UnsafePointer[UInt8]() + other._size = 0 + other._capacity = 0 + other.index = 0 + other.prev_rune = -1 + + fn __len__(self) -> Int: + """Returns the number of bytes of the unread portion of the slice.""" + return self._size - int(self.index) + + fn __del__(owned self) -> None: + """Frees the internal buffer.""" + if self._data: + self._data.free() + + fn as_bytes_slice(ref [_]self) -> Span[UInt8, __lifetime_of(self)]: + """Returns the internal data as a Span[UInt8].""" + return Span[UInt8, __lifetime_of(self)](unsafe_ptr=self._data, len=self._size) + + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + """Reads from the internal buffer into the destination buffer. + + Args: + dest: The destination buffer to read into. + capacity: The capacity of the destination buffer. + + Returns: + Int: The number of bytes read into dest. + """ + if self.index >= self._size: + return 0, io.EOF + + # Copy the data of the internal buffer from offset to len(buf) into the destination buffer at the given index. + self.prev_rune = -1 + var bytes_to_write = self.as_bytes_slice()[self.index : self._size] + var count = min(len(bytes_to_write), capacity) + parallel_memcpy(dest, bytes_to_write.unsafe_ptr(), count) + # var bytes_written = copy(dest, bytes_to_write.unsafe_ptr(), len(bytes_to_write)) + self.index += count + + return count, Error() + + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + """Reads from the internal buffer into the destination buffer. + + Args: + dest: The destination buffer to read into. + + Returns: + Int: The number of bytes read into dest. 
+ """ + var dest_ptr = dest.unsafe_ptr().offset(dest.size) + var bytes_read: Int + var err: Error + bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) + dest.size += bytes_read + + return bytes_read, err + + fn _read_at(self, inout dest: Span[UInt8], off: Int, capacity: Int) -> (Int, Error): + """Reads `len(dest)` bytes into `dest` beginning at byte offset `off`. + + Args: + dest: The destination buffer to read into. + off: The offset to start reading from. + capacity: The capacity of the destination buffer. + + Returns: + The number of bytes read into dest. + """ + # cannot modify state - see io.ReaderAt + if off < 0: + return 0, Error("bytes.Reader.read_at: negative offset") + + if off >= Int(self._size): + return 0, io.EOF + + var unread_bytes = self.as_bytes_slice()[off : self._size] + var count = min(len(unread_bytes), capacity) + parallel_memcpy(dest.unsafe_ptr(), unread_bytes.unsafe_ptr(), count) + # var bytes_written = copy(dest.unsafe_ptr(), unread_bytes.unsafe_ptr(), len(unread_bytes)) + if count < len(dest): + return 0, io.EOF + + return count, Error() + + fn read_at(self, inout dest: List[UInt8, True], off: Int) -> (Int, Error): + """Reads `len(dest)` bytes into `dest` beginning at byte offset `off`. + + Args: + dest: The destination buffer to read into. + off: The offset to start reading from. + + Returns: + The number of bytes read into dest. + """ + var span = Span(dest) + var bytes_read: Int + var err: Error + bytes_read, err = self._read_at(span, off, dest.capacity) + dest.size += bytes_read + + return bytes_read, err + + fn read_byte(inout self) -> (UInt8, Error): + """Reads and returns a single byte from the internal buffer.""" + self.prev_rune = -1 + if self.index >= self._size: + return UInt8(0), io.EOF + + var byte = self._data[self.index] + self.index += 1 + return byte, Error() + + fn unread_byte(inout self) -> Error: + """Unreads the last byte read by moving the read position back by one. + + Returns: + An error if the read position is at the beginning of the buffer. + """ + if self.index <= 0: + return Error("bytes.Reader.unread_byte: at beginning of buffer.") + self.prev_rune = -1 + self.index -= 1 + + return Error() + + # # read_rune implements the [io.RuneReader] Interface. + # fn read_rune(self) (ch rune, size Int, err error): + # if self.index >= Int(self._size): + # self.prev_rune = -1 + # return 0, 0, io.EOF + + # self.prev_rune = Int(self.index) + # if c := self.buffer[self.index]; c < utf8.RuneSelf: + # self.index+= 1 + # return rune(c), 1, nil + + # ch, size = utf8.DecodeRune(self.buffer[self.index:]) + # self.index += Int(size) + # return + + # # unread_rune complements [Reader.read_rune] in implementing the [io.RuneScanner] Interface. + # fn unread_rune(self) error: + # if self.index <= 0: + # return errors.New("bytes.Reader.unread_rune: at beginning of slice") + + # if self.prev_rune < 0: + # return errors.New("bytes.Reader.unread_rune: previous operation was not read_rune") + + # self.index = Int(self.prev_rune) + # self.prev_rune = -1 + # return nil + + fn seek(inout self, offset: Int, whence: Int) -> (Int, Error): + """Moves the read position to the specified `offset` from the specified `whence`. + + Args: + offset: The offset to move to. + whence: The reference point for offset. + + Returns: + The new position in which the next read will start from. 
+ """ + self.prev_rune = -1 + var position: Int = 0 + + if whence == io.SEEK_START: + position = offset + elif whence == io.SEEK_CURRENT: + position = self.index + offset + elif whence == io.SEEK_END: + position = self._size + offset + else: + return Int(0), Error("bytes.Reader.seek: invalid whence") + + if position < 0: + return Int(0), Error("bytes.Reader.seek: negative position") + + self.index = position + return position, Error() + + fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int, Error): + """Writes data to `writer` until the buffer is drained or an error occurs. + + Args: + writer: The writer to write to. + + Returns: + The number of bytes written and an error if one occurred. + """ + self.prev_rune = -1 + if self.index >= self._size: + return 0, Error() + + var bytes = self.as_bytes_slice()[self.index : self._size] + var write_count: Int + var err: Error + write_count, err = writer.write(bytes) + if write_count > len(bytes): + abort("bytes.Reader.write_to: invalid Write count") + + self.index += write_count + if write_count != len(bytes): + return write_count, io.ERR_SHORT_WRITE + + return write_count, Error() + + fn reset(inout self, owned buffer: List[UInt8, True]) -> None: + """Resets the `Reader` to be reading from `buffer`. + + Args: + buffer: The new buffer to read from. + """ + self._capacity = buffer.capacity + self._size = buffer.size + self._data = buffer.steal_data() + self.index = 0 + self.prev_rune = -1 diff --git a/src/gojo/bytes/util.mojo b/src/gojo/bytes/util.mojo new file mode 100644 index 0000000..0311374 --- /dev/null +++ b/src/gojo/bytes/util.mojo @@ -0,0 +1,108 @@ +from utils import Span + + +fn equals(left: List[UInt8, True], right: List[UInt8, True]) -> Bool: + """Reports if `left` and `right` are equal. + + Args: + left: The first list to compare. + right: The second list to compare. + """ + if len(left) != len(right): + return False + for i in range(len(left)): + if left[i] != right[i]: + return False + return True + + +fn has_prefix(bytes: List[UInt8, True], prefix: List[UInt8, True]) -> Bool: + """Reports if the list begins with prefix. + + Args: + bytes: The list to search. + prefix: The prefix to search for. + """ + var len_comparison = len(bytes) >= len(prefix) + var prefix_comparison = equals(bytes[0 : len(prefix)], prefix) + return len_comparison and prefix_comparison + + +fn has_suffix(bytes: List[UInt8, True], suffix: List[UInt8, True]) -> Bool: + """Reports if the list ends with suffix. + + Args: + bytes: The list struct to search. + suffix: The suffix to search for. + """ + var len_comparison = len(bytes) >= len(suffix) + var suffix_comparison = equals(bytes[len(bytes) - len(suffix) : len(bytes)], suffix) + return len_comparison and suffix_comparison + + +fn index_byte(bytes: List[UInt8, True], delim: UInt8) -> Int: + """Return the index of the first occurrence of the byte `delim`. + + Args: + bytes: The list to search. + delim: The byte to search for. + + Returns: + The index of the first occurrence of the byte `delim`. + """ + for i in range(len(bytes)): + if bytes[i] == delim: + return i + + return -1 + + +fn index_byte(bytes: UnsafePointer[Scalar[DType.uint8]], size: Int, delim: UInt8) -> Int: + """Return the index of the first occurrence of the byte `delim`. + + Args: + bytes: The list to search. + size: The number of elements stored at the pointer address. + delim: The byte to search for. + + Returns: + The index of the first occurrence of the byte `delim`. 
+ """ + for i in range(size): + if UInt8(bytes[i]) == delim: + return i + + return -1 + + +fn index_byte(bytes: Span[UInt8], delim: UInt8) -> Int: + """Return the index of the first occurrence of the byte `delim`. + + Args: + bytes: The Span to search. + delim: The byte to search for. + + Returns: + The index of the first occurrence of the byte `delim`. + """ + for i in range(len(bytes)): + if bytes[i] == delim: + return i + + return -1 + + +fn to_string(bytes: List[UInt8, True]) -> String: + """Makes a deep copy of the list supplied and converts it to a string. + If it's not null terminated, it will append a null byte. + + Args: + bytes: The list to convert. + + Returns: + A String built from the list of bytes. + """ + var copy = List[UInt8](bytes) + if copy[-1] != 0: + copy.append(0) + return String(copy^) diff --git a/gojo/fmt/__init__.mojo b/src/gojo/fmt/__init__.mojo similarity index 100% rename from gojo/fmt/__init__.mojo rename to src/gojo/fmt/__init__.mojo diff --git a/gojo/fmt/fmt.mojo b/src/gojo/fmt/fmt.mojo similarity index 70% rename from gojo/fmt/fmt.mojo rename to src/gojo/fmt/fmt.mojo index 6766715..f9dccce 100644 --- a/gojo/fmt/fmt.mojo +++ b/src/gojo/fmt/fmt.mojo @@ -9,8 +9,6 @@ Boolean Integer %d base 10 %q a single-quoted character literal. -%x base 16, with lower-case letters for a-f -%X base 16, with upper-case letters for A-F Floating-point and complex constituents: %f decimal point but no exponent, e.g. 123.456 @@ -27,10 +25,8 @@ TODO: """ from utils.variant import Variant -from math import floor -from ..builtins import Byte -alias Args = Variant[String, Int, Float64, Bool, List[Byte]] +alias Args = Variant[String, Int, Float64, Bool, List[UInt8, True]] fn replace_first(s: String, old: String, new: String) -> String: @@ -77,52 +73,16 @@ fn find_first_verb(s: String, verbs: List[String]) -> String: return verb -alias BASE10_TO_BASE16 = List[String]( - "0", - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - "a", - "b", - "c", - "d", - "e", - "f", -) - - -fn convert_base10_to_base16(value: Int) -> String: - """Converts a base 10 number to base 16. +fn format_string(format: String, arg: String) -> String: + """Format a string argument. Args: - value: Base 10 number. + format: The format string. + arg: The string argument. Returns: - Base 16 number as a String. + The formatted string. """ - - var val: Float64 = 0.0 - var result: Float64 = value - var base16: String = "" - while result > 1: - var temp = result / 16 - var floor_result = floor(temp) - var remainder = temp - floor_result - result = floor_result - val = 16 * remainder - - base16 = BASE10_TO_BASE16[int(val)] + base16 - - return base16 - - -fn format_string(format: String, arg: String) -> String: var verb = find_first_verb(format, List[String]("%s", "%q")) var arg_to_place = arg if verb == "%q": @@ -131,7 +91,16 @@ fn format_string(format: String, arg: String) -> String: return replace_first(format, String("%s"), arg) -fn format_bytes(format: String, arg: List[Byte]) -> String: +fn format_bytes(format: String, arg: List[UInt8, True]) -> String: + """Format a byte list argument. + + Args: + format: The format string. + arg: The byte list argument. + + Returns: + The formatted byte list. + """ var argument = arg if argument[-1] != 0: argument.append(0) @@ -140,23 +109,46 @@ fn format_bytes(format: String, arg: List[Byte]) -> String: fn format_integer(format: String, arg: Int) -> String: - var verb = find_first_verb(format, List[String]("%x", "%X", "%d", "%q")) + """Format an integer argument. 
+ + Args: + format: The format string. + arg: The integer argument. + + Returns: + The formatted integer. + """ + var verb = find_first_verb(format, List[String]("%d", "%q")) var arg_to_place = str(arg) - if verb == "%x": - arg_to_place = str(convert_base10_to_base16(arg)).lower() - elif verb == "%X": - arg_to_place = str(convert_base10_to_base16(arg)).upper() - elif verb == "%q": + if verb == "%q": arg_to_place = "'" + str(arg) + "'" return replace_first(format, verb, arg_to_place) fn format_float(format: String, arg: Float64) -> String: + """Format a float argument. + + Args: + format: The format string. + arg: The float argument. + + Returns: + The formatted float. + """ return replace_first(format, str("%f"), str(arg)) fn format_boolean(format: String, arg: Bool) -> String: + """Format a boolean argument. + + Args: + format: The format string. + arg: The boolean argument. + + Returns: + The formatted boolean. + """ var value: String = "False" if arg: value = "True" @@ -164,24 +156,33 @@ fn format_boolean(format: String, arg: Bool) -> String: return replace_first(format, String("%t"), value) -# If the number of arguments does not match the number of format specifiers -alias BadArgCount = "(BAD ARG COUNT)" +alias BAD_ARG_COUNT = "(BAD ARG COUNT)" +"""If the number of arguments does not match the number of format specifiers.""" fn sprintf(formatting: String, *args: Args) -> String: + """Format a string with the given arguments. + + Args: + formatting: The format string. + args: The arguments to format the string with. + + Returns: + The formatted string. + """ var text = formatting var raw_percent_count = formatting.count("%%") * 2 var formatter_count = formatting.count("%") - raw_percent_count if formatter_count != len(args): - return BadArgCount + return BAD_ARG_COUNT for i in range(len(args)): var argument = args[i] if argument.isa[String](): text = format_string(text, argument[String]) - elif argument.isa[List[Byte]](): - text = format_bytes(text, argument[List[Byte]]) + elif argument.isa[List[UInt8, True]](): + text = format_bytes(text, argument[List[UInt8, True]]) elif argument.isa[Int](): text = format_integer(text, argument[Int]) elif argument.isa[Float64](): @@ -194,6 +195,15 @@ fn sprintf(formatting: String, *args: Args) -> String: # TODO: temporary until we have arg packing. fn sprintf_str(formatting: String, args: List[String]) raises -> String: + """Format a string with the given arguments. + + Args: + formatting: The format string. + args: The arguments to format the string with. + + Returns: + The formatted string. + """ var text = formatting var formatter_count = formatting.count("%") @@ -209,6 +219,12 @@ fn sprintf_str(formatting: String, args: List[String]) raises -> String: fn printf(formatting: String, *args: Args) raises: + """Print a formatted string with the given arguments. + + Args: + formatting: The format string. + args: The arguments to format the string with. 
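+
+    Examples:
+    A minimal sketch (illustrative; `sprintf` takes the same verbs but returns the result
+    instead of printing, and is assumed to be re-exported from `gojo.fmt`):
+    ```mojo
+    from gojo.fmt import sprintf
+    var name = String("Go")
+    var year = 2009
+    print(sprintf("The %s language appeared in %d", name, year))  # The Go language appeared in 2009
+    ```
+    .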
+ """ var text = formatting var raw_percent_count = formatting.count("%%") * 2 var formatter_count = formatting.count("%") - raw_percent_count @@ -222,8 +238,8 @@ fn printf(formatting: String, *args: Args) raises: var argument = args[i] if argument.isa[String](): text = format_string(text, argument[String]) - elif argument.isa[List[Byte]](): - text = format_bytes(text, argument[List[Byte]]) + elif argument.isa[List[UInt8, True]](): + text = format_bytes(text, argument[List[UInt8, True]]) elif argument.isa[Int](): text = format_integer(text, argument[Int]) elif argument.isa[Float64](): diff --git a/src/gojo/io/__init__.mojo b/src/gojo/io/__init__.mojo new file mode 100644 index 0000000..b260881 --- /dev/null +++ b/src/gojo/io/__init__.mojo @@ -0,0 +1,329 @@ +"""`io` provides basic interfaces to I/O primitives. +Its primary job is to wrap existing implementations of such primitives, +such as those in package os, into shared public interfaces that +abstract the fntionality, plus some other related primitives. + +Because these interfaces and primitives wrap lower-level operations with +various implementations, unless otherwise informed clients should not +assume they are safe for parallel execution. +seek whence values. +""" +from utils import Span +from .io import write_string, read_at_least, read_full, read_all, BUFFER_SIZE +from .file import FileWrapper +from .std import STDWriter + + +alias Rune = Int32 + +alias SEEK_START = 0 +"""seek relative to the origin of the file.""" +alias SEEK_CURRENT = 1 +"""seek relative to the current offset.""" +alias SEEK_END = 2 +"""seek relative to the end.""" + +alias ERR_SHORT_WRITE = Error("short write") +"""A write accepted fewer bytes than requested, but failed to return an explicit error.""" + +alias ERR_INVALID_WRITE = Error("invalid write result") +"""A write returned an impossible count.""" + +alias ERR_SHORT_BUFFER = Error("short buffer") +"""A read required a longer buffer than was provided.""" + +alias EOF = Error("EOF") +"""Returned by `read` when no more input is available. +(`read` must return `EOF` itself, not an error wrapping EOF, +because callers will test for EOF using `==`) + +Functions should return `EOF` only to signal a graceful end of input. +If the `EOF` occurs unexpectedly in a structured data stream, +the appropriate error is either `ERR_UNEXPECTED_EOF` or some other error +giving more detail.""" + +alias ERR_UNEXPECTED_EOF = Error("unexpected EOF") +"""EOF was encountered in the middle of reading a fixed-size block or data structure.""" + +alias ERR_NO_PROGRESS = Error("multiple read calls return no data or error") +"""Returned by some clients of a `Reader` when +many calls to read have failed to return any data or error, +usually the sign of a broken `Reader` implementation.""" + + +trait Reader(Movable): + """Wraps the basic `read` method. + + `read` reads up to `len(dest)` bytes into p. It returns the number of bytes + `read` `(0 <= n <= len(dest))` and any error encountered. Even if `read` + returns n < `len(dest)`, it may use all of p as scratch space during the call. + If some data is available but not `len(dest)` bytes, read conventionally + returns what is available instead of waiting for more. + + When read encounters an error or end-of-file condition after + successfully reading n > 0 bytes, it returns the number of + bytes read. It may return an error from the same call + or return the error (and n == 0) from a subsequent call. 
+ An instance of this general case is that a Reader returning + a non-zero number of bytes at the end of the input stream may + return either err == `EOF` or err == Error(). The next read should + return 0, EOF. + + Callers should always process the n > 0 bytes returned before + considering the error err. Doing so correctly handles I/O errors + that happen after reading some bytes and also both of the + allowed `EOF` behaviors. + + If `len(dest) == 0`, `read` should always return n == 0. It may return an + error if some error condition is known, such as `EOF`. + + Implementations of `read` are discouraged from returning a + zero byte count with an empty error, except when `len(dest) == 0`. + Callers should treat a return of 0 and an empty error as indicating that + nothing happened; in particular it does not indicate `EOF`. + + Implementations must not retain `dest`.""" + + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + ... + + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + ... + + +trait Writer(Movable): + """Wraps the basic `write` method. + + `write` writes `len(dest)` bytes from `src` to the underlying data stream. + It returns the number of bytes written from `src` (0 <= n <= `len(dest)`) + and any error encountered that caused the `write` to stop early. + `write` must return an error if it returns `n < len(dest)`. + `write` must not modify the data `src`, even temporarily. + + Implementations must not retain `src`. + """ + + fn write(inout self, src: Span[UInt8, _]) -> (Int, Error): + ... + + +trait Closer(Movable): + """Wraps the basic `close` method. + + The behavior of `close` after the first call is undefined. + Specific implementations may document their own behavior. + """ + + fn close(inout self) -> Error: + ... + + +trait Seeker(Movable): + """Wraps the basic `seek` method. + + `seek` sets the offset for the next read or write to offset, + interpreted according to whence: + `SEEK_START` means relative to the start of the file, + `SEEK_CURRENT` means relative to the current offset, and + `SEEK_END]` means relative to the end + (for example, `offset = -2` specifies the penultimate byte of the file). + `seek` returns the new offset relative to the start of the + file or an error, if any. + + Seeking to an offset before the start of the file is an error. + Seeking to any positive offset may be allowed, but if the new offset exceeds + the size of the underlying object the behavior of subsequent I/O operations + is implementation dependent. + """ + + fn seek(inout self, offset: Int, whence: Int) -> (Int, Error): + ... + + +trait ReadWriter(Reader, Writer): + ... + + +trait ReadCloser(Reader, Closer): + ... + + +trait WriteCloser(Writer, Closer): + ... + + +trait ReadWriteCloser(Reader, Writer, Closer): + ... + + +trait ReadSeeker(Reader, Seeker): + ... + + +trait ReadSeekCloser(Reader, Seeker, Closer): + ... + + +trait WriteSeeker(Writer, Seeker): + ... + + +trait ReadWriteSeeker(Reader, Writer, Seeker): + ... + + +trait ReaderFrom: + """Wraps the `read_from` method. + + `read_from` reads data from `reader` until `EOF` or error. + The return value n is the number of bytes read. + Any error except `EOF` encountered during the read is also returned. + """ + + fn read_from[R: Reader](inout self, inout reader: R) -> (Int, Error): + ... + + +trait WriterReadFrom(Writer, ReaderFrom): + ... + + +trait WriterTo: + """Wraps the `write_to` method. 
+ + `write_to` writes data to `writer` until there's no more data to write or + when an error occurs. The return value n is the number of bytes + written. Any error encountered during the write is also returned. + """ + + fn write_to[W: Writer](inout self, inout writer: W) -> (Int, Error): + ... + + +trait ReaderWriteTo(Reader, WriterTo): + ... + + +trait ReaderAt: + """Wraps the basic `read_at` method. + + `read_at` reads `len(dest)` bytes into `dest` starting at offset `off` in the + underlying input source. It returns the number of bytes + read (`0 <= n <= len(dest)`) and any error encountered. + + When `read_at` returns `n < len(dest)`, it returns an error + explaining why more bytes were not returned. In this respect, + `read_at` is stricter than `read`. + + Even if `read_at` returns `n < len(dest)`, it may use all of `dest` as scratch + space during the call. If some data is available but not `len(dest)` bytes, + `read_at` blocks until either all the data is available or an error occurs. + In this respect `read_at` is different from `read`. + + If the `n = len(dest)` bytes returned by `read_at` are at the end of the + input source, `read_at` may return either err == `EOF` or an empty error. + + If `read_at` is reading from an input source with a seek offset, + `read_at` should not affect nor be affected by the underlying + seek offset. + + Clients of `read_at` can execute parallel `read_at` calls on the + same input source. + + Implementations must not retain `dest`.""" + + fn read_at(self, inout dest: List[UInt8, True], off: Int) -> (Int, Error): + ... + + fn _read_at(self, inout dest: Span[UInt8], off: Int, capacity: Int) -> (Int, Error): + ... + + +trait WriterAt: + """Wraps the basic `write_at` method. + + `write_at` writes `len(dest)` bytes from p to the underlying data stream + at offset `off`. It returns the number of bytes written from p (`0 <= n <= len(dest)`) + and any error encountered that caused the write to stop early. + `write_at` must return an error if it returns `n < len(dest)`. + + If `write_at` is writing to a destination with a seek offset, + `write_at` should not affect nor be affected by the underlying + seek offset. + + Clients of `write_at` can execute parallel `write_at` calls on the same + destination if the ranges do not overlap. + + Implementations must not retain `src`.""" + + fn _write_at(self, src: Span[UInt8], off: Int) -> (Int, Error): + ... + + fn write_at(self, src: List[UInt8, True], off: Int) -> (Int, Error): + ... + + +trait ByteReader: + """Wraps the `read_byte` method. + + `read_byte` reads and returns the next byte from the input or + any error encountered. If `read_byte` returns an error, no input + byte was consumed, and the returned byte value is undefined. + + `read_byte` provides an efficient trait for byte-at-time + processing. A `Reader` that does not implement `ByteReader` + can be wrapped using `bufio.Reader` to add this method.""" + + fn read_byte(inout self) -> (UInt8, Error): + ... + + +trait ByteScanner(ByteReader): + """Adds the `unread_byte` method to the basic `read_byte` method. + + `unread_byte` causes the next call to `read_byte` to return the last byte read. + If the last operation was not a successful call to `read_byte`, `unread_byte` may + return an error, unread the last byte read (or the byte prior to the + last-unread byte), or (in implementations that support the `Seeker` trait) + seek to one byte before the current offset.""" + + fn unread_byte(inout self) -> Error: + ... 
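+
+# An illustrative peek-style use of `ByteScanner` (a sketch only, not part of the trait;
+# `bytes.Buffer` and `bytes.Reader` in this package both provide these methods):
+#
+#   var byte: UInt8
+#   var err: Error
+#   byte, err = source.read_byte()
+#   if err or byte != expected:
+#       _ = source.unread_byte()  # the next read_byte() returns the same byte again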
+ + +trait ByteWriter: + """Wraps the `write_byte` method.""" + + fn write_byte(inout self, byte: UInt8) -> (Int, Error): + ... + + +trait RuneReader: + """Wraps the `read_rune` method. + + `read_rune` reads a single encoded Unicode character + and returns the rune and its size in bytes. If no character is + available, err will be set.""" + + fn read_rune(inout self) -> (Rune, Int): + ... + + +trait RuneScanner(RuneReader): + """Adds the `unread_rune` method to the basic `read_rune` method. + + `unread_rune` causes the next call to `read_rune` to return the last rune read. + If the last operation was not a successful call to `read_rune`, `unread_rune` may + return an error, unread the last rune read (or the rune prior to the + last-unread rune), or (in implementations that support the `Seeker` trait) + seek to the start of the rune before the current offset.""" + + fn unread_rune(inout self) -> Rune: + ... + + +trait StringWriter: + """Wraps the `write_string` method.""" + + fn write_string(inout self, src: String) -> (Int, Error): + ... diff --git a/src/gojo/io/file.mojo b/src/gojo/io/file.mojo new file mode 100644 index 0000000..2ce604a --- /dev/null +++ b/src/gojo/io/file.mojo @@ -0,0 +1,182 @@ +import ..io + + +struct FileWrapper(io.ReadWriteCloser, io.ByteReader): + """FileWrapper wraps a file handle and implements the ReadWriteCloser and ByteReader traits.""" + + var handle: FileHandle + """The file handle to read/write from/to.""" + + fn __init__(inout self, path: String, mode: String) raises: + """Create a new FileWrapper instance. + + Args: + path: The path to the file. + mode: The mode to open the file in. + """ + self.handle = open(path, mode) + + fn __moveinit__(inout self, owned existing: Self): + self.handle = existing.handle^ + + fn __del__(owned self): + var err = self.close() + if err: + # TODO: __del__ can't raise, but there should be some fallback. + print(str(err)) + + fn close(inout self) -> Error: + """Close the file handle.""" + try: + self.handle.close() + except e: + return e + + return Error() + + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + """Read from the file handle into `dest`. + Pretty hacky way to force the filehandle read into the defined trait, and it's unsafe since we're + reading directly into the pointer. + + Args: + dest: The buffer to read data into. + capacity: The capacity of the destination buffer. + + Returns: + The number of bytes read, or an error if one occurred. + """ + var bytes_read: Int + try: + bytes_read = int(self.handle.read(ptr=dest, size=capacity)) + except e: + return 0, e + + if bytes_read == 0: + return bytes_read, io.EOF + + return bytes_read, Error() + + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + """Read from the file handle into `dest`. + Pretty hacky way to force the filehandle read into the defined trait, and it's unsafe since we're + reading directly into the pointer. + + Args: + dest: The buffer to read data into. + + Returns: + The number of bytes read, or an error if one occurred. + """ + var dest_ptr = dest.unsafe_ptr().offset(dest.size) + var bytes_read: Int + var err: Error + bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) + dest.size += bytes_read + + return bytes_read, err + + fn read_all(inout self) -> (List[UInt8, True], Error): + """Read all data from the file handle. + + Returns: + The data read from the file handle, or an error if one occurred. 
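+
+        Examples:
+        A minimal sketch (illustrative; the path is a placeholder, and the constructor can
+        raise, so the calls run inside a raising `main`). On success the returned error is `io.EOF`:
+        ```mojo
+        from gojo.io import FileWrapper
+
+        fn main() raises:
+            var file = FileWrapper("path/to/file.txt", "r")
+            var data: List[UInt8, True]
+            var err: Error
+            data, err = file.read_all()
+            print(len(data))
+        ```
+        .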
+ """ + + var bytes = List[UInt8, True](capacity=io.BUFFER_SIZE) + while True: + var temp = List[UInt8, True](capacity=io.BUFFER_SIZE) + _ = self.read(temp) + + # If new bytes will overflow the result, resize it. + if len(bytes) + len(temp) > bytes.capacity: + bytes.reserve(bytes.capacity * 2) + bytes.extend(temp) + + if len(temp) < io.BUFFER_SIZE: + return bytes, io.EOF + + fn read_byte(inout self) -> (UInt8, Error): + """Read a single byte from the file handle. + + Returns: + The byte read from the file handle, or an error if one occurred. + """ + try: + var bytes: List[UInt8] + var err: Error + bytes, err = self.read_bytes(1) + return bytes[0], Error() + except e: + return UInt8(0), e + + fn read_bytes(inout self, size: Int = -1) raises -> (List[UInt8], Error): + """Read `size` bytes from the file handle. + + Args: + size: The number of bytes to read. If -1, read all available bytes. + + Returns: + The bytes read from the file handle, or an error if one occurred. + """ + try: + return self.handle.read_bytes(size), Error() + except e: + return List[UInt8](), e + + fn stream_until_delimiter(inout self, inout dest: List[UInt8, True], delimiter: UInt8, max_size: Int) -> Error: + """Read from the file handle into `dest` until `delimiter` is reached. + + Args: + dest: The buffer to read data into. + delimiter: The byte to stop reading at. + max_size: The maximum number of bytes to read. + + Returns: + An error if one occurred. + """ + var byte: UInt8 + var err = Error() + for _ in range(max_size): + byte, err = self.read_byte() + if err: + return err + + if byte == delimiter: + return err + dest.append(byte) + return Error("Stream too long") + + fn seek(inout self, offset: Int, whence: Int = 0) -> (Int, Error): + """Seek to a new position in the file handle. + + Args: + offset: The offset to seek to. + whence: The reference point for the offset. + + Returns: + The new position in the file handle, or an error if one occurred. + """ + try: + var position = self.handle.seek(UInt64(offset), whence) + return int(position), Error() + except e: + return 0, e + + fn write(inout self, src: Span[UInt8]) -> (Int, Error): + """Write data to the file handle. + + Args: + src: The buffer to write data from. + + Returns: + The number of bytes written, or an error if one occurred. + """ + if len(src) == 0: + return 0, Error("No data to write") + + try: + self.handle.write(src.unsafe_ptr()) + return len(src), io.EOF + except e: + return 0, Error(str(e)) diff --git a/src/gojo/io/io.mojo b/src/gojo/io/io.mojo new file mode 100644 index 0000000..23bbe4d --- /dev/null +++ b/src/gojo/io/io.mojo @@ -0,0 +1,118 @@ +alias BUFFER_SIZE = 4096 +"""The default buffer size for reading and writing operations.""" + + +fn write_string[W: Writer, //](inout writer: W, string: String) -> (Int, Error): + """Writes the contents of the `string` to `writer`, which accepts a Span of bytes. + If `writer` implements `StringWriter`, `StringWriter.write_string` is invoked directly. + Otherwise, `Writer.write` is called exactly once. + + Args: + writer: The writer to write to. + string: The string to write. + + Returns: + The number of bytes written and an error, if any. + """ + return writer.write(string.as_bytes_slice()) + + +fn write_string[W: StringWriter, //](inout writer: W, string: String) -> (Int, Error): + """Writes the contents of the `string` to `writer`, which accepts a Span of bytes. + If `writer` implements `StringWriter`, `StringWriter.write_string` is invoked directly. + Otherwise, `Writer.write` is called exactly once. 
+ + Args: + writer: The writer to write to. + string: The string to write. + + Returns: + The number of bytes written and an error, if any. + """ + return writer.write_string(string) + + +fn read_at_least[R: Reader, //](inout reader: R, inout dest: List[UInt8, True], min: Int) -> (Int, Error): + """Reads from `reader` into `dest` until it has read at least `min` bytes. + It returns the number of bytes copied and an error if fewer bytes were read. + The error is `EOF` only if no bytes were read. + If an `EOF` happens after reading fewer than min bytes, + `read_at_least` returns `ERR_UNEXPECTED_EOF`. + If min is greater than the length of `dest`, `read_at_least` returns `ERR_SHORT_BUFFER`. + On return, `n >= min` if and only if err is empty. + If `reader` returns an error having read at least min bytes, the error is dropped. + + Args: + reader: The reader to read from. + dest: The buffer to read into. + min: The minimum number of bytes to read. + + Returns: + The number of bytes read. + """ + var error = Error() + if len(dest) < min: + return 0, io.ERR_SHORT_BUFFER + + var total_bytes_read: Int = 0 + while total_bytes_read < min and not error: + var bytes_read: Int + bytes_read, error = reader.read(dest) + total_bytes_read += bytes_read + + if total_bytes_read >= min: + error = Error() + + elif total_bytes_read > 0 and str(error): + error = ERR_UNEXPECTED_EOF + + return total_bytes_read, error + + +fn read_full[R: Reader, //](inout reader: R, inout dest: List[UInt8, True]) -> (Int, Error): + """Reads exactly `len(dest)` bytes from `reader` into `dest`. + It returns the number of bytes copied and an error if fewer bytes were read. + The error is `EOF` only if no bytes were read. + If an `EOF` happens after reading some but not all the bytes, + `read_full` returns `ERR_UNEXPECTED_EOF`. + On return, `n == len(buf)` if and only if err is empty. + If `reader` returns an error having read at least `len(buf)` bytes, the error is dropped. + """ + return read_at_least(reader, dest, len(dest)) + + +# TODO: read directly into dest +fn read_all[R: Reader, //](inout reader: R) -> (List[UInt8, True], Error): + """Reads from `reader` until an error or `EOF` and returns the data it read. + A successful call returns an empty err, and not err == `EOF`. Because `read_all` is + defined to read from `src` until `EOF`, it does not treat an `EOF` from `read` + as an error to be reported. + + Args: + reader: The reader to read from. + + Returns: + The data read. + """ + var dest = List[UInt8, True](capacity=BUFFER_SIZE) + var at_eof: Bool = False + + while True: + var temp = List[UInt8, True](capacity=BUFFER_SIZE) + var bytes_read: Int + var err: Error + bytes_read, err = reader.read(temp) + if str(err) != "": + if str(err) != str(EOF): + return dest, err + + at_eof = True + + # If new bytes will overflow the result, resize it. + # if some bytes were written, how do I append before returning result on the last one? 
+ if len(dest) + len(temp) > dest.capacity: + dest.reserve(dest.capacity * 2) + dest.extend(temp) + + if at_eof: + return dest, err diff --git a/gojo/io/std.mojo b/src/gojo/io/std.mojo similarity index 67% rename from gojo/io/std.mojo rename to src/gojo/io/std.mojo index 553be78..05f7a9e 100644 --- a/gojo/io/std.mojo +++ b/src/gojo/io/std.mojo @@ -1,20 +1,18 @@ import ..io -from ..syscall import FD +from sys import external_call @value struct STDWriter[file_descriptor: Int](Copyable, io.Writer, io.StringWriter): """A writer for POSIX file descriptors.""" - @always_inline fn __init__(inout self): constrained[ - file_descriptor == FD.STDOUT or file_descriptor == FD.STDERR, + file_descriptor == 1 or file_descriptor == 2, "The STDWriter Struct is meant to write to STDOUT and STDERR. file_descriptor must be 1 or 2.", ]() - @always_inline - fn _write(inout self, src: Span[UInt8]) -> (Int, Error): + fn write(inout self, src: Span[UInt8]) -> (Int, Error): """Writes the given bytes to the file descriptor. Args: @@ -32,19 +30,6 @@ struct STDWriter[file_descriptor: Int](Copyable, io.Writer, io.StringWriter): return write_count, Error() - @always_inline - fn write(inout self, src: List[UInt8]) -> (Int, Error): - """Writes the given bytes to the file descriptor. - - Args: - src: The bytes to write to the file descriptor. - - Returns: - The number of bytes written to the file descriptor. - """ - return self._write(Span(src)) - - @always_inline fn write_string(inout self, src: String) -> (Int, Error): """Writes the given string to the file descriptor. @@ -54,9 +39,8 @@ struct STDWriter[file_descriptor: Int](Copyable, io.Writer, io.StringWriter): Returns: The number of bytes written to the file descriptor. """ - return self._write(src.as_bytes_slice()) + return self.write(src.as_bytes_slice()) - @always_inline fn read_from[R: io.Reader](inout self, inout reader: R) -> (Int, Error): """Reads from the given reader to a temporary buffer and writes to the file descriptor. @@ -66,6 +50,6 @@ struct STDWriter[file_descriptor: Int](Copyable, io.Writer, io.StringWriter): Returns: The number of bytes written to the file descriptor. """ - var buffer = List[UInt8](capacity=io.BUFFER_SIZE) + var buffer = List[UInt8, True](capacity=io.BUFFER_SIZE) _ = reader.read(buffer) - return self._write(Span(buffer)) + return self.write(Span(buffer)) diff --git a/gojo/net/__init__.mojo b/src/gojo/net/__init__.mojo similarity index 100% rename from gojo/net/__init__.mojo rename to src/gojo/net/__init__.mojo diff --git a/gojo/net/address.mojo b/src/gojo/net/address.mojo similarity index 94% rename from gojo/net/address.mojo rename to src/gojo/net/address.mojo index 9278d9c..bffc75a 100644 --- a/gojo/net/address.mojo +++ b/src/gojo/net/address.mojo @@ -57,6 +57,15 @@ struct BaseAddr: fn resolve_internet_addr(network: String, address: String) -> (TCPAddr, Error): + """Resolve an address to a TCPAddr. + + Args: + network: The network type. + address: The address to resolve. + + Returns: + A TCPAddr struct representing the resolved address. 
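+
+    Examples:
+    A minimal sketch (illustrative; assumes `TCPAddr` and this function are reachable through
+    the `gojo.net` package namespace):
+    ```mojo
+    from gojo.net import TCPAddr, resolve_internet_addr
+    var addr: TCPAddr
+    var err: Error
+    addr, err = resolve_internet_addr("tcp", "127.0.0.1:8080")
+    ```
+    .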
+ """ var host: String = "" var port: String = "" var portnum: Int = 0 @@ -101,7 +110,7 @@ struct HostPort(Stringable): self.port = port fn __str__(self) -> String: - return join_host_port(self.host, str(self.port)) + return self.host + ":" + str(self.port) fn join_host_port(host: String, port: String) -> String: diff --git a/gojo/net/fd.mojo b/src/gojo/net/fd.mojo similarity index 57% rename from gojo/net/fd.mojo rename to src/gojo/net/fd.mojo index 5511422..87fede7 100644 --- a/gojo/net/fd.mojo +++ b/src/gojo/net/fd.mojo @@ -1,36 +1,33 @@ +from utils import Span import ..io from ..syscall import ( recv, send, close, - FileDescriptorBase, ) +from sys import external_call alias O_RDWR = 0o2 -struct FileDescriptor(FileDescriptorBase): +struct FileDescriptor(io.ReadWriteCloser): var fd: Int var is_closed: Bool - @always_inline fn __init__(inout self, fd: Int): self.fd = fd self.is_closed = False - @always_inline fn __moveinit__(inout self, owned existing: Self): self.fd = existing.fd self.is_closed = existing.is_closed - @always_inline fn __del__(owned self): if not self.is_closed: var err = self.close() if err: print(str(err)) - @always_inline fn close(inout self) -> Error: """Mark the file descriptor as closed.""" var close_status = close(self.fd) @@ -40,43 +37,46 @@ struct FileDescriptor(FileDescriptorBase): self.is_closed = True return Error() - @always_inline - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): - """Receive data from the file descriptor and write it to the buffer provided.""" - var bytes_received = recv( - self.fd, - dest.unsafe_ptr() + len(dest), - capacity - len(dest), - 0, - ) + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + """Receive data from the file descriptor and write it to the buffer provided. + + Args: + dest: The destination buffer to write the data to. + capacity: The capacity of the destination buffer. + + Returns: + The number of bytes read, or an error if one occurred. + """ + var bytes_received = recv(self.fd, dest, capacity, 0) if bytes_received == 0: return bytes_received, io.EOF if bytes_received == -1: return 0, Error("Failed to receive message from socket.") - dest._len += bytes_received return bytes_received, Error() - @always_inline - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): - """Receive data from the file descriptor and write it to the buffer provided.""" - var span = Span(dest) + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + """Receive data from the file descriptor and write it to the buffer provided. + + Args: + dest: The destination buffer to write the data to. + Returns: + The number of bytes read, or an error if one occurred. 
+ """ + if dest.size == dest.capacity: + return 0, Error("net.FileDescriptor.read: no space left in destination buffer.") + + var dest_ptr = dest.unsafe_ptr().offset(dest.size) var bytes_read: Int var err: Error - bytes_read, err = self._read(span, dest.capacity) + bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) dest.size += bytes_read return bytes_read, err - @always_inline - fn write(inout self, src: List[UInt8]) -> (Int, Error): - """Write data from the buffer to the file descriptor.""" - return self._write(Span(src)) - - @always_inline - fn _write(inout self, src: Span[UInt8]) -> (Int, Error): + fn write(inout self, src: Span[UInt8]) -> (Int, Error): """Write data from the buffer to the file descriptor.""" var bytes_sent = send(self.fd, src.unsafe_ptr(), len(src), 0) if bytes_sent == -1: diff --git a/gojo/net/ip.mojo b/src/gojo/net/ip.mojo similarity index 72% rename from gojo/net/ip.mojo rename to src/gojo/net/ip.mojo index 4af7748..9221012 100644 --- a/gojo/net/ip.mojo +++ b/src/gojo/net/ip.mojo @@ -1,5 +1,5 @@ -from utils.variant import Variant -from utils.static_tuple import StaticTuple +from collections import InlineArray +from utils import Variant, StaticTuple from sys.info import os_is_linux, os_is_macos from ..syscall import ( c_int, @@ -39,10 +39,10 @@ fn get_addr_info(host: String) raises -> AddrInfo: ) var status = getaddrinfo( - host.unsafe_uint8_ptr(), + host.unsafe_ptr(), UnsafePointer[UInt8](), - UnsafePointer.address_of(hints), - UnsafePointer.address_of(servinfo), + Reference(hints), + Reference(servinfo), ) if status != 0: print("getaddrinfo failed to execute with status:", status) @@ -51,7 +51,7 @@ fn get_addr_info(host: String) raises -> AddrInfo: print("servinfo is null") raise Error("Failed to get address info. Pointer to addrinfo is null.") - return move_from_pointee(servinfo) + return servinfo.take_pointee() elif os_is_linux(): var servinfo = UnsafePointer[addrinfo_unix]().alloc(1) servinfo[0] = addrinfo_unix() @@ -62,10 +62,10 @@ fn get_addr_info(host: String) raises -> AddrInfo: ) var status = getaddrinfo_unix( - host.unsafe_uint8_ptr(), + host.unsafe_ptr(), UnsafePointer[UInt8](), - UnsafePointer.address_of(hints), - UnsafePointer.address_of(servinfo), + Reference(hints), + Reference(servinfo), ) if status != 0: print("getaddrinfo failed to execute with status:", status) @@ -74,7 +74,7 @@ fn get_addr_info(host: String) raises -> AddrInfo: print("servinfo is null") raise Error("Failed to get address info. Pointer to addrinfo is null.") - return move_from_pointee(servinfo) + return servinfo.take_pointee() else: raise Error("Windows is not supported yet! Sorry!") @@ -102,7 +102,7 @@ fn get_ip_address(host: String) raises -> String: raise Error("Failed to get IP address. getaddrinfo was called successfully, but ai_addr is null.") # Cast sockaddr struct to sockaddr_in struct and convert the binary IP to a string using inet_ntop. 
- var addr_in = move_from_pointee(ai_addr.bitcast[sockaddr_in]()) + var addr_in = ai_addr.bitcast[sockaddr_in]().take_pointee() return convert_binary_ip_to_string(addr_in.sin_addr.s_addr, address_family, address_length).strip() @@ -116,12 +116,12 @@ fn convert_binary_port_to_int(port: UInt16) -> Int: fn convert_ip_to_binary(ip_address: String, address_family: Int) -> UInt32: - var ip_buffer = UnsafePointer[UInt8].alloc(4) - var status = inet_pton(address_family, ip_address.unsafe_uint8_ptr(), ip_buffer) + var ip = List[UInt8, True](0, 0, 0, 0) + var status = inet_pton(address_family, ip_address.unsafe_ptr(), ip.unsafe_ptr()) if status == -1: print("Failed to convert IP address to binary") - return move_from_pointee(ip_buffer.bitcast[c_uint]()) + return ip.steal_data().bitcast[c_uint]().take_pointee() fn convert_binary_ip_to_string(owned ip_address: UInt32, address_family: Int32, address_length: UInt32) -> String: @@ -137,20 +137,33 @@ fn convert_binary_ip_to_string(owned ip_address: UInt32, address_family: Int32, """ # It seems like the len of the buffer depends on the length of the string IP. # Allocating 10 works for localhost (127.0.0.1) which I suspect is 9 bytes + 1 null terminator byte. So max should be 16 (15 + 1). - var ip_buffer = UnsafePointer[c_void].alloc(16) - var ip_address_ptr = UnsafePointer.address_of(ip_address).bitcast[c_void]() - _ = inet_ntop(address_family, ip_address_ptr, ip_buffer, 16) + var ip = String(List[UInt8, True](0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) + _ = inet_ntop(address_family, UnsafePointer.address_of(ip_address).bitcast[UInt8](), ip.unsafe_ptr(), 16) var index = 0 while True: - if ip_buffer[index] == 0: - break index += 1 + if ip._buffer[index] == 0: + break + + ip._buffer.size = index + ip._buffer.append(0) + return ip + + +fn build_sockaddr(ip_address: String, port: Int, address_family: Int) -> sockaddr: + """Build a sockaddr pointer from an IP address and port number. + https://learn.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 + https://learn.microsoft.com/en-us/windows/win32/api/ws2def/ns-ws2def-sockaddr_in. + """ + var bin_port = convert_port_to_binary(port) + var bin_ip = convert_ip_to_binary(ip_address, address_family) - return StringRef(ip_buffer, index) + var ai = sockaddr_in(address_family, bin_port, bin_ip, StaticTuple[c_char, 8](0, 0, 0, 0, 0, 0, 0, 0)) + return UnsafePointer.address_of(ai).bitcast[sockaddr]().take_pointee() -fn build_sockaddr_pointer(ip_address: String, port: Int, address_family: Int) -> UnsafePointer[sockaddr]: +fn build_sockaddr_in(ip_address: String, port: Int, address_family: Int) -> sockaddr_in: """Build a sockaddr pointer from an IP address and port number. https://learn.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 https://learn.microsoft.com/en-us/windows/win32/api/ws2def/ns-ws2def-sockaddr_in. 
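As an aside for reviewers, the two conversion helpers touched above are easiest to read as a round trip: `convert_ip_to_binary` packs a dotted-quad string into a network-order `UInt32`, and `convert_binary_ip_to_string` turns that value back into text. A minimal sketch of that round trip, assuming the helpers are importable from `gojo.net.ip` and `AddressFamily` from `gojo.syscall` (paths assumed, not confirmed by this patch):

```mojo
from gojo.net.ip import convert_ip_to_binary, convert_binary_ip_to_string
from gojo.syscall import AddressFamily

fn main():
    # Pack the dotted-quad form into a 32-bit network-order integer.
    var packed = convert_ip_to_binary("127.0.0.1", AddressFamily.AF_INET)
    # 16 bytes covers the longest IPv4 text form plus a null terminator.
    var text = convert_binary_ip_to_string(packed, AddressFamily.AF_INET, 16)
    print(text)  # expected: 127.0.0.1
```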
@@ -158,11 +171,10 @@ fn build_sockaddr_pointer(ip_address: String, port: Int, address_family: Int) -> var bin_port = convert_port_to_binary(port) var bin_ip = convert_ip_to_binary(ip_address, address_family) - var ai = sockaddr_in(address_family, bin_port, bin_ip, StaticTuple[c_char, 8]()) - return UnsafePointer[sockaddr_in].address_of(ai).bitcast[sockaddr]() + return sockaddr_in(address_family, bin_port, bin_ip, StaticTuple[c_char, 8](0, 0, 0, 0, 0, 0, 0, 0)) -fn convert_sockaddr_to_host_port(sockaddr: UnsafePointer[sockaddr]) -> (HostPort, Error): +fn convert_sockaddr_to_host_port(owned sockaddr: sockaddr) -> (HostPort, Error): """Casts a sockaddr pointer to a sockaddr_in pointer and converts the binary IP and port to a string and int respectively. Args: @@ -171,11 +183,11 @@ fn convert_sockaddr_to_host_port(sockaddr: UnsafePointer[sockaddr]) -> (HostPort Returns: A tuple containing the HostPort and an Error if any occurred,. """ - if not sockaddr: + if not UnsafePointer.address_of(sockaddr): return HostPort(), Error("sockaddr is null, nothing to convert.") # Cast sockaddr struct to sockaddr_in to convert binary IP to string. - var addr_in = move_from_pointee(sockaddr.bitcast[sockaddr_in]()) + var addr_in = UnsafePointer.address_of(sockaddr).bitcast[sockaddr_in]().take_pointee() return ( HostPort( diff --git a/gojo/net/socket.mojo b/src/gojo/net/socket.mojo similarity index 81% rename from gojo/net/socket.mojo rename to src/gojo/net/socket.mojo index f21867d..3f1d319 100644 --- a/gojo/net/socket.mojo +++ b/src/gojo/net/socket.mojo @@ -1,13 +1,5 @@ +from utils import Span from ..syscall import ( - sockaddr, - sockaddr_in, - addrinfo, - addrinfo_unix, - socklen_t, - c_void, - c_uint, - c_char, - c_int, socket, connect, recv, @@ -30,27 +22,38 @@ from ..syscall import ( getsockopt, getsockname, getpeername, + close, + sockaddr, + sockaddr_in, + addrinfo, + addrinfo_unix, + socklen_t, + c_void, + c_uint, + c_char, + c_int, AddressFamily, AddressInformation, SocketOptions, SocketType, SHUT_RDWR, SOL_SOCKET, - close, ) -from .fd import FileDescriptor, FileDescriptorBase from .ip import ( convert_binary_ip_to_string, - build_sockaddr_pointer, + build_sockaddr, + build_sockaddr_in, convert_binary_port_to_int, convert_sockaddr_to_host_port, ) +from .fd import FileDescriptor from .address import Addr, BaseAddr, HostPort +from sys import sizeof, external_call alias SocketClosedError = Error("Socket: Socket is already closed") -struct Socket(FileDescriptorBase): +struct Socket(io.ReadWriteCloser): """Represents a network file descriptor. Wraps around a file descriptor and provides network functions. Args: @@ -159,43 +162,40 @@ struct Socket(FileDescriptorBase): if err: print("Failed to close socket during deletion:", str(err)) - @always_inline fn local_address_as_udp(self) -> UDPAddr: return UDPAddr(self.local_address) - @always_inline fn local_address_as_tcp(self) -> TCPAddr: return TCPAddr(self.local_address) - @always_inline fn remote_address_as_udp(self) -> UDPAddr: return UDPAddr(self.remote_address) - @always_inline fn remote_address_as_tcp(self) -> TCPAddr: return TCPAddr(self.remote_address) - @always_inline fn accept(self) raises -> Socket: """Accept a connection. The socket must be bound to an address and listening for connections. The return value is a connection where conn is a new socket object usable to send and receive data on the connection, and address is the address bound to the socket on the other end of the connection. 
""" - var remote_address_ptr = UnsafePointer[sockaddr].alloc(1) - var sin_size = socklen_t(sizeof[socklen_t]()) + var remote_address = sockaddr() var new_fd = accept( self.fd.fd, - remote_address_ptr, - UnsafePointer[socklen_t].address_of(sin_size), + Reference(remote_address), + Reference(socklen_t(sizeof[socklen_t]())), ) if new_fd == -1: + _ = external_call["perror", c_void, UnsafePointer[UInt8]](String("accept").unsafe_ptr()) raise Error("Failed to accept connection") + # TODO: Switch to reference here var remote: HostPort var err: Error - remote, err = convert_sockaddr_to_host_port(remote_address_ptr) + remote, err = convert_sockaddr_to_host_port(remote_address) if err: raise err + _ = remote_address return Socket( new_fd, @@ -218,7 +218,6 @@ struct Socket(FileDescriptorBase): if listen(self.fd.fd, queued) == -1: raise Error("Failed to listen for connections") - @always_inline fn bind(inout self, address: String, port: Int) raises: """Bind the socket to address. The socket must not already be bound. (The format of address depends on the address family). @@ -234,39 +233,37 @@ struct Socket(FileDescriptorBase): address: String - The IP address to bind the socket to. port: The port number to bind the socket to. """ - var sockaddr_pointer = build_sockaddr_pointer(address, port, self.address_family) - - if bind(self.fd.fd, sockaddr_pointer, sizeof[sockaddr_in]()) == -1: + var local_address = build_sockaddr_in(address, port, self.address_family) + if bind(self.fd.fd, Reference(local_address), sizeof[sockaddr_in]()) == -1: + _ = external_call["perror", c_void, UnsafePointer[UInt8]](String("bind").unsafe_ptr()) _ = shutdown(self.fd.fd, SHUT_RDWR) raise Error("Binding socket failed. Wait a few seconds and try again?") + _ = local_address var local = self.get_sock_name() self.local_address = BaseAddr(local.host, local.port) - @always_inline fn file_no(self) -> Int32: """Return the file descriptor of the socket.""" return self.fd.fd - @always_inline fn get_sock_name(self) raises -> HostPort: """Return the address of the socket.""" if self._closed: raise SocketClosedError # TODO: Add check to see if the socket is bound and error if not. - - var local_address_ptr = UnsafePointer[sockaddr].alloc(1) - var local_address_ptr_size = socklen_t(sizeof[sockaddr]()) + var local_address = sockaddr() + var local_address_size = socklen_t(sizeof[sockaddr]()) var status = getsockname( self.fd.fd, - local_address_ptr, - UnsafePointer[socklen_t].address_of(local_address_ptr_size), + Reference(local_address), + Reference(local_address_size), ) if status == -1: + _ = external_call["perror", c_void, UnsafePointer[UInt8]]("getsockname".unsafe_ptr()) raise Error("Socket.get_sock_name: Failed to get address of local socket.") - var addr_in = move_from_pointee(local_address_ptr.bitcast[sockaddr_in]()) - + var addr_in = UnsafePointer.address_of(local_address).bitcast[sockaddr_in]().take_pointee() return HostPort( host=convert_binary_ip_to_string(addr_in.sin_addr.s_addr, AddressFamily.AF_INET, 16), port=convert_binary_port_to_int(addr_in.sin_port), @@ -278,22 +275,21 @@ struct Socket(FileDescriptorBase): return HostPort(), SocketClosedError # TODO: Add check to see if the socket is bound and error if not. 
- var remote_address_ptr = UnsafePointer[sockaddr].alloc(1) - var remote_address_ptr_size = socklen_t(sizeof[sockaddr]()) + var remote_address = sockaddr() + var remote_address_size = socklen_t(sizeof[sockaddr]()) var status = getpeername( self.fd.fd, - remote_address_ptr, - UnsafePointer[socklen_t].address_of(remote_address_ptr_size), + Reference(remote_address), + Reference(remote_address_size), ) if status == -1: return HostPort(), Error("Socket.get_peer_name: Failed to get address of remote socket.") var remote: HostPort var err: Error - remote, err = convert_sockaddr_to_host_port(remote_address_ptr) + remote, err = convert_sockaddr_to_host_port(remote_address) if err: return HostPort(), err - return remote, Error() fn get_socket_option(self, option_name: Int) raises -> Int: @@ -303,19 +299,18 @@ struct Socket(FileDescriptorBase): option_name: The socket option to get. """ var option_value_pointer = UnsafePointer[c_void].alloc(1) - var option_len = socklen_t(sizeof[socklen_t]()) - var option_len_pointer = UnsafePointer.address_of(option_len) + var option_len = socklen_t(sizeof[c_void]()) var status = getsockopt( self.fd.fd, SOL_SOCKET, option_name, option_value_pointer, - option_len_pointer, + Reference(option_len), ) if status == -1: raise Error("Socket.get_sock_opt failed with status: " + str(status)) - return move_from_pointee(option_value_pointer.bitcast[Int]()) + return option_value_pointer.bitcast[Int]().take_pointee() fn set_socket_option(self, option_name: Int, owned option_value: UInt8 = 1) raises: """Return the value of the given socket option. @@ -343,11 +338,12 @@ struct Socket(FileDescriptorBase): address: String - The IP address to connect to. port: The port number to connect to. """ - var sockaddr_pointer = build_sockaddr_pointer(address, port, self.address_family) - - if connect(self.fd.fd, sockaddr_pointer, sizeof[sockaddr_in]()) == -1: + var sa_in = build_sockaddr_in(address, port, self.address_family) + if connect(self.fd.fd, Reference(sa_in), sizeof[sockaddr_in]()) == -1: + _ = external_call["perror", c_void, UnsafePointer[UInt8]](String("connect").unsafe_ptr()) self.shutdown() return Error("Socket.connect: Failed to connect to the remote socket at: " + address + ":" + str(port)) + _ = sa_in var remote: HostPort var err: Error @@ -358,20 +354,7 @@ struct Socket(FileDescriptorBase): self.remote_address = BaseAddr(remote.host, remote.port) return Error() - @always_inline - fn _write(inout self: Self, src: Span[UInt8]) -> (Int, Error): - """Send data to the socket. The socket must be connected to a remote socket. - - Args: - src: The data to send. - - Returns: - The number of bytes sent. - """ - return self.fd._write(src) - - @always_inline - fn write(inout self: Self, src: List[UInt8]) -> (Int, Error): + fn write(inout self: Self, src: Span[UInt8]) -> (Int, Error): """Send data to the socket. The socket must be connected to a remote socket. Args: @@ -405,13 +388,12 @@ struct Socket(FileDescriptorBase): 0, ) if bytes_sent == -1: - return Error("Failed to send message, wrote" + String(total_bytes_sent) + "bytes before failing.") + return Error("Failed to send message, wrote" + str(total_bytes_sent) + "bytes before failing.") total_bytes_sent += bytes_sent attempts += 1 return Error() - @always_inline fn send_to(inout self, src: Span[UInt8], address: String, port: Int) -> (Int, Error): """Send data to the a remote address by connecting to the remote socket before sending. The socket must be not already be connected to a remote socket. 
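For reviewers tracking the switch to `build_sockaddr_in` above, the caller-facing flow is unchanged: `connect` still takes a host string and a port and returns an `Error`, and `write` now accepts a `Span[UInt8]` directly. A rough sketch under those signatures (the import path and the already-constructed `socket` are assumptions, not part of this patch):

```mojo
from gojo.net.socket import Socket

fn send_greeting(inout socket: Socket) -> Error:
    # connect() fills in remote_address on success and returns an Error otherwise.
    var err = socket.connect("127.0.0.1", 8080)
    if err:
        return err

    # write() takes a Span[UInt8], so a String's byte view can be passed straight through.
    var message = String("hello gojo\n")
    var bytes_sent: Int
    bytes_sent, err = socket.write(message.as_bytes_slice())
    print("sent", bytes_sent, "bytes")
    return err
```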
@@ -421,12 +403,13 @@ struct Socket(FileDescriptorBase): address: The IP address to connect to. port: The port number to connect to. """ + var sa = build_sockaddr(address, port, self.address_family) var bytes_sent = sendto( self.fd.fd, src.unsafe_ptr(), len(src), 0, - build_sockaddr_pointer(address, port, self.address_family), + Reference(sa), sizeof[sockaddr_in](), ) @@ -435,8 +418,7 @@ struct Socket(FileDescriptorBase): return bytes_sent, Error() - @always_inline - fn receive(inout self, size: Int = io.BUFFER_SIZE) -> (List[UInt8], Error): + fn receive(inout self, size: Int = io.BUFFER_SIZE) -> (List[UInt8, True], Error): """Receive data from the socket into the buffer with capacity of `size` bytes. Args: @@ -453,17 +435,16 @@ struct Socket(FileDescriptorBase): 0, ) if bytes_received == -1: - return List[UInt8](), Error("Socket.receive: Failed to receive message from socket.") + return List[UInt8, True](), Error("Socket.receive: Failed to receive message from socket.") - var bytes = List[UInt8](unsafe_pointer=buffer, size=bytes_received, capacity=size) + var bytes = List[UInt8, True](unsafe_pointer=buffer, size=bytes_received, capacity=size) if bytes_received < bytes.capacity: return bytes, io.EOF return bytes, Error() - @always_inline - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): - """Receive data from the socket into the buffer dest. Equivalent to recv_into(). + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + """Receive data from the socket into the buffer dest. Args: dest: The buffer to read data into. @@ -474,9 +455,8 @@ struct Socket(FileDescriptorBase): """ return self.fd._read(dest, capacity) - @always_inline - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): - """Receive data from the socket into the buffer dest. Equivalent to recv_into(). + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + """Receive data from the socket into the buffer dest. Equivalent to `recv_into()`. Args: dest: The buffer to read data into. @@ -484,17 +464,20 @@ struct Socket(FileDescriptorBase): Returns: The number of bytes read, and an error if one occurred. """ - var span = Span(dest) + return self.fd.read(dest) + # if dest.size == dest.capacity: + # return 0, Error("net.socket.Socket.read: no space left in destination buffer.") - var bytes_read: Int - var err: Error - bytes_read, err = self._read(span, dest.capacity) - dest.size += bytes_read + # var dest_ptr = dest.unsafe_ptr().offset(dest.size) + # var bytes_read: Int + # var err: Error + # bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) + # dest.size += bytes_read - return bytes_read, err + # print(bytes_read, str(err)) + # return bytes_read, err - @always_inline - fn receive_from(inout self, size: Int = io.BUFFER_SIZE) -> (List[UInt8], HostPort, Error): + fn receive_from(inout self, size: Int = io.BUFFER_SIZE) -> (List[UInt8, True], HostPort, Error): """Receive data from the socket into the buffer dest. Args: @@ -503,7 +486,8 @@ struct Socket(FileDescriptorBase): Returns: The number of bytes read, the remote address, and an error if one occurred. 
""" - var remote_address_ptr = UnsafePointer[sockaddr].alloc(1) + var remote_address = sockaddr() + # var remote_address_ptr = UnsafePointer[sockaddr].alloc(1) var remote_address_ptr_size = socklen_t(sizeof[sockaddr]()) var buffer = UnsafePointer[UInt8].alloc(size) var bytes_received = recvfrom( @@ -511,37 +495,37 @@ struct Socket(FileDescriptorBase): buffer, size, 0, - remote_address_ptr, - UnsafePointer[socklen_t].address_of(remote_address_ptr_size), + Reference(remote_address), + Reference(remote_address_ptr_size), ) if bytes_received == -1: - return List[UInt8](), HostPort(), Error("Failed to read from socket, received a -1 response.") + return List[UInt8, True](), HostPort(), Error("Failed to read from socket, received a -1 response.") var remote: HostPort var err: Error - remote, err = convert_sockaddr_to_host_port(remote_address_ptr) + remote, err = convert_sockaddr_to_host_port(remote_address) if err: - return List[UInt8](), HostPort(), err + return List[UInt8, True](), HostPort(), err - var bytes = List[UInt8](unsafe_pointer=buffer, size=bytes_received, capacity=size) + var bytes = List[UInt8, True](unsafe_pointer=buffer, size=bytes_received, capacity=size) if bytes_received < bytes.capacity: return bytes, remote, io.EOF return bytes, remote, Error() - @always_inline - fn receive_from_into(inout self, inout dest: List[UInt8]) -> (Int, HostPort, Error): + fn receive_from_into(inout self, inout dest: List[UInt8, True]) -> (Int, HostPort, Error): """Receive data from the socket into the buffer dest.""" - var remote_address_ptr = UnsafePointer[sockaddr].alloc(1) + var remote_address = sockaddr() + # var remote_address_ptr = UnsafePointer[sockaddr].alloc(1) var remote_address_ptr_size = socklen_t(sizeof[sockaddr]()) var bytes_read = recvfrom( self.fd.fd, dest.unsafe_ptr() + dest.size, dest.capacity - dest.size, 0, - remote_address_ptr, - UnsafePointer[socklen_t].address_of(remote_address_ptr_size), + Reference(remote_address), + Reference(remote_address_ptr_size), ) dest.size += bytes_read @@ -550,7 +534,7 @@ struct Socket(FileDescriptorBase): var remote: HostPort var err: Error - remote, err = convert_sockaddr_to_host_port(remote_address_ptr) + remote, err = convert_sockaddr_to_host_port(remote_address) if err: return 0, HostPort(), err @@ -559,11 +543,9 @@ struct Socket(FileDescriptorBase): return bytes_read, remote, Error() - @always_inline fn shutdown(self): _ = shutdown(self.fd.fd, SHUT_RDWR) - @always_inline fn close(inout self) -> Error: """Mark the socket closed. Once that happens, all future operations on the socket object will fail. 
@@ -590,7 +572,6 @@ struct Socket(FileDescriptorBase): # """ # self.set_socket_option(SocketOptions.SO_RCVTIMEO, duration) - @always_inline fn send_file(self, file: FileHandle) -> Error: try: var bytes = file.read_bytes() diff --git a/gojo/net/tcp.mojo b/src/gojo/net/tcp.mojo similarity index 89% rename from gojo/net/tcp.mojo rename to src/gojo/net/tcp.mojo index 148565e..2c693c4 100644 --- a/gojo/net/tcp.mojo +++ b/src/gojo/net/tcp.mojo @@ -1,3 +1,4 @@ +from utils import Span from collections import InlineList from ..syscall import SocketOptions from .address import NetworkType, split_host_port, join_host_port, BaseAddr, resolve_internet_addr, HostPort @@ -46,16 +47,13 @@ struct TCPConnection(Movable): var socket: Socket - @always_inline fn __init__(inout self, owned socket: Socket): self.socket = socket^ - @always_inline fn __moveinit__(inout self, owned existing: Self): self.socket = existing.socket^ - @always_inline - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): """Reads data from the underlying file descriptor. Args: @@ -74,8 +72,7 @@ struct TCPConnection(Movable): return bytes_read, err - @always_inline - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): """Reads data from the underlying file descriptor. Args: @@ -84,29 +81,18 @@ struct TCPConnection(Movable): Returns: The number of bytes read, or an error if one occurred. """ - var span = Span(dest) + if dest.size == dest.capacity: + return 0, Error("net.tcp.TCPConnection.read: no space left in destination buffer.") + var dest_ptr = dest.unsafe_ptr().offset(dest.size) var bytes_read: Int var err: Error - bytes_read, err = self._read(span, dest.capacity) + bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) dest.size += bytes_read return bytes_read, err - @always_inline - fn _write(inout self, src: Span[UInt8]) -> (Int, Error): - """Writes data to the underlying file descriptor. - - Args: - src: The buffer to read data into. - - Returns: - The number of bytes written, or an error if one occurred. - """ - return self.socket._write(src) - - @always_inline - fn write(inout self, src: List[UInt8]) -> (Int, Error): + fn write(inout self, src: Span[UInt8]) -> (Int, Error): """Writes data to the underlying file descriptor. Args: @@ -117,7 +103,6 @@ struct TCPConnection(Movable): """ return self.socket.write(src) - @always_inline fn close(inout self) -> Error: """Closes the underlying file descriptor. @@ -126,7 +111,6 @@ struct TCPConnection(Movable): """ return self.socket.close() - @always_inline fn local_address(self) -> TCPAddr: """Returns the local network address. The Addr returned is shared by all invocations of local_address, so do not modify it. @@ -136,7 +120,6 @@ struct TCPConnection(Movable): """ return self.socket.local_address_as_tcp() - @always_inline fn remote_address(self) -> TCPAddr: """Returns the remote network address. The Addr returned is shared by all invocations of remote_address, so do not modify it. 
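Since `TCPConnection.read` above moves from growing a `List[UInt8]` to filling the spare capacity of a `List[UInt8, True]`, a short usage sketch may help reviewers; the import path and the way the connection is obtained are assumptions, not part of this patch:

```mojo
from gojo.net.tcp import TCPConnection

fn read_chunk(inout conn: TCPConnection) -> Int:
    # read() writes into dest.capacity - dest.size and bumps dest.size itself,
    # so the caller only has to supply a buffer with spare capacity.
    var buffer = List[UInt8, True](capacity=4096)
    var bytes_read: Int
    var err: Error
    bytes_read, err = conn.read(buffer)
    if err:
        # io.EOF is also reported through err; a real failure carries a message.
        print(str(err))
    return bytes_read
```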
diff --git a/gojo/net/udp.mojo b/src/gojo/net/udp.mojo similarity index 96% rename from gojo/net/udp.mojo rename to src/gojo/net/udp.mojo index ebc0fc0..aee43f7 100644 --- a/gojo/net/udp.mojo +++ b/src/gojo/net/udp.mojo @@ -1,4 +1,5 @@ -from collections import InlineList +from collections import InlineArray, InlineList +from utils import Span from ..syscall import SocketOptions, SocketType from .address import NetworkType, split_host_port, join_host_port, BaseAddr, resolve_internet_addr from .socket import Socket @@ -49,7 +50,7 @@ struct UDPConnection(Movable): fn __moveinit__(inout self, owned existing: Self): self.socket = existing.socket^ - fn read_from(inout self, inout dest: List[UInt8]) -> (Int, HostPort, Error): + fn read_from(inout self, inout dest: List[UInt8, True]) -> (Int, HostPort, Error): """Reads data from the underlying file descriptor. Args: @@ -63,7 +64,7 @@ struct UDPConnection(Movable): var err = Error() bytes_read, remote, err = self.socket.receive_from_into(dest) if err: - if str(err) != io.EOF: + if str(err) != str(io.EOF): return bytes_read, remote, err return bytes_read, remote, err diff --git a/src/gojo/strings/__init__.mojo b/src/gojo/strings/__init__.mojo new file mode 100644 index 0000000..ae88b6f --- /dev/null +++ b/src/gojo/strings/__init__.mojo @@ -0,0 +1,2 @@ +from .builder import StringBuilder +from .reader import Reader diff --git a/gojo/strings/builder.mojo b/src/gojo/strings/builder.mojo similarity index 52% rename from gojo/strings/builder.mojo rename to src/gojo/strings/builder.mojo index 24ac6cd..879c01a 100644 --- a/gojo/strings/builder.mojo +++ b/src/gojo/strings/builder.mojo @@ -1,3 +1,6 @@ +from collections import InlineArray +from utils import StringSlice, Span +from memory import memcpy import ..io @@ -12,9 +15,9 @@ struct StringBuilder[growth_factor: Float32 = 2]( A string builder class that allows for efficient string management and concatenation. This class is useful when you need to build a string by appending multiple strings together. The performance increase is not linear. Compared to string concatenation, - I've observed around 20-30x faster for writing and rending ~4KB and up to 2100x-2300x + I've observed around 20-30x faster for writing and rending ~4KB and up to 400x-500x for ~4MB. This is because it avoids the overhead of creating and destroying many - intermediate strings and performs memcopy operations. + intermediate strings and performs memcpy operations. The result is a more efficient when building larger string concatenations. It is generally not recommended to use this class for small concatenations such as @@ -22,28 +25,34 @@ struct StringBuilder[growth_factor: Float32 = 2]( builder and appending the strings is not worth the performance gain. Example: - ``` - from strings.builder import StringBuilder - - var sb = StringBuilder() - sb.write_string("Hello ") - sb.write_string("World!") - print(sb) # Hello World! - ``` + ```mojo + from gojo.strings import StringBuilder + + var sb = StringBuilder() + _ = sb.write_string("Hello ") + _ = sb.write_string("World!") + print(str(sb)) # Hello World! + ``` """ var _data: UnsafePointer[UInt8] + """The internal buffer that holds the string data.""" var _size: Int + """The current size of the string builder.""" var _capacity: Int + """The current maximum capacity of the string builder.""" - @always_inline fn __init__(inout self, *, capacity: Int = 4096): + """Creates a new string builder with the given capacity. + + Args: + capacity: The initial capacity of the string builder. 
The default is 4096. + """ constrained[growth_factor >= 1.25]() self._data = UnsafePointer[UInt8]().alloc(capacity) self._size = 0 self._capacity = capacity - @always_inline fn __moveinit__(inout self, owned other: Self): self._data = other._data self._size = other._size @@ -52,66 +61,66 @@ struct StringBuilder[growth_factor: Float32 = 2]( other._size = 0 other._capacity = 0 - @always_inline fn __del__(owned self): if self._data: self._data.free() - @always_inline fn __len__(self) -> Int: """Returns the length of the string builder.""" return self._size - @always_inline - fn __str__(self) -> String: + fn as_bytes_slice(ref [_]self) -> Span[UInt8, __lifetime_of(self)]: + """Returns the internal data as a Span[UInt8].""" + return Span[UInt8, __lifetime_of(self)](unsafe_ptr=self._data, len=self._size) + + fn as_string_slice(ref [_]self) -> StringSlice[__lifetime_of(self)]: + """Return a StringSlice view of the data owned by the builder. + + Returns: + The string representation of the string builder. Returns an empty string if the string builder is empty. """ - Converts the string builder to a string. + return StringSlice[__lifetime_of(self)](unsafe_from_utf8_ptr=self._data, len=self._size) + + fn __str__(self) -> String: + """Converts the string builder to a string. Returns: The string representation of the string builder. Returns an empty string if the string builder is empty. """ - var copy = UnsafePointer[UInt8]().alloc(self._size) - memcpy(copy, self._data, self._size) - return StringRef(copy, self._size) - - @always_inline - fn as_bytes_slice(self: Reference[Self]) -> Span[UInt8, self.is_mutable, self.lifetime]: - """Returns the internal _data as a Span[UInt8].""" - return Span[UInt8, self.is_mutable, self.lifetime](unsafe_ptr=self[]._data, len=self[]._size) - - @always_inline - fn render( - self: Reference[Self], - ) -> StringSlice[self.is_mutable, self.lifetime]: - """ - Return a StringSlice view of the _data owned by the builder. - Slightly faster than __str__, 10-20% faster in limited testing. + return self.as_string_slice() + + @deprecated( + "StringBuilder.render() has been deprecated. Use StringBuilder.as_string_slice() or call str() instead." + ) + fn render(ref [_]self) -> String: + """Return a StringSlice view of the data owned by the builder. Returns: - The string representation of the string builder. Returns an empty string if the string builder is empty. + The string representation of the string builder. Returns an empty string if the string builder is empty. """ - return StringSlice[self.is_mutable, self.lifetime](unsafe_from_utf8_ptr=self[]._data, len=self[]._size) + return self.as_string_slice() - @always_inline - fn _resize(inout self, _capacity: Int) -> None: - """ - Resizes the string builder buffer. + fn _resize(inout self, capacity: Int) -> None: + """Resizes the string builder buffer. Args: - _capacity: The new _capacity of the string builder buffer. + capacity: The new capacity of the string builder buffer. 
""" - var new__data = UnsafePointer[UInt8]().alloc(_capacity) - memcpy(new__data, self._data, self._size) + var new_data = UnsafePointer[UInt8]().alloc(capacity) + memcpy(new_data, self._data, self._size) self._data.free() - self._data = new__data - self._capacity = _capacity + self._data = new_data + self._capacity = capacity return None - @always_inline - fn _resize_if_needed(inout self, bytes_to_add: Int): - """Resizes the buffer if the bytes to add exceeds the current capacity.""" + fn _resize_if_needed(inout self, bytes_to_add: Int) -> None: + """Resizes the buffer if the bytes to add exceeds the current capacity. + + Args: + bytes_to_add: The number of bytes to add to the buffer. + """ # TODO: Handle the case where new_capacity is greater than MAX_INT. It should panic. if bytes_to_add > self._capacity - self._size: var new_capacity = int(self._capacity * 2) @@ -119,10 +128,8 @@ struct StringBuilder[growth_factor: Float32 = 2]( new_capacity = self._capacity + bytes_to_add self._resize(new_capacity) - @always_inline - fn _write(inout self, src: Span[UInt8]) -> (Int, Error): - """ - Appends a byte Span to the builder buffer. + fn write(inout self, src: Span[UInt8]) -> (Int, Error): + """Appends a byte Span to the builder buffer. Args: src: The byte array to append. @@ -133,34 +140,26 @@ struct StringBuilder[growth_factor: Float32 = 2]( return len(src), Error() - @always_inline - fn write(inout self, src: List[UInt8]) -> (Int, Error): - """ - Appends a byte List to the builder buffer. - - Args: - src: The byte array to append. - """ - var span = Span(src) - - var bytes_read: Int - var err: Error - bytes_read, err = self._write(span) - - return bytes_read, err - - @always_inline fn write_string(inout self, src: String) -> (Int, Error): - """ - Appends a string to the builder buffer. + """Appends a string to the builder buffer. Args: src: The string to append. + + Returns: + The number of bytes written to the builder buffer. """ - return self._write(src.as_bytes_slice()) + return self.write(src.as_bytes_slice()) - @always_inline fn write_byte(inout self, byte: UInt8) -> (Int, Error): + """Appends a byte to the builder buffer. + + Args: + byte: The byte to append. + + Returns: + The number of bytes written to the builder buffer. + """ self._resize_if_needed(1) self._data[self._size] = byte self._size += 1 diff --git a/gojo/strings/reader.mojo b/src/gojo/strings/reader.mojo similarity index 58% rename from gojo/strings/reader.mojo rename to src/gojo/strings/reader.mojo index 97ca0d9..70c38b5 100644 --- a/gojo/strings/reader.mojo +++ b/src/gojo/strings/reader.mojo @@ -1,9 +1,10 @@ +from utils import StringSlice, Span +from os import abort +from algorithm.memory import parallel_memcpy import ..io -from ..builtins import copy, panic @value -# TODO: Uncomment write_to and write_buf once the bug with the trait's Span argument is fixed. struct Reader( Sized, io.Reader, @@ -11,38 +12,34 @@ struct Reader( io.ByteReader, io.ByteScanner, io.Seeker, - # io.WriterTo, + io.WriterTo, ): - """A Reader that implements the [io.Reader], [io.ReaderAt], [io.ByteReader], [io.ByteScanner], [io.Seeker], and [io.WriterTo] traits + """A Reader that implements the `io.Reader`, `io.ReaderAt`, `io.ByteReader`, `io.ByteScanner`, `io.Seeker`, and `io.WriterTo` traits by reading from a string. The zero value for Reader operates like a Reader of an empty string. 
""" var string: String - var read_pos: Int # current reading index - var prev_rune: Int # index of previous rune; or < 0 + """Internal string to read from.""" + var read_pos: Int + """Current reading index.""" + var prev_rune: Int + """Index of previous rune; or < 0.""" - @always_inline fn __init__(inout self, string: String = ""): self.string = string self.read_pos = 0 self.prev_rune = -1 - @always_inline fn __len__(self) -> Int: - """Returns the number of bytes of the unread portion of the string. - - Returns: - int: the number of bytes of the unread portion of the string. - """ + """Returns the number of bytes of the unread portion of the string.""" if self.read_pos >= len(self.string): return 0 return len(self.string) - self.read_pos - @always_inline fn size(self) -> Int: """Returns the original length of the underlying string. - size is the number of bytes available for reading via [Reader.read_at]. + `size` is the number of bytes available for reading via `Reader.read_at`. The returned value is always the same and is not affected by calls to any other method. @@ -51,14 +48,12 @@ struct Reader( """ return len(self.string) - @always_inline - fn _read(inout self, inout dest: Span[UInt8, True], capacity: Int) -> (Int, Error): - """Reads from the underlying string into the provided List[UInt8] object. - Implements the [io.Reader] trait. + fn _read(inout self, inout dest: UnsafePointer[UInt8], capacity: Int) -> (Int, Error): + """Reads from the underlying string into the provided `dest` buffer. Args: - dest: The destination List[UInt8] object to read into. - capacity: The capacity of the destination List[UInt8] object. + dest: The destination buffer to read into. + capacity: The capacity of the destination buffer. Returns: The number of bytes read into dest. @@ -67,42 +62,46 @@ struct Reader( return 0, io.EOF self.prev_rune = -1 - var bytes_written = copy(dest, self.string.as_bytes_slice()[self.read_pos :]) - self.read_pos += bytes_written - return bytes_written, Error() + var bytes_to_read = self.string.as_bytes_slice()[self.read_pos :] + if len(bytes_to_read) > capacity: + return 0, Error("strings.Reader._read: no space left in destination buffer.") - @always_inline - fn read(inout self, inout dest: List[UInt8]) -> (Int, Error): - """Reads from the underlying string into the provided List[UInt8] object. - Implements the [io.Reader] trait. + var count = min(len(bytes_to_read), capacity) + parallel_memcpy(dest, bytes_to_read.unsafe_ptr(), count) + self.read_pos += count + return count, Error() + + fn read(inout self, inout dest: List[UInt8, True]) -> (Int, Error): + """Reads from the underlying string into the provided `dest` buffer. Args: - dest: The destination List[UInt8] object to read into. + dest: The destination buffer to read into. Returns: The number of bytes read into dest. """ - var span = Span(dest) + if dest.size == dest.capacity: + return 0, Error("strings.Reader.read: no space left in destination buffer.") + + var dest_ptr = dest.unsafe_ptr().offset(dest.size) var bytes_read: Int var err: Error - bytes_read, err = self._read(span, dest.capacity) + bytes_read, err = self._read(dest_ptr, dest.capacity - dest.size) dest.size += bytes_read return bytes_read, err - @always_inline - fn _read_at(self, inout dest: Span[UInt8, True], off: Int, capacity: Int) -> (Int, Error): - """Reads from the Reader into the dest List[UInt8] starting at the offset off. - It returns the number of bytes read into dest and an error if any. 
+ fn _read_at(self, inout dest: Span[UInt8], off: Int, capacity: Int) -> (Int, Error): + """Reads from the Reader into the `dest` buffer starting at the offset `off`. Args: - dest: The destination List[UInt8] object to read into. + dest: The destination buffer to read into. off: The byte offset to start reading from. - capacity: The capacity of the destination List[UInt8] object. + capacity: The capacity of the destination buffer. Returns: - The number of bytes read into dest. + It returns the number of bytes read into `dest` and an error if any. """ # cannot modify state - see io.ReaderAt if off < 0: @@ -112,19 +111,21 @@ struct Reader( return 0, io.EOF var error = Error() - var copied_elements_count = copy(dest, self.string.as_bytes_slice()[off:]) - if copied_elements_count < len(dest): + var bytes_to_read = self.string.as_bytes_slice()[off:] + var count = min(len(bytes_to_read), capacity) + parallel_memcpy(dest.unsafe_ptr(), bytes_to_read.unsafe_ptr(), count) + dest._len += count + if count < len(dest): error = Error(str(io.EOF)) - return copied_elements_count, error + return count, error - @always_inline - fn read_at(self, inout dest: List[UInt8], off: Int) -> (Int, Error): - """Reads from the Reader into the dest List[UInt8] starting at the offset off. + fn read_at(self, inout dest: List[UInt8, True], off: Int) -> (Int, Error): + """Reads from the Reader into the `dest` buffer starting at the offset off. It returns the number of bytes read into dest and an error if any. Args: - dest: The destination List[UInt8] object to read into. + dest: The destination buffer to read into. off: The byte offset to start reading from. Returns: @@ -139,7 +140,6 @@ struct Reader( return bytes_read, err - @always_inline fn read_byte(inout self) -> (UInt8, Error): """Reads the next byte from the underlying string.""" self.prev_rune = -1 @@ -150,7 +150,6 @@ struct Reader( self.read_pos += 1 return UInt8(b), Error() - @always_inline fn unread_byte(inout self) -> Error: """Unreads the last byte read. Only the most recent byte read can be unread.""" if self.read_pos <= 0: @@ -193,7 +192,7 @@ struct Reader( Args: offset: The offset to seek to. - whence: The seek mode. It can be one of [io.SEEK_START], [io.SEEK_CURRENT], or [io.SEEK_END]. + whence: The seek mode. It can be one of `io.SEEK_START`, `io.SEEK_CURRENT`, or `io.SEEK_END`. Returns: The new position in the string. @@ -216,32 +215,31 @@ struct Reader( self.read_pos = position return position, Error() - # fn write_to[W: io.Writer](inout self, inout writer: W) -> (Int, Error): - # """Writes the remaining portion of the underlying string to the provided writer. - # Implements the [io.WriterTo] trait. + fn write_to[W: io.Writer, //](inout self, inout writer: W) -> (Int, Error): + """Writes the remaining portion of the underlying string to the provided writer. - # Args: - # writer: The writer to write the remaining portion of the string to. + Args: + writer: The writer to write the remaining portion of the string to. - # Returns: - # The number of bytes written to the writer. - # """ - # self.prev_rune = -1 - # var err = Error() - # if self.read_pos >= len(self.string): - # return Int(0), err + Returns: + The number of bytes written to the writer. 
+ """ + self.prev_rune = -1 + var err = Error() + if self.read_pos >= len(self.string): + return Int(0), err - # var chunk_to_write = self.string.as_bytes_slice()[self.read_pos :] - # var bytes_written: Int - # bytes_written, err = writer.write(chunk_to_write) - # if bytes_written > len(chunk_to_write): - # panic("strings.Reader.write_to: invalid write_string count") + var chunk_to_write = self.string.as_bytes_slice()[self.read_pos :] + var bytes_written: Int + bytes_written, err = writer.write(chunk_to_write) + if bytes_written > len(chunk_to_write): + abort("strings.Reader.write_to: invalid write_string count") - # self.read_pos += bytes_written - # if bytes_written != len(chunk_to_write) and not err: - # err = Error(io.ERR_SHORT_WRITE) + self.read_pos += bytes_written + if bytes_written != len(chunk_to_write) and not err: + err = str(io.ERR_SHORT_WRITE) - # return bytes_written, err + return bytes_written, err # # TODO: How can I differentiate between the two write_to methods when the writer implements both traits? # fn write_to[W: io.StringWriter](inout self, inout writer: W) raises -> Int: @@ -269,7 +267,6 @@ struct Reader( # return Int(bytes_written) - @always_inline fn reset(inout self, string: String): """Resets the [Reader] to be reading from the beginning of the provided string. @@ -280,12 +277,21 @@ struct Reader( self.read_pos = 0 self.prev_rune = -1 + fn read_until_delimiter(inout self, delimiter: String = "\n") -> StringSlice[__lifetime_of(self)]: + """Reads from the underlying string until a delimiter is found. + The delimiter is not included in the returned string slice. -fn new_reader(string: String = "") -> Reader: - """Returns a new [Reader] reading from the provided string. - It is similar to [bytes.new_buffer] but more efficient and non-writable. + Returns: + The string slice containing the bytes read until the delimiter. + """ + var start = self.read_pos + var bytes = self.string.as_bytes_slice() + while self.read_pos < len(self.string): + if bytes[self.read_pos] == ord(delimiter): + break + self.read_pos += 1 - Args: - string: The string to read from. - """ - return Reader(string) + self.read_pos += 1 + return StringSlice[__lifetime_of(self)]( + unsafe_from_utf8_ptr=self.string.unsafe_ptr() + start, len=self.read_pos - start - 1 + ) diff --git a/gojo/syscall/__init__.mojo b/src/gojo/syscall/__init__.mojo similarity index 91% rename from gojo/syscall/__init__.mojo rename to src/gojo/syscall/__init__.mojo index c89fef0..9205fe8 100644 --- a/gojo/syscall/__init__.mojo +++ b/src/gojo/syscall/__init__.mojo @@ -9,7 +9,6 @@ from .net import ( sendto, recv, recvfrom, - open, addrinfo, addrinfo_unix, sockaddr, @@ -36,7 +35,16 @@ from .net import ( SHUT_RDWR, SOL_SOCKET, ) -from .file import close, FileDescriptorBase + +from .file import ( + close, + open, + read, + write, + O_NONBLOCK, + O_ACCMODE, + O_CLOEXEC, +) # Adapted from https://github.com/crisadamo/mojo-Libc . Huge thanks to Cristian! # C types diff --git a/gojo/syscall/file.mojo b/src/gojo/syscall/file.mojo similarity index 97% rename from gojo/syscall/file.mojo rename to src/gojo/syscall/file.mojo index ef0427e..4f01350 100644 --- a/gojo/syscall/file.mojo +++ b/src/gojo/syscall/file.mojo @@ -1,5 +1,4 @@ -trait FileDescriptorBase(io.Reader, io.Writer, io.Closer): - ... 
+from sys import external_call # --- ( File Related Syscalls & Structs )--------------------------------------- diff --git a/gojo/syscall/net.mojo b/src/gojo/syscall/net.mojo similarity index 69% rename from gojo/syscall/net.mojo rename to src/gojo/syscall/net.mojo index 676b2da..4721fd0 100644 --- a/gojo/syscall/net.mojo +++ b/src/gojo/syscall/net.mojo @@ -1,13 +1,12 @@ -from . import c_char, c_int, c_ushort, c_uint, c_size_t, c_ssize_t -from .file import O_CLOEXEC, O_NONBLOCK +from collections import InlineArray from utils.static_tuple import StaticTuple +from sys import external_call +from .file import O_CLOEXEC, O_NONBLOCK alias IPPROTO_IPV6 = 41 alias IPV6_V6ONLY = 26 alias EPROTONOSUPPORT = 93 -# Adapted from https://github.com/gabrieldemarmiesse/mojo-stdlib-extensions/ . Huge thanks to Gabriel! - struct FD: alias STDIN = 0 @@ -18,8 +17,6 @@ struct FD: alias SUCCESS = 0 alias GRND_NONBLOCK: UInt8 = 1 -alias char_pointer = UnsafePointer[UInt8] - # --- ( error.h Constants )----------------------------------------------------- struct ErrnoConstants: @@ -60,14 +57,6 @@ struct ErrnoConstants: alias EWOULDBLOCK = 11 -# fn to_char_ptr(s: String) -> UnsafePointer[UInt8]: -# """Only ASCII-based strings.""" -# var ptr = UnsafePointer[UInt8]().alloc(len(s)) -# for i in range(len(s)): -# ptr.store(i, ord(s[i])) -# return ptr - - fn cftob(val: c_int) -> Bool: """Convert C-like failure (-1) to Bool.""" return rebind[Bool](val > 0) @@ -298,6 +287,9 @@ struct SocketOptions: struct in_addr: var s_addr: in_addr_t + fn __init__(inout self, addr: in_addr_t = 0): + self.s_addr = addr + @value @register_passable("trivial") @@ -311,6 +303,10 @@ struct sockaddr: var sa_family: sa_family_t var sa_data: StaticTuple[c_char, 14] + fn __init__(inout self, family: sa_family_t = 0, data: StaticTuple[c_char, 14] = StaticTuple[c_char, 14]()): + self.sa_family = family + self.sa_data = data + @value @register_passable("trivial") @@ -320,6 +316,18 @@ struct sockaddr_in: var sin_addr: in_addr var sin_zero: StaticTuple[c_char, 8] + fn __init__( + inout self, + family: sa_family_t = 0, + port: in_port_t = 0, + addr: in_addr = in_addr(), + zero: StaticTuple[c_char, 8] = StaticTuple[c_char, 8](), + ): + self.sin_family = family + self.sin_port = port + self.sin_addr = addr + self.sin_zero = zero + @value @register_passable("trivial") @@ -332,7 +340,6 @@ struct sockaddr_in6: @value -@register_passable("trivial") struct addrinfo: """Struct field ordering can vary based on platform. For MacOS, I had to swap the order of ai_canonname and ai_addr. @@ -368,12 +375,8 @@ struct addrinfo: self.ai_addr = ai_addr self.ai_next = ai_next - # fn __init__() -> Self: - # return Self(0, 0, 0, 0, 0, UnsafePointer[UInt8](), UnsafePointer[sockaddr](), UnsafePointer[addrinfo]()) - @value -@register_passable("trivial") struct addrinfo_unix: """Struct field ordering can vary based on platform. For MacOS, I had to swap the order of ai_canonname and ai_addr. @@ -416,10 +419,13 @@ struct addrinfo_unix: fn htonl(hostlong: c_uint) -> c_uint: """Libc POSIX `htonl` function Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html - Fn signature: uint32_t htonl(uint32_t hostlong). + Fn signature: `uint32_t htonl(uint32_t hostlong)`. + + Args: + hostlong: A 32-bit integer in host byte order. - Args: hostlong: A 32-bit integer in host byte order. - Returns: The value provided in network byte order. + Returns: + The value provided in network byte order. 
""" return external_call["htonl", c_uint, c_uint](hostlong) @@ -427,10 +433,13 @@ fn htonl(hostlong: c_uint) -> c_uint: fn htons(hostshort: c_ushort) -> c_ushort: """Libc POSIX `htons` function Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html - Fn signature: uint16_t htons(uint16_t hostshort). + Fn signature: `uint16_t htons(uint16_t hostshort)`. + + Args: + hostshort: A 16-bit integer in host byte order. - Args: hostshort: A 16-bit integer in host byte order. - Returns: The value provided in network byte order. + Returns: + The value provided in network byte order. """ return external_call["htons", c_ushort, c_ushort](hostshort) @@ -438,10 +447,13 @@ fn htons(hostshort: c_ushort) -> c_ushort: fn ntohl(netlong: c_uint) -> c_uint: """Libc POSIX `ntohl` function Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html - Fn signature: uint32_t ntohl(uint32_t netlong). + Fn signature: `uint32_t ntohl(uint32_t netlong)`. - Args: netlong: A 32-bit integer in network byte order. - Returns: The value provided in host byte order. + Args: + netlong: A 32-bit integer in network byte order. + + Returns: + The value provided in host byte order. """ return external_call["ntohl", c_uint, c_uint](netlong) @@ -449,10 +461,13 @@ fn ntohl(netlong: c_uint) -> c_uint: fn ntohs(netshort: c_ushort) -> c_ushort: """Libc POSIX `ntohs` function Reference: https://man7.org/linux/man-pages/man3/htonl.3p.html - Fn signature: uint16_t ntohs(uint16_t netshort). + Fn signature: `uint16_t ntohs(uint16_t netshort)`. + + Args: + netshort: A 16-bit integer in network byte order. - Args: netshort: A 16-bit integer in network byte order. - Returns: The value provided in host byte order. + Returns: + The value provided in host byte order. """ return external_call["ntohs", c_ushort, c_ushort](netshort) @@ -465,7 +480,7 @@ fn inet_ntop( ) -> UnsafePointer[UInt8]: """Libc POSIX `inet_ntop` function Reference: https://man7.org/linux/man-pages/man3/inet_ntop.3p.html. - Fn signature: const char *inet_ntop(int af, const void *restrict src, char *restrict dst, socklen_t size). + Fn signature: `const char *inet_ntop(int af, const void *restrict src, char *restrict dst, socklen_t size)`. Args: af: Address Family see AF_ aliases. @@ -476,42 +491,40 @@ fn inet_ntop( Returns: A pointer to the buffer containing the result. """ - return external_call[ - "inet_ntop", - UnsafePointer[UInt8], # FnName, RetType - c_int, - UnsafePointer[UInt8], - UnsafePointer[UInt8], - socklen_t, # Args - ](af, src, dst, size) + return external_call["inet_ntop", UnsafePointer[UInt8]](af, src, dst, size) fn inet_pton(af: c_int, src: UnsafePointer[UInt8], dst: UnsafePointer[UInt8]) -> c_int: """Libc POSIX `inet_pton` function Reference: https://man7.org/linux/man-pages/man3/inet_ntop.3p.html - Fn signature: int inet_pton(int af, const char *restrict src, void *restrict dst). + Fn signature: `int inet_pton(int af, const char *restrict src, void *restrict dst)`. - Args: af: Address Family see AF_ aliases. + Args: + af: Address Family see AF_ aliases. src: A pointer to a string containing the address. dst: A pointer to a buffer to store the result. - Returns: 1 on success, 0 if the input is not a valid address, -1 on error. + Returns: + 1 on success, 0 if the input is not a valid address, -1 on error. 
""" return external_call[ "inet_pton", - c_int, # FnName, RetType c_int, + c_int, + UnsafePointer[UInt8], UnsafePointer[UInt8], - UnsafePointer[UInt8], # Args ](af, src, dst) fn inet_addr(cp: UnsafePointer[UInt8]) -> in_addr_t: """Libc POSIX `inet_addr` function Reference: https://man7.org/linux/man-pages/man3/inet_addr.3p.html - Fn signature: in_addr_t inet_addr(const char *cp). + Fn signature: `in_addr_t inet_addr(const char *cp)`. + + Args: + cp: A pointer to a string containing the address. - Args: cp: A pointer to a string containing the address. - Returns: The address in network byte order. + Returns: + The address in network byte order. """ return external_call["inet_addr", in_addr_t, UnsafePointer[UInt8]](cp) @@ -519,10 +532,13 @@ fn inet_addr(cp: UnsafePointer[UInt8]) -> in_addr_t: fn inet_ntoa(addr: in_addr) -> UnsafePointer[UInt8]: """Libc POSIX `inet_ntoa` function Reference: https://man7.org/linux/man-pages/man3/inet_addr.3p.html - Fn signature: char *inet_ntoa(struct in_addr in). + Fn signature: `char *inet_ntoa(struct in_addr in)`. + + Args: + addr: A pointer to a string containing the address. - Args: in: A pointer to a string containing the address. - Returns: The address in network byte order. + Returns: + The address in network byte order. """ return external_call["inet_ntoa", UnsafePointer[UInt8], in_addr](addr) @@ -530,14 +546,17 @@ fn inet_ntoa(addr: in_addr) -> UnsafePointer[UInt8]: fn socket(domain: c_int, type: c_int, protocol: c_int) -> c_int: """Libc POSIX `socket` function Reference: https://man7.org/linux/man-pages/man3/socket.3p.html - Fn signature: int socket(int domain, int type, int protocol). + Fn signature: `int socket(int domain, int type, int protocol)`. - Args: domain: Address Family see AF_ aliases. + Args: + domain: Address Family see AF_ aliases. type: Socket Type see SOCK_ aliases. protocol: The protocol to use. - Returns: A File Descriptor or -1 in case of failure. + + Returns: + A File Descriptor or -1 in case of failure. """ - return external_call["socket", c_int, c_int, c_int, c_int](domain, type, protocol) # FnName, RetType # Args + return external_call["socket", c_int, c_int, c_int, c_int](domain, type, protocol) fn setsockopt( @@ -549,7 +568,7 @@ fn setsockopt( ) -> c_int: """Libc POSIX `setsockopt` function Reference: https://man7.org/linux/man-pages/man3/setsockopt.3p.html - Fn signature: int setsockopt(int socket, int level, int option_name, const void *option_value, socklen_t option_len). + Fn signature: `int setsockopt(int socket, int level, int option_name, const void *option_value, socklen_t option_len)`. Args: socket: A File Descriptor. @@ -557,16 +576,18 @@ fn setsockopt( option_name: The option to set. option_value: A pointer to the value to set. option_len: The size of the value. - Returns: 0 on success, -1 on error. + + Returns: + 0 on success, -1 on error. """ return external_call[ "setsockopt", - c_int, # FnName, RetType + c_int, c_int, c_int, c_int, UnsafePointer[UInt8], - socklen_t, # Args + socklen_t, ](socket, level, option_name, option_value, option_len) @@ -575,84 +596,79 @@ fn getsockopt( level: c_int, option_name: c_int, option_value: UnsafePointer[UInt8], - option_len: UnsafePointer[socklen_t], + option_len: Reference[socklen_t], ) -> c_int: """Libc POSIX `getsockopt` function Reference: https://man7.org/linux/man-pages/man3/getsockopt.3p.html - Fn signature: int getsockopt(int socket, int level, int option_name, void *restrict option_value, socklen_t *restrict option_len). 
+ Fn signature: `int getsockopt(int socket, int level, int option_name, void *restrict option_value, socklen_t *restrict option_len)`. - Args: socket: A File Descriptor. + Args: + socket: A File Descriptor. level: The protocol level. option_name: The option to get. option_value: A pointer to the value to get. - option_len: DTypePointer to the size of the value. - Returns: 0 on success, -1 on error. + option_len: Reference to the size of the value. + + Returns: + 0 on success, -1 on error. """ - return external_call[ - "getsockopt", - c_int, # FnName, RetType - c_int, - c_int, - c_int, - UnsafePointer[UInt8], - UnsafePointer[socklen_t], # Args - ](socket, level, option_name, option_value, option_len) + return external_call["getsockopt", c_int](socket, level, option_name, option_value, option_len) fn getsockname( socket: c_int, - address: UnsafePointer[sockaddr], - address_len: UnsafePointer[socklen_t], + address: Reference[sockaddr], + address_len: Reference[socklen_t], ) -> c_int: """Libc POSIX `getsockname` function Reference: https://man7.org/linux/man-pages/man3/getsockname.3p.html - Fn signature: int getsockname(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len). + Fn signature: `int getsockname(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len)`. - Args: socket: A File Descriptor. - address: A pointer to a buffer to store the address of the peer. - address_len: A pointer to the size of the buffer. - Returns: 0 on success, -1 on error. + Args: + socket: A File Descriptor. + address: A reference to a buffer to store the address of the peer. + address_len: A reference to the size of the buffer. + + Returns: + 0 on success, -1 on error. """ - return external_call[ - "getsockname", - c_int, # FnName, RetType - c_int, - UnsafePointer[sockaddr], - UnsafePointer[socklen_t], # Args - ](socket, address, address_len) + return external_call["getsockname", c_int](socket, address, address_len) fn getpeername( sockfd: c_int, - addr: UnsafePointer[sockaddr], - address_len: UnsafePointer[socklen_t], + addr: Reference[sockaddr], + address_len: Reference[socklen_t], ) -> c_int: """Libc POSIX `getpeername` function Reference: https://man7.org/linux/man-pages/man2/getpeername.2.html - Fn signature: int getpeername(int socket, struct sockaddr *restrict addr, socklen_t *restrict address_len). + Fn signature: `int getpeername(int socket, struct sockaddr *restrict addr, socklen_t *restrict address_len)`. - Args: sockfd: A File Descriptor. + Args: + sockfd: A File Descriptor. addr: A pointer to a buffer to store the address of the peer. address_len: A pointer to the size of the buffer. - Returns: 0 on success, -1 on error. + + Returns: + 0 on success, -1 on error. """ - return external_call[ - "getpeername", - c_int, # FnName, RetType - c_int, - UnsafePointer[sockaddr], - UnsafePointer[socklen_t], # Args - ](sockfd, addr, address_len) + return external_call["getpeername", c_int](sockfd, addr, address_len) -fn bind(socket: c_int, address: UnsafePointer[sockaddr], address_len: socklen_t) -> c_int: +fn bind(socket: c_int, address: Reference[sockaddr], address_len: socklen_t) -> c_int: """Libc POSIX `bind` function Reference: https://man7.org/linux/man-pages/man3/bind.3p.html - Fn signature: int bind(int socket, const struct sockaddr *address, socklen_t address_len). + Fn signature: `int bind(int socket, const struct sockaddr *address, socklen_t address_len)`. 
""" - return external_call["bind", c_int, c_int, UnsafePointer[sockaddr], socklen_t]( # FnName, RetType # Args - socket, address, address_len - ) + return external_call["bind", c_int](socket, address, address_len) + + +fn bind(socket: c_int, address: Reference[sockaddr_in], address_len: socklen_t) -> c_int: + """Libc POSIX `bind` function + Reference: https://man7.org/linux/man-pages/man3/bind.3p.html + Fn signature: `int bind(int socket, const struct sockaddr *address, socklen_t address_len)`. + """ + return external_call["bind", c_int](socket, address, address_len) fn listen(socket: c_int, backlog: c_int) -> c_int: @@ -660,49 +676,66 @@ fn listen(socket: c_int, backlog: c_int) -> c_int: Reference: https://man7.org/linux/man-pages/man3/listen.3p.html Fn signature: int listen(int socket, int backlog). - Args: socket: A File Descriptor. + Args: + socket: A File Descriptor. backlog: The maximum length of the queue of pending connections. - Returns: 0 on success, -1 on error. + + Returns: + 0 on success, -1 on error. """ return external_call["listen", c_int, c_int, c_int](socket, backlog) fn accept( socket: c_int, - address: UnsafePointer[sockaddr], - address_len: UnsafePointer[socklen_t], + address: Reference[sockaddr], + address_len: Reference[socklen_t], ) -> c_int: """Libc POSIX `accept` function Reference: https://man7.org/linux/man-pages/man3/accept.3p.html Fn signature: int accept(int socket, struct sockaddr *restrict address, socklen_t *restrict address_len). - Args: socket: A File Descriptor. + Args: + socket: A File Descriptor. address: A pointer to a buffer to store the address of the peer. address_len: A pointer to the size of the buffer. - Returns: A File Descriptor or -1 in case of failure. + + Returns: + A File Descriptor or -1 in case of failure. """ - return external_call[ - "accept", - c_int, # FnName, RetType - c_int, - UnsafePointer[sockaddr], - UnsafePointer[socklen_t], # Args - ](socket, address, address_len) + return external_call["accept", c_int](socket, address, address_len) + + +fn connect(socket: c_int, address: Reference[sockaddr], address_len: socklen_t) -> c_int: + """Libc POSIX `connect` function + Reference: https://man7.org/linux/man-pages/man3/connect.3p.html + Fn signature: `int connect(int socket, const struct sockaddr *address, socklen_t address_len)`. + + Args: + socket: A File Descriptor. + address: A pointer to the address to connect to. + address_len: The size of the address. + Returns: + 0 on success, -1 on error. + """ + return external_call["connect", c_int](socket, address, address_len) -fn connect(socket: c_int, address: UnsafePointer[sockaddr], address_len: socklen_t) -> c_int: + +fn connect(socket: c_int, address: Reference[sockaddr_in], address_len: socklen_t) -> c_int: """Libc POSIX `connect` function Reference: https://man7.org/linux/man-pages/man3/connect.3p.html - Fn signature: int connect(int socket, const struct sockaddr *address, socklen_t address_len). + Fn signature: `int connect(int socket, const struct sockaddr *address, socklen_t address_len)`. - Args: socket: A File Descriptor. + Args: + socket: A File Descriptor. address: A pointer to the address to connect to. address_len: The size of the address. - Returns: 0 on success, -1 on error. + + Returns: + 0 on success, -1 on error. 
""" - return external_call["connect", c_int, c_int, UnsafePointer[sockaddr], socklen_t]( # FnName, RetType # Args - socket, address, address_len - ) + return external_call["connect", c_int](socket, address, address_len) fn recv( @@ -713,7 +746,7 @@ fn recv( ) -> c_ssize_t: """Libc POSIX `recv` function Reference: https://man7.org/linux/man-pages/man3/recv.3p.html - Fn signature: ssize_t recv(int socket, void *buffer, size_t length, int flags). + Fn signature: `ssize_t recv(int socket, void *buffer, size_t length, int flags)`. Args: socket: Specifies the socket file descriptor. @@ -725,9 +758,9 @@ fn recv( The number of bytes received or -1 in case of failure. Valid Flags: - MSG_PEEK: Peeks at an incoming message. The data is treated as unread and the next recvfrom() or similar function shall still return this data. - MSG_OOB: Requests out-of-band data. The significance and semantics of out-of-band data are protocol-specific. - MSG_WAITALL: On SOCK_STREAM sockets this requests that the function block until the full amount of data can be returned. The function may return the smaller amount of data if the socket is a message-based socket, if a signal is caught, if the connection is terminated, if MSG_PEEK was specified, or if an error is pending for the socket. + `MSG_PEEK`: Peeks at an incoming message. The data is treated as unread and the next recvfrom() or similar function shall still return this data. + `MSG_OOB`: Requests out-of-band data. The significance and semantics of out-of-band data are protocol-specific. + `MSG_WAITALL`: On SOCK_STREAM sockets this requests that the function block until the full amount of data can be returned. The function may return the smaller amount of data if the socket is a message-based socket, if a signal is caught, if the connection is terminated, if MSG_PEEK was specified, or if an error is pending for the socket. """ return external_call[ "recv", @@ -744,14 +777,14 @@ fn recvfrom( buffer: UnsafePointer[UInt8], length: c_size_t, flags: c_int, - address: UnsafePointer[sockaddr], - address_len: UnsafePointer[socklen_t], + address: Reference[sockaddr], + address_len: Reference[socklen_t], ) -> c_ssize_t: """Libc POSIX `recvfrom` function Reference: https://man7.org/linux/man-pages/man3/recvfrom.3p.html - Fn signature: ssize_t recvfrom(int socket, void *restrict buffer, size_t length, + Fn signature: `ssize_t recvfrom(int socket, void *restrict buffer, size_t length, int flags, struct sockaddr *restrict address, - socklen_t *restrict address_len). + socklen_t *restrict address_len)`. Args: socket: Specifies the socket file descriptor. @@ -765,20 +798,11 @@ fn recvfrom( The number of bytes received or -1 in case of failure. Valid Flags: - MSG_PEEK: Peeks at an incoming message. The data is treated as unread and the next recvfrom() or similar function shall still return this data. - MSG_OOB: Requests out-of-band data. The significance and semantics of out-of-band data are protocol-specific. - MSG_WAITALL: On SOCK_STREAM sockets this requests that the function block until the full amount of data can be returned. The function may return the smaller amount of data if the socket is a message-based socket, if a signal is caught, if the connection is terminated, if MSG_PEEK was specified, or if an error is pending for the socket. + `MSG_PEEK`: Peeks at an incoming message. The data is treated as unread and the next recvfrom() or similar function shall still return this data. + `MSG_OOB`: Requests out-of-band data. 
The significance and semantics of out-of-band data are protocol-specific. + `MSG_WAITALL`: On SOCK_STREAM sockets this requests that the function block until the full amount of data can be returned. The function may return the smaller amount of data if the socket is a message-based socket, if a signal is caught, if the connection is terminated, if MSG_PEEK was specified, or if an error is pending for the socket. """ - return external_call[ - "recvfrom", - c_ssize_t, - c_int, - UnsafePointer[UInt8], - c_size_t, - c_int, - UnsafePointer[sockaddr], - UnsafePointer[socklen_t], - ](socket, buffer, length, flags, address, address_len) + return external_call["recvfrom", c_ssize_t](socket, buffer, length, flags, address, address_len) fn send( @@ -789,21 +813,24 @@ fn send( ) -> c_ssize_t: """Libc POSIX `send` function Reference: https://man7.org/linux/man-pages/man3/send.3p.html - Fn signature: ssize_t send(int socket, const void *buffer, size_t length, int flags). + Fn signature: `ssize_t send(int socket, const void *buffer, size_t length, int flags)`. - Args: socket: A File Descriptor. + Args: + socket: A File Descriptor. buffer: A pointer to the buffer to send. length: The size of the buffer. flags: Flags to control the behaviour of the function. - Returns: The number of bytes sent or -1 in case of failure. + + Returns: + The number of bytes sent or -1 in case of failure. """ return external_call[ "send", - c_ssize_t, # FnName, RetType + c_ssize_t, c_int, UnsafePointer[UInt8], c_size_t, - c_int, # Args + c_int, ](socket, buffer, length, flags) @@ -812,14 +839,14 @@ fn sendto( message: UnsafePointer[UInt8], length: c_size_t, flags: c_int, - dest_addr: UnsafePointer[sockaddr], + dest_addr: Reference[sockaddr], dest_len: socklen_t, ) -> c_ssize_t: """Libc POSIX `sendto` function Reference: https://man7.org/linux/man-pages/man3/sendto.3p.html - Fn signature: ssize_t sendto(int socket, const void *message, size_t length, + Fn signature: `ssize_t sendto(int socket, const void *message, size_t length, int flags, const struct sockaddr *dest_addr, - socklen_t dest_len). + socklen_t dest_len)`. Args: socket: Specifies the socket file descriptor. @@ -837,60 +864,53 @@ fn sendto( MSG_OOB: Sends out-of-band data on sockets that support out-of-band data. The significance and semantics of out-of-band data are protocol-specific. MSG_NOSIGNAL: Requests not to send the SIGPIPE signal if an attempt to send is made on a stream-oriented socket that is no longer connected. The [EPIPE] error shall still be returned. """ - return external_call[ - "sendto", c_ssize_t, c_int, UnsafePointer[UInt8], c_size_t, c_int, UnsafePointer[sockaddr], socklen_t - ](socket, message, length, flags, dest_addr, dest_len) + return external_call["sendto", c_ssize_t](socket, message, length, flags, dest_addr, dest_len) fn shutdown(socket: c_int, how: c_int) -> c_int: """Libc POSIX `shutdown` function Reference: https://man7.org/linux/man-pages/man3/shutdown.3p.html - Fn signature: int shutdown(int socket, int how). + Fn signature: `int shutdown(int socket, int how)`. - Args: socket: A File Descriptor. + Args: + socket: A File Descriptor. how: How to shutdown the socket. - Returns: 0 on success, -1 on error. + + Returns: + 0 on success, -1 on error. 
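POSIX `send` may write fewer bytes than requested, so callers usually loop until the buffer is drained. A hypothetical helper (not part of this change) built on the `send` binding above; it assumes the module's `c_size_t`/`c_ssize_t` aliases behave like plain integers:

```mojo
fn send_all(socket: c_int, data: List[UInt8]) -> Bool:
    """Hypothetical helper: keeps calling `send` until every byte has been written."""
    var sent = 0
    while sent < len(data):
        var n = send(socket, data.unsafe_ptr().offset(sent), len(data) - sent, 0)
        if n == -1:
            return False
        sent += int(n)
    return True
```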
""" - return external_call["shutdown", c_int, c_int, c_int](socket, how) # FnName, RetType # Args + return external_call["shutdown", c_int, c_int, c_int](socket, how) fn getaddrinfo( nodename: UnsafePointer[UInt8], servname: UnsafePointer[UInt8], - hints: UnsafePointer[addrinfo], - res: UnsafePointer[UnsafePointer[addrinfo]], + hints: Reference[addrinfo], + res: Reference[UnsafePointer[addrinfo]], ) -> c_int: """Libc POSIX `getaddrinfo` function Reference: https://man7.org/linux/man-pages/man3/getaddrinfo.3p.html - Fn signature: int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res). + Fn signature: `int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res)`. """ return external_call[ "getaddrinfo", - c_int, # FnName, RetType - UnsafePointer[UInt8], - UnsafePointer[UInt8], - UnsafePointer[addrinfo], # Args - UnsafePointer[UnsafePointer[addrinfo]], # Args + c_int, ](nodename, servname, hints, res) fn getaddrinfo_unix( nodename: UnsafePointer[UInt8], servname: UnsafePointer[UInt8], - hints: UnsafePointer[addrinfo_unix], - res: UnsafePointer[UnsafePointer[addrinfo_unix]], + hints: Reference[addrinfo_unix], + res: Reference[UnsafePointer[addrinfo_unix]], ) -> c_int: """Libc POSIX `getaddrinfo` function Reference: https://man7.org/linux/man-pages/man3/getaddrinfo.3p.html - Fn signature: int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res). + Fn signature: `int getaddrinfo(const char *restrict nodename, const char *restrict servname, const struct addrinfo *restrict hints, struct addrinfo **restrict res)`. """ return external_call[ "getaddrinfo", - c_int, # FnName, RetType - UnsafePointer[UInt8], - UnsafePointer[UInt8], - UnsafePointer[addrinfo_unix], # Args - UnsafePointer[UnsafePointer[addrinfo_unix]], # Args + c_int, ](nodename, servname, hints, res) @@ -899,17 +919,10 @@ fn gai_strerror(ecode: c_int) -> UnsafePointer[UInt8]: Reference: https://man7.org/linux/man-pages/man3/gai_strerror.3p.html Fn signature: const char *gai_strerror(int ecode). - Args: ecode: The error code. - Returns: A pointer to a string describing the error. - """ - return external_call["gai_strerror", UnsafePointer[UInt8], c_int](ecode) # FnName, RetType # Args - - -# fn inet_pton(address_family: Int, address: String) -> Int: -# var ip_buf_size = 4 -# if address_family == AF_INET6: -# ip_buf_size = 16 + Args: + ecode: The error code. -# var ip_buf = UnsafePointer[UInt8].alloc(ip_buf_size) -# var conv_status = inet_pton(rebind[c_int](address_family), to_char_ptr(address), ip_buf) -# return int(ip_buf.bitcast[c_uint]().load()) + Returns: + A pointer to a string describing the error. 
+ """ + return external_call["gai_strerror", UnsafePointer[UInt8], c_int](ecode) diff --git a/src/gojo/unicode/__init__.mojo b/src/gojo/unicode/__init__.mojo new file mode 100644 index 0000000..109ae2d --- /dev/null +++ b/src/gojo/unicode/__init__.mojo @@ -0,0 +1 @@ +from .utf8 import rune_count_in_string, rune_width, string_width, Condition, DEFAULT_CONDITION diff --git a/gojo/unicode/utf8/__init__.mojo b/src/gojo/unicode/utf8/__init__.mojo similarity index 88% rename from gojo/unicode/utf8/__init__.mojo rename to src/gojo/unicode/utf8/__init__.mojo index 6f3d8a9..f905a8f 100644 --- a/gojo/unicode/utf8/__init__.mojo +++ b/src/gojo/unicode/utf8/__init__.mojo @@ -2,5 +2,4 @@ This would not be possible without his help. """ from .runes import rune_count_in_string -from .string import UnicodeString from .width import string_width, rune_width, Condition, DEFAULT_CONDITION diff --git a/gojo/unicode/utf8/runes.mojo b/src/gojo/unicode/utf8/runes.mojo similarity index 69% rename from gojo/unicode/utf8/runes.mojo rename to src/gojo/unicode/utf8/runes.mojo index 5171282..da80db1 100644 --- a/gojo/unicode/utf8/runes.mojo +++ b/src/gojo/unicode/utf8/runes.mojo @@ -2,14 +2,11 @@ This would not be possible without his help. """ -from ...builtins import Rune from algorithm.functional import vectorize from sys.info import simdwidthof -from bit import countl_zero -# alias simd_width_u8 = simdwidthof[DType.uint8]() -alias simd_width_u8 = 1 +alias simd_width_u8 = simdwidthof[DType.uint8]() fn rune_count_in_string(s: String) -> Int: @@ -21,13 +18,12 @@ fn rune_count_in_string(s: String) -> Int: Returns: The number of runes in the string. """ - var p = DTypePointer[DType.uint8](s.unsafe_uint8_ptr()) var string_byte_length = len(s) var result = 0 @parameter fn count[simd_width: Int](offset: Int): - result += int(((p.load[width=simd_width](offset) >> 6) != 0b10).reduce_add()) + result += int(((s.unsafe_ptr().load[width=simd_width](offset) >> 6) != 0b10).cast[DType.uint8]().reduce_add()) vectorize[count, simd_width_u8](string_byte_length) return result diff --git a/gojo/unicode/utf8/table.mojo b/src/gojo/unicode/utf8/table.mojo similarity index 98% rename from gojo/unicode/utf8/table.mojo rename to src/gojo/unicode/utf8/table.mojo index 32717af..a49f81b 100644 --- a/gojo/unicode/utf8/table.mojo +++ b/src/gojo/unicode/utf8/table.mojo @@ -1,3 +1,6 @@ +from collections import InlineArray + + @register_passable("trivial") struct Interval: var first: UInt32 @@ -7,8 +10,12 @@ struct Interval: self.first = first self.last = last + fn __init__(inout self, other: Interval): + self.first = other.first + self.last = other.last + -alias combining = List[Interval]( +alias combining = InlineArray[Interval, 43]( Interval(0x0300, 0x036F), Interval(0x0483, 0x0489), Interval(0x07EB, 0x07F3), @@ -54,7 +61,7 @@ alias combining = List[Interval]( Interval(0x1E8D0, 0x1E8D6), ) -alias doublewidth = List[Interval]( +alias doublewidth = InlineArray[Interval, 116]( Interval(0x1100, 0x115F), Interval(0x231A, 0x231B), Interval(0x2329, 0x232A), @@ -173,7 +180,7 @@ alias doublewidth = List[Interval]( Interval(0x30000, 0x3FFFD), ) -alias ambiguous = List[Interval]( +alias ambiguous = InlineArray[Interval, 179]( Interval(0x00A1, 0x00A1), Interval(0x00A4, 0x00A4), Interval(0x00A7, 0x00A8), @@ -355,7 +362,7 @@ alias ambiguous = List[Interval]( Interval(0x100000, 0x10FFFD), ) -alias narrow = List[Interval]( +alias narrow = InlineArray[Interval, 7]( Interval(0x0020, 0x007E), Interval(0x00A2, 0x00A3), Interval(0x00A5, 0x00A6), @@ -365,7 +372,7 @@ 
alias narrow = List[Interval]( Interval(0x2985, 0x2986), ) -alias neutral = List[Interval]( +alias neutral = InlineArray[Interval, 826]( Interval(0x0000, 0x001F), Interval(0x007F, 0x00A0), Interval(0x00A9, 0x00A9), @@ -1195,7 +1202,7 @@ alias neutral = List[Interval]( Interval(0xE0020, 0xE007F), ) -alias emoji = List[Interval]( +alias emoji = InlineArray[Interval, 76]( Interval(0x203C, 0x203C), Interval(0x2049, 0x2049), Interval(0x2122, 0x2122), @@ -1274,13 +1281,13 @@ alias emoji = List[Interval]( Interval(0x1FC00, 0x1FFFD), ) -alias private = List[Interval]( +alias private = InlineArray[Interval, 3]( Interval(0x00E000, 0x00F8FF), Interval(0x0F0000, 0x0FFFFD), Interval(0x100000, 0x10FFFD), ) -alias nonprint = List[Interval]( +alias nonprint = InlineArray[Interval, 12]( Interval(0x0000, 0x001F), Interval(0x007F, 0x009F), Interval(0x00AD, 0x00AD), diff --git a/gojo/unicode/utf8/width.mojo b/src/gojo/unicode/utf8/width.mojo similarity index 51% rename from gojo/unicode/utf8/width.mojo rename to src/gojo/unicode/utf8/width.mojo index 8a09ed7..8cdc421 100644 --- a/gojo/unicode/utf8/width.mojo +++ b/src/gojo/unicode/utf8/width.mojo @@ -1,17 +1,25 @@ +from utils import StringSlice +from collections import InlineArray from .table import Interval, narrow, combining, doublewidth, ambiguous, emoji, nonprint -from .string import UnicodeString @value struct Condition: - """Condition have flag EastAsianWidth whether the current locale is CJK or not.""" + """Condition have the flag `EastAsianWidth` enabled if the current locale is `CJK` or not.""" var east_asian_width: Bool var strict_emoji_neutral: Bool fn rune_width(self, r: UInt32) -> Int: """Returns the number of cells in r. - See http://www.unicode.org/reports/tr11/.""" + See http://www.unicode.org/reports/tr11/. + + Args: + r: The rune to calculate the width of. + + Returns: + The printable width of the rune. + """ if r < 0 or r > 0x10FFFF: return 0 @@ -25,42 +33,73 @@ struct Condition: return 1 elif in_table(r, narrow): return 1 - elif in_tables(r, nonprint, combining): + elif in_table(r, nonprint): + return 0 + elif in_table(r, combining): return 0 elif in_table(r, doublewidth): return 2 else: return 1 else: - if in_tables(r, nonprint, combining): + if in_table(r, nonprint): + return 0 + elif in_table(r, combining): return 0 elif in_table(r, narrow): return 1 - elif in_tables(r, ambiguous, doublewidth): + if in_table(r, ambiguous): + return 2 + elif in_table(r, doublewidth): return 2 elif in_table(r, ambiguous) or in_table(r, emoji): return 2 - elif not self.strict_emoji_neutral and in_tables(r, ambiguous, emoji, narrow): + elif not self.strict_emoji_neutral and in_table(r, ambiguous): + return 2 + elif not self.strict_emoji_neutral and in_table(r, emoji): + return 2 + elif not self.strict_emoji_neutral and in_table(r, narrow): return 2 else: return 1 fn string_width(self, s: String) -> Int: - """Return width as you can see.""" + """Return width as you can see. + + Args: + s: The string to calculate the width of. + + Returns: + The printable width of the string. + """ + var width = 0 + for r in s: + width += self.rune_width(ord(r)) + return width + + fn string_width(self, s: StringSlice) -> Int: + """Return width as you can see. + + Args: + s: The string to calculate the width of. + + Returns: + The printable width of the string. 
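Taken together, the public width API reads roughly like this in caller code (a sketch using only the names exported by `gojo.unicode` above; the module-level helpers go through `DEFAULT_CONDITION`, while CJK-aware callers can build their own `Condition`):

```mojo
from gojo.unicode import string_width, rune_width, Condition


def main():
    var s: String = "Hello, 世界!"
    # Module-level helpers use DEFAULT_CONDITION (non-CJK, strict emoji handling).
    print(string_width(s))        # ASCII runes count 1 cell each, 世 and 界 count 2
    print(rune_width(ord("界")))  # 2

    # East-Asian locales can opt into wider treatment of ambiguous runes.
    var cjk = Condition(east_asian_width=True, strict_emoji_neutral=False)
    print(cjk.string_width(s))
```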
+ """ var width = 0 - for r in UnicodeString(s): - width += self.rune_width(ord(String(r))) + for r in s: + width += self.rune_width(ord(r)) return width -fn in_tables(r: UInt32, *ts: List[Interval]) -> Bool: +fn in_tables(r: UInt32, *ts: InlineArray[Interval]) -> Bool: for t in ts: if in_table(r, t[]): return True return False -fn in_table(r: UInt32, t: List[Interval]) -> Bool: +fn in_table[size: Int](r: UInt32, t: InlineArray[Interval, size]) -> Bool: if r < t[0].first: return False @@ -80,6 +119,7 @@ fn in_table(r: UInt32, t: List[Interval]) -> Bool: alias DEFAULT_CONDITION = Condition(east_asian_width=False, strict_emoji_neutral=True) +"""The default configuration for calculating the width of runes and strings.""" fn string_width(s: String) -> Int: @@ -94,6 +134,18 @@ fn string_width(s: String) -> Int: return DEFAULT_CONDITION.string_width(s) +fn string_width(s: StringSlice) -> Int: + """Return width as you can see. + + Args: + s: The string to calculate the width of. + + Returns: + The printable width of the string. + """ + return DEFAULT_CONDITION.string_width(s) + + fn rune_width(rune: UInt32) -> Int: """Return width as you can see. diff --git a/src/recipe.yaml b/src/recipe.yaml new file mode 100644 index 0000000..2b6ff2f --- /dev/null +++ b/src/recipe.yaml @@ -0,0 +1,49 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/prefix-dev/recipe-format/main/schema.json + +context: + version: "13.4.2" + +package: + name: "gojo" + version: 0.1.8 + +source: + - path: . + - path: ../LICENSE + - path: ../test + +build: + script: + - mkdir -p ${PREFIX}/lib/mojo + - magic run mojo package gojo + - cp gojo.mojopkg ${PREFIX}/lib/mojo/gojo.mojopkg + +requirements: + run: + - max >= 24.5.0,<24.6.0 + +tests: + - script: + # commands to run to test the package. If any of the commands + # returns with an error code, the test is considered failed. + # if I don't copy everything into a test dir, mojo test runs the tests in the .magic dir and fails :| + - mkdir -p test + - mv test_*.mojo test/ + - mv data test/ + - mv gojo.mojopkg test/ + - magic run mojo test test + + files: + # Extra files to be copied to the test directory from the "work directory" + source: + - gojo.mojopkg + - mojoproject.toml + - data/ + - test_*.mojo + +about: + homepage: https://github.com/thatstoasty/gojo + license: MIT + license_file: LICENSE + summary: Experiments in porting over Golang stdlib into Mojo. https://github.com/thatstoasty/gojo + repository: https://github.com/thatstoasty/gojo diff --git a/test/data/test.txt b/test/data/test.txt new file mode 100644 index 0000000..e56e15b --- /dev/null +++ b/test/data/test.txt @@ -0,0 +1 @@ +12345 diff --git a/tests/data/test_big_file.txt b/test/data/test_big_file.txt similarity index 99% rename from tests/data/test_big_file.txt rename to test/data/test_big_file.txt index f5a0a5a..2d06e5d 100644 --- a/tests/data/test_big_file.txt +++ b/test/data/test_big_file.txt @@ -17,4 +17,4 @@ Quis imperdiet massa tincidunt nunc pulvinar. Eget lorem dolor sed viverra ipsum Egestas congue quisque egestas diam in arcu. Et magnis dis parturient montes nascetur. Dolor sit amet consectetur adipiscing. Ut tristique et egestas quis ipsum. Turpis egestas sed tempus urna. Euismod elementum nisi quis eleifend quam adipiscing vitae. Nisl vel pretium lectus quam id leo. Proin sed libero enim sed faucibus turpis. Mi quis hendrerit dolor magna eget. Suspendisse ultrices gravida dictum fusce ut placerat. Habitasse platea dictumst quisque sagittis purus. Curabitur gravida arcu ac tortor. 
Commodo nulla facilisi nullam vehicula ipsum a. Sagittis vitae et leo duis ut diam quam. Pretium nibh ipsum consequat nisl vel pretium lectus. Arcu cursus euismod quis viverra. Risus nullam eget felis eget nunc lobortis. Aliquet eget sit amet tellus cras adipiscing enim eu. In mollis nunc sed id semper risus in. Sed egestas egestas fringilla phasellus faucibus scelerisque. A diam sollicitudin tempor id. Lacus laoreet non curabitur gravida. A diam maecenas sed enim ut. Id nibh tortor id aliquet lectus proin. Eget mi proin sed libero. Maecenas pharetra convallis posuere morbi leo urna. Id donec ultrices tincidunt arcu. Urna et pharetra pharetra massa massa ultricies. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Nec nam aliquam sem et tortor consequat id porta. In vitae turpis massa sed. Praesent semper feugiat nibh sed pulvinar. Nascetur ridiculus mus mauris vitae ultricies. Ut aliquam purus sit amet luctus venenatis lectus magna fringilla. Sit amet mattis vulputate enim. Orci a scelerisque purus semper eget duis at tellus at. -Platea dictumst vestibulum rhoncus est pellentesque elit ullamcorper dignissim cras. Gravida arcu ac tortor dignissim convallis aenean et tortor. Ornare suspendisse sed nisi lacus sed viverra tellus in. Turpis egestas maecenas pharetra convallis posuere morbi. Elementum nisi quis eleifend quam adipiscing vitae. Maecenas sed enim ut sem. Feugiat in fermentum posuere urna nec tincidunt praesent. Suspendisse sed nisi lacus sed. Scelerisque in dictum non consectetur. Mauris commodo quis imperdiet massa. \ No newline at end of file +Platea dictumst vestibulum rhoncus est pellentesque elit ullamcorper dignissim cras. Gravida arcu ac tortor dignissim convallis aenean et tortor. Ornare suspendisse sed nisi lacus sed viverra tellus in. Turpis egestas maecenas pharetra convallis posuere morbi. Elementum nisi quis eleifend quam adipiscing vitae. Maecenas sed enim ut sem. Feugiat in fermentum posuere urna nec tincidunt praesent. Suspendisse sed nisi lacus sed. Scelerisque in dictum non consectetur. Mauris commodo quis imperdiet massa. diff --git a/tests/data/test_multiple_lines.txt b/test/data/test_multiple_lines.txt similarity index 80% rename from tests/data/test_multiple_lines.txt rename to test/data/test_multiple_lines.txt index 8882524..1dd092d 100644 --- a/tests/data/test_multiple_lines.txt +++ b/test/data/test_multiple_lines.txt @@ -2,4 +2,4 @@ 22222 33333 44444 -55555 \ No newline at end of file +55555 diff --git a/test/data/test_write.txt b/test/data/test_write.txt new file mode 100644 index 0000000..e56e15b --- /dev/null +++ b/test/data/test_write.txt @@ -0,0 +1 @@ +12345 diff --git a/test/test_bufio_reader.mojo b/test/test_bufio_reader.mojo new file mode 100644 index 0000000..a259d90 --- /dev/null +++ b/test/test_bufio_reader.mojo @@ -0,0 +1,94 @@ +import gojo.bytes +import gojo.bufio +import gojo.io +from gojo.bytes import to_string +from gojo.strings import StringBuilder +import testing + + +def test_read(): + # Create a reader from a string buffer + var reader = bufio.Reader(bytes.Buffer("Hello")) + + # Read the buffer into and then add more to it. 
+ var dest = List[UInt8, True](capacity=256) + _ = reader.read(dest) + dest.extend(String(" World!").as_bytes()) + + testing.assert_equal(to_string(dest), "Hello World!") + + +def test_read_all(): + var reader = bufio.Reader(bytes.Reader("0123456789")) + var result = io.read_all(reader) + testing.assert_equal(to_string(result[0]), "0123456789") + + +def test_write_to(): + var reader = bufio.Reader(bytes.Buffer("0123456789")) + + # Create a new writer containing the content "Hello World" + var writer = bytes.Buffer("Hello World") + + # Write the content of the reader to the writer + _ = reader.write_to(writer) + + # Check if the content of the writer is "Hello World0123456789" + testing.assert_equal(str(writer), "Hello World0123456789") + + +def test_read_and_unread_byte(): + # Read the first byte from the reader. + var reader = bufio.Reader(bytes.Buffer("Hello, World!")) + var result = reader.read_byte() + testing.assert_equal(int(result[0]), int(72)) + var post_read_position = reader.read_pos + + # Unread the first byte from the reader. Read position should be moved back by 1 + _ = reader.unread_byte() + testing.assert_equal(reader.read_pos, post_read_position - 1) + + +def test_read_slice(): + var reader = bufio.Reader(bytes.Buffer("0123456789")) + var result = reader.read_slice(ord("5")) + print(result[0][0]) + testing.assert_equal(to_string(result[0]), "012345") + + +def test_read_bytes(): + var reader = bufio.Reader(bytes.Buffer("01234\n56789")) + var result = reader.read_bytes(ord("\n")) + testing.assert_equal(to_string(result[0]), "01234\n") + + +def test_read_line(): + var reader = bufio.Reader(bytes.Buffer("01234\n56789")) + var line: List[UInt8, True] + var b: Bool + line, b = reader.read_line() + testing.assert_equal(to_string(line), "01234") + + +def test_peek(): + var reader = bufio.Reader(bytes.Buffer("01234\n56789")) + + # Peek doesn't advance the reader, so we should see the same content twice. + var result = reader.peek(5) + testing.assert_equal(to_string(result[0]), "01234") + var second_result = reader.peek(5) + testing.assert_equal(to_string(second_result[0]), "01234") + + +def test_discard(): + var reader = bufio.Reader(bytes.Buffer("0123456789")) + var result = reader.discard(5) + testing.assert_equal(result[0], 5) + + # Peek doesn't advance the reader, so we should see the same content twice. 
+ var second_result = reader.peek(5) + testing.assert_equal(to_string(second_result[0]), "56789") + + +def main(): + test_read_slice() diff --git a/test/test_bufio_scanner.mojo b/test/test_bufio_scanner.mojo new file mode 100644 index 0000000..db6a53d --- /dev/null +++ b/test/test_bufio_scanner.mojo @@ -0,0 +1,120 @@ +import testing +import pathlib +from gojo.bytes import buffer +from gojo.io import FileWrapper +from gojo.bufio import Reader, Scanner, scan_words, scan_bytes, scan_runes + + +def test_scan_words(): + # Create a reader from a string buffer + var buf = buffer.Buffer("Testing🔥 this🔥 string🔥!") + + # Create a scanner from the reader + var scanner = Scanner[scan_words](buf^) + var expected_results = List[String]("Testing🔥", "this🔥", "string🔥!") + var i = 0 + while scanner.scan(): + testing.assert_equal(scanner.current_token(), expected_results[i]) + i += 1 + + testing.assert_equal(i, len(expected_results)) + + +def test_scan_lines(): + # Create a reader from a string buffer + var buf = buffer.Buffer("Testing\nthis\nstring!") + + # Create a scanner from the reader + var scanner = Scanner(buf^) + var expected_results = List[String]("Testing", "this", "string!") + var i = 0 + while scanner.scan(): + testing.assert_equal(scanner.current_token(), expected_results[i]) + i += 1 + + testing.assert_equal(i, len(expected_results)) + + +def scan_no_newline_test(test_case: String, result_lines: List[String]): + # Create a reader from a string buffer + var buf = buffer.Buffer(test_case) + + # Create a scanner from the reader + var scanner = Scanner(buf^) + var i = 0 + while scanner.scan(): + testing.assert_equal(scanner.current_token(), result_lines[i]) + i += 1 + + +def test_scan_lines_no_newline(): + var test_case = "abcdefghijklmn\nopqrstuvwxyz" + var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz") + + scan_no_newline_test(test_case, result_lines) + + +def test_scan_lines_cr_no_newline(): + var test_case = "abcdefghijklmn\nopqrstuvwxyz\r" + var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz") + + scan_no_newline_test(test_case, result_lines) + + +def test_scan_lines_empty_final_line(): + var test_case = "abcdefghijklmn\nopqrstuvwxyz\n\n" + var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz", "") + + scan_no_newline_test(test_case, result_lines) + + +def test_scan_lines_cr_empty_final_line(): + var test_case = "abcdefghijklmn\nopqrstuvwxyz\n\r" + var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz", "") + + scan_no_newline_test(test_case, result_lines) + + +def test_scan_bytes(): + var test_cases = List[String]("", "a", "abc", "abc def\n\t\tgh ") + for test_case in test_cases: + # Create a reader from a string buffer + var buf = buffer.Buffer(buf=test_case[].as_bytes()) + + # Create a scanner from the reader + var scanner = Scanner[split=scan_bytes](buf^) + var j = 0 + while scanner.scan(): + testing.assert_equal(scanner.current_token(), test_case[][j]) + j += 1 + + testing.assert_equal(j, len(test_case[])) + + +def test_file_wrapper_scanner(): + var test_file = str(pathlib._dir_of_current_file()) + "/data/test_multiple_lines.txt" + var file = FileWrapper(test_file, "r") + + # Create a scanner from the reader + var scanner = Scanner(file^) + var expected_results = List[String]("11111", "22222", "33333", "44444", "55555") + var i = 0 + while scanner.scan(): + testing.assert_equal(scanner.current_token(), expected_results[i]) + i += 1 + testing.assert_equal(i, len(expected_results)) + + +def test_scan_runes(): + # Create a reader from a 
string buffer + var buf = buffer.Buffer("🔪🔥🔪🔥") + + # Create a scanner from the reader + var scanner = Scanner[split=scan_runes](buf^) + + var expected_results = List[String]("🔪", "🔥", "🔪", "🔥") + var i = 0 + while scanner.scan(): + testing.assert_equal(scanner.current_token(), expected_results[i]) + i += 1 + testing.assert_equal(i, len(expected_results)) diff --git a/test/test_bufio_writer.mojo b/test/test_bufio_writer.mojo new file mode 100644 index 0000000..4dc62d2 --- /dev/null +++ b/test/test_bufio_writer.mojo @@ -0,0 +1,105 @@ +import gojo.bytes +import gojo.bufio +import gojo.io +from gojo.builtins.bytes import to_string +from gojo.strings import StringBuilder +import testing + + +def test_write(): + # Create a new Buffer Writer and use it to create the buffered Writer + var writer = bufio.Writer(bytes.Buffer()) + + # Write the content from src to the buffered writer's internal buffer and flush it to the Buffer Writer. + var src = String("0123456789").as_bytes_slice() + var result = writer.write(src) + _ = writer.flush() + + testing.assert_equal(result[0], 10) + testing.assert_equal(str(writer.writer), "0123456789") + + +def test_several_writes(): + # Create a new Buffer Writer and use it to create the buffered Writer + var writer = bufio.Writer(bytes.Buffer(capacity=1100)) + + # Write the content from src to the buffered writer's internal buffer and flush it to the Buffer Writer. + var src = String("0123456789") + for _ in range(100): + _ = writer.write_string(src) + _ = writer.flush() + + testing.assert_equal(len(writer.writer), 1000) + var text = str(writer.writer) + testing.assert_equal(text[0], "0") + testing.assert_equal(text[999], "9") + + +def test_several_writes_small_buffer(): + # Create a new Buffer Writer and use it to create the buffered Writer + var writer = bufio.Writer(bytes.Buffer(capacity=1000), capacity=16) + + # Write the content from src to the buffered writer's internal buffer and flush it to the Buffer Writer. + var src = String("0123456789") + for _ in range(100): + _ = writer.write_string(src) + _ = writer.flush() + + var text = str(writer.writer) + testing.assert_equal(len(text), 1000) + testing.assert_equal(text[0], "0") + testing.assert_equal(text[999], "9") + + +def test_big_write(): + # Create a new Buffer Writer and use it to create the buffered Writer + var writer = bufio.Writer(bytes.Buffer()) + + # Build a string larger than the size of the Bufio struct's internal buffer. + var builder = StringBuilder(capacity=5000) + for _ in range(500): + _ = builder.write_string("0123456789") + + # When writing, it should bypass the Bufio struct's buffer and write directly to the underlying bytes buffer writer. So, no need to flush. + var text = str(builder) + _ = writer.write(text.as_bytes_slice()) + testing.assert_equal(len(writer.writer), 5000) + testing.assert_equal(text[0], "0") + testing.assert_equal(text[4999], "9") + + +def test_write_byte(): + # Create a new Buffer Writer and use it to create the buffered Writer + var writer = bufio.Writer(bytes.Buffer("Hello")) + + # Write a byte with the value of 32 to the writer's internal buffer and flush it to the Buffer Writer. + var result = writer.write_byte(32) + _ = writer.flush() + + testing.assert_equal(result[0], 1) + testing.assert_equal(str(writer.writer), "Hello ") + + +def test_write_string(): + # Create a new Buffer Writer and use it to create the buffered Writer + var writer = bufio.Writer(bytes.Buffer("Hello")) + + # Write a string to the writer's internal buffer and flush it to the Buffer Writer. 
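Condensed from the tests above, the buffered-writer flow in application code looks like this (constructors and methods are exactly the ones exercised by the tests; the `main` wrapper is added for illustration):

```mojo
import gojo.bytes
import gojo.bufio


def main():
    # Buffer writes in memory, then flush them down to the wrapped bytes.Buffer.
    var writer = bufio.Writer(bytes.Buffer())
    _ = writer.write_string("Hello, ")
    _ = writer.write_string("World!")
    _ = writer.flush()
    print(str(writer.writer))  # "Hello, World!"
```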
+ var result = writer.write_string(" World!") + _ = writer.flush() + + testing.assert_equal(result[0], 7) + testing.assert_equal(str(writer.writer), "Hello World!") + + +def test_read_from(): + # Create a new Buffer Writer and use it to create the buffered Writer + var writer = bufio.Writer(bytes.Buffer("Hello")) + + # Read from a ReaderFrom struct into the Buffered Writer's internal buffer and flush it to the Buffer Writer. + var reader_from = bytes.Buffer(" World!") + var result = writer.read_from(reader_from) + _ = writer.flush() + + testing.assert_equal(int(result[0]), 7) + testing.assert_equal(str(writer.writer), "Hello World!") diff --git a/test/test_bytes.mojo b/test/test_bytes.mojo new file mode 100644 index 0000000..d529f4e --- /dev/null +++ b/test/test_bytes.mojo @@ -0,0 +1,8 @@ +from utils import Span +from gojo.bytes import index_byte +import testing + + +def test_index_byte(): + var bytes = String("hello\n").as_bytes() + testing.assert_equal(index_byte(Span(bytes), ord("\n")), 5) diff --git a/test/test_bytes_buffer.mojo b/test/test_bytes_buffer.mojo new file mode 100644 index 0000000..9b12dc4 --- /dev/null +++ b/test/test_bytes_buffer.mojo @@ -0,0 +1,96 @@ +from gojo.bytes.buffer import Buffer +from gojo.bytes import to_string +import testing + + +def test_read(): + var s = "Hello World!" + var buf = Buffer(s) + var dest = List[UInt8, True](capacity=16) + _ = buf.read(dest) + testing.assert_equal(to_string(dest), s) + + +def test_read_byte(): + var buf = Buffer("Hello World!") + var result = buf.read_byte() + testing.assert_equal(int(result[0]), 72) + + +def test_unread_byte(): + var buf = Buffer("Hello World!") + var result = buf.read_byte() + testing.assert_equal(int(result[0]), 72) + testing.assert_equal(buf.offset, 1) + + _ = buf.unread_byte() + testing.assert_equal(buf.offset, 0) + + +def test_read_bytes(): + var buf = Buffer("Hello World!") + var result = buf.read_bytes(ord("o")) + testing.assert_equal(to_string(result[0]), "Hello") + + +def test_read_slice(): + var buf = Buffer("Hello World!") + var result = buf.read_slice(ord("o")) + var text = List[UInt8, True](result[0]) + text.append(0) + testing.assert_equal(String(text), "Hello") + + +def test_read_string(): + var buf = Buffer("Hello World!") + var result = buf.read_string(ord("o")) + testing.assert_equal(result[0], "Hello") + + +def test_next(): + var buf = Buffer("Hello World!") + var text = List[UInt8, True](buf.next(5)) + text.append(0) + testing.assert_equal(String(text), "Hello") + + +def test_write(): + var buf = Buffer(List[UInt8, True](capacity=16)) + _ = buf.write("Hello World!".as_bytes_slice()) + testing.assert_equal(str(buf), "Hello World!") + + +def test_muliple_writes(): + var buf = Buffer(List[UInt8, True](capacity=1200)) + var text = "Hello World!".as_bytes_slice() + for _ in range(100): + _ = buf.write(text) + + testing.assert_equal(len(buf), 1200) + var result = str(buf) + testing.assert_equal(result[0], "H") + testing.assert_equal(result[1199], "!") + + +def test_write_string(): + var buf = Buffer(List[UInt8, True](capacity=16)) + _ = buf.write_string("\nGoodbye World!") + testing.assert_equal(str(buf), String("\nGoodbye World!")) + + +def test_write_byte(): + var buf = Buffer(List[UInt8, True](capacity=16)) + _ = buf.write_byte(0x41) + testing.assert_equal(str(buf), String("A")) + + +def test_buffer(): + var b = "Hello World!" 
+ var buf = Buffer(b) + testing.assert_equal(str(buf), b) + + buf = Buffer(String("Goodbye World!")) + testing.assert_equal(str(buf), "Goodbye World!") + + buf = Buffer() + testing.assert_equal(str(buf), "") diff --git a/test/test_bytes_reader.mojo b/test/test_bytes_reader.mojo new file mode 100644 index 0000000..271520f --- /dev/null +++ b/test/test_bytes_reader.mojo @@ -0,0 +1,124 @@ +import gojo.bytes +import gojo.io +from gojo.bytes import to_string +import testing + + +def test_read(): + var reader = bytes.Reader("0123456789") + var dest = List[UInt8, True](capacity=16) + _ = reader.read(dest) + testing.assert_equal(to_string(dest), "0123456789") + + # Test negative seek + alias NEGATIVE_POSITION_ERROR = "bytes.Reader.seek: negative position" + var position: Int + var err: Error + position, err = reader.seek(-1, io.SEEK_START) + + if not err: + raise Error("Expected error not raised while testing negative seek.") + + if str(err) != NEGATIVE_POSITION_ERROR: + raise err + + testing.assert_equal(str(err), NEGATIVE_POSITION_ERROR) + + +def test_read_after_big_seek(): + var reader = bytes.Reader("0123456789") + _ = reader.seek(123456789, io.SEEK_START) + var dest = List[UInt8, True](capacity=16) + + var bytes_read: Int + var err: Error + bytes_read, err = reader.read(dest) + if not err: + raise Error("Expected error not raised while testing big seek.") + + if str(err) != str(io.EOF): + raise err + + testing.assert_equal(str(err), str(io.EOF)) + + +def test_read_at(): + var reader = bytes.Reader("0123456789") + + var dest = List[UInt8, True](capacity=16) + var pos = reader.read_at(dest, 0) + testing.assert_equal(to_string(dest), "0123456789") + + dest = List[UInt8, True](capacity=16) + pos = reader.read_at(dest, 1) + testing.assert_equal(to_string(dest), "123456789") + + +def test_seek(): + var reader = bytes.Reader("0123456789") + var pos = reader.seek(5, io.SEEK_START) + + var dest = List[UInt8, True](capacity=16) + _ = reader.read(dest) + testing.assert_equal(to_string(dest), "56789") + + # Test SEEK_END relative seek + pos = reader.seek(-2, io.SEEK_END) + dest = List[UInt8, True](capacity=16) + _ = reader.read(dest) + testing.assert_equal(to_string(dest), "89") + + # Test SEEK_CURRENT relative seek (should be at the end of the reader, ie [:-4]) + pos = reader.seek(-4, io.SEEK_CURRENT) + dest = List[UInt8, True](capacity=16) + _ = reader.read(dest) + testing.assert_equal(to_string(dest), "6789") + + +def test_read_all(): + var reader = bytes.Reader("0123456789") + var result = io.read_all(reader) + testing.assert_equal(to_string(result[0]), "0123456789") + + +def test_write_to(): + # Create a new reader containing the content "0123456789" + var reader = bytes.Reader("0123456789") + + # Create a new writer containing the content "Hello World" + var w = bytes.Buffer("Hello World") + + # Write the content of the reader to the writer + _ = reader.write_to(w) + + # Check if the content of the writer is "Hello World0123456789" + testing.assert_equal(str(w), String("Hello World0123456789")) + + +def test_read_and_unread_byte(): + var reader = bytes.Reader("0123456789") + + # Read the first byte from the reader. + var byte: UInt8 + var err: Error + byte, err = reader.read_byte() + testing.assert_equal(int(byte), 48) + var post_read_position = reader.index + + # Unread the first byte from the reader. 
Read position should be moved back by 1 + err = reader.unread_byte() + if err: + raise err + testing.assert_equal(int(reader.index), int(post_read_position - 1)) + + +def test_unread_byte_at_beginning(): + var reader = bytes.Reader("0123456789") + + alias AT_BEGINNING_ERROR = "bytes.Reader.unread_byte: at beginning of buffer." + + var err = reader.unread_byte() + if str(err) != AT_BEGINNING_ERROR: + raise err + + testing.assert_equal(str(err), AT_BEGINNING_ERROR) diff --git a/test/test_file.mojo b/test/test_file.mojo new file mode 100644 index 0000000..1e88f91 --- /dev/null +++ b/test/test_file.mojo @@ -0,0 +1,56 @@ +from gojo.io import read_all, FileWrapper +from gojo.bytes import to_string +import pathlib +import testing + + +def test_read(): + var test_file = str(pathlib._dir_of_current_file()) + "/data/test.txt" + var file = FileWrapper(test_file, "r") + var dest = List[UInt8, True](capacity=16) + _ = file.read(dest) + testing.assert_equal(to_string(dest), "12345\n") + + +def test_read_all(): + var test_file = str(pathlib._dir_of_current_file()) + "/data/test_big_file.txt" + var file = FileWrapper(test_file, "r") + var result = file.read_all() + var bytes = result[0] + testing.assert_equal(len(bytes), 15359) + bytes.append(0) + + with open(test_file, "r") as f: + var expected = f.read() + testing.assert_equal(String(bytes), expected) + + +def test_io_read_all(): + var test_file = str(pathlib._dir_of_current_file()) + "/data/test_big_file.txt" + var file = FileWrapper(test_file, "r") + var result = read_all(file) + var bytes = result[0] + testing.assert_equal(len(bytes), 15359) + bytes.append(0) + + with open(test_file, "r") as f: + var expected = f.read() + testing.assert_equal(String(bytes), expected) + + +def test_read_byte(): + var test_file = str(pathlib._dir_of_current_file()) + "/data/test.txt" + var file = FileWrapper(test_file, "r") + testing.assert_equal(int(file.read_byte()[0]), 49) + + +def test_write(): + var test_file = str(pathlib._dir_of_current_file()) + "/data/test_write.txt" + var file = FileWrapper(test_file, "w") + var content = "12345" + var bytes_written = file.write(content.as_bytes_slice()) + testing.assert_equal(bytes_written[0], 5) + + with open(test_file, "r") as f: + var expected = f.read() + testing.assert_equal(content, expected) diff --git a/tests/test_fmt.mojo b/test/test_fmt.mojo similarity index 56% rename from tests/test_fmt.mojo rename to test/test_fmt.mojo index a7eaa21..484afdd 100644 --- a/tests/test_fmt.mojo +++ b/test/test_fmt.mojo @@ -1,9 +1,8 @@ -from tests.wrapper import MojoTest from gojo.fmt import sprintf, printf +import testing -fn test_sprintf() raises: - var test = MojoTest("Testing sprintf") +def test_sprintf(): var s = sprintf( "Hello, %s. I am %d years old. More precisely, I am %f years old. It is %t that I like Mojo!", String("world"), @@ -11,20 +10,16 @@ fn test_sprintf() raises: Float64(29.5), True, ) - test.assert_equal( + testing.assert_equal( s, "Hello, world. I am 29 years old. More precisely, I am 29.5 years old. It is True that I like Mojo!", ) - s = sprintf("This is a number: %d. In base 16: %x. In base 16 upper: %X.", 42, 42, 42) - test.assert_equal(s, "This is a number: 42. In base 16: 2a. In base 16 upper: 2A.") - s = sprintf("Hello %s", String("world").as_bytes()) - test.assert_equal(s, "Hello world") + testing.assert_equal(s, "Hello world") -fn test_printf() raises: - var test = MojoTest("Testing printf") +def test_printf(): printf( "Hello, %s. I am %d years old. More precisely, I am %f years old. 
It is %t that I like Mojo!", String("world"), @@ -32,8 +27,3 @@ fn test_printf() raises: Float64(29.5), True, ) - - -fn main() raises: - test_sprintf() - # test_printf() diff --git a/test/test_get_addr.mojo b/test/test_get_addr.mojo new file mode 100644 index 0000000..1861b35 --- /dev/null +++ b/test/test_get_addr.mojo @@ -0,0 +1,86 @@ +# from gojo.net import Socket, TCPAddr, get_ip_address, listen_tcp, dial_tcp +# from gojo.syscall import SocketOptions, ProtocolFamily + + +# def test_dial(): +# # Connect to example.com on port 80 and send a GET request +# var connection = dial_tcp("tcp", TCPAddr(get_ip_address("www.example.com"), 80)) +# var bytes_written: Int = 0 +# var err = Error() +# bytes_written, err = connection.write( +# String("GET / HTTP/1.1\r\nHost: www.example.com\r\nConnection: close\r\n\r\n").as_bytes_slice() +# ) +# if err: +# raise err + +# if bytes_written == 0: +# print("No bytes sent to peer.") +# return + +# # Read the response from the connection +# var response = List[UInt8, True](capacity=4096) +# var bytes_read: Int = 0 +# bytes_read, err = connection.read(response) +# if err: +# raise err + +# if bytes_read == 0: +# print("No bytes received from peer.") +# return + +# print(String(response)) + +# # Cleanup the connection +# err = connection.close() +# if err: +# raise err + + +# def test_listener(): +# var listener = listen_tcp("tcp", TCPAddr("0.0.0.0", 8081)) +# while True: +# var conn = listener.accept() +# print("Accepted connection from", str(conn.remote_address())) +# var err = conn.close() +# if err: +# raise err + + +# def test_stuff(): +# # TODO: context manager not working yet +# # with Socket() as socket: +# # socket.bind("0.0.0.0", 8080) + +# var socket = Socket(protocol=ProtocolFamily.PF_UNIX) +# socket.bind("0.0.0.0", 8080) +# _ = socket.connect(get_ip_address("www.example.com"), 80) +# print("File number", socket.file_no()) +# var local = socket.get_sock_name() +# var remote = socket.get_peer_name() +# print("Local address", str(local), str(socket.local_address)) +# print("Remote address", str(remote[0]), str(socket.remote_address)) +# socket.set_socket_option(SocketOptions.SO_REUSEADDR, 1) +# print("REUSE_ADDR value", socket.get_socket_option(SocketOptions.SO_REUSEADDR)) +# # var timeout = 30 +# # socket.set_timeout(timeout) +# # print(socket.get_timeout()) +# socket.shutdown() +# print("closing") +# var err = socket.close() +# print("closed") +# if err: +# print("err returned") +# raise err +# # var option_value = socket.get_sock_opt(SocketOptions.SO_REUSEADDR) +# # print(option_value) +# # socket.connect(self.ip, self.port) +# # socket.send(message) +# # var response = socket.receive() # TODO: call receive until all data is fetched, receive should also just return bytes +# # socket.shutdown() +# # socket.close() + + +# # def main(): +# # # test_dial() +# # # test_listener() +# # test_stuff() diff --git a/test/test_std.mojo b/test/test_std.mojo new file mode 100644 index 0000000..7c66c43 --- /dev/null +++ b/test/test_std.mojo @@ -0,0 +1,9 @@ +from gojo.syscall import FD +from gojo.io import STDWriter +import testing + + +def test_writer(): + # var test = MojoTest("Testing STDWriter.write") + var writer = STDWriter[FD.STDOUT]() + _ = writer.write_string("") diff --git a/test/test_strings_reader.mojo b/test/test_strings_reader.mojo new file mode 100644 index 0000000..b62fda5 --- /dev/null +++ b/test/test_strings_reader.mojo @@ -0,0 +1,82 @@ +from gojo.strings import StringBuilder, Reader +from gojo.bytes import to_string +import gojo.io +import 
testing + + +def test_read(): + var example: String = "Hello, World!" + var reader = Reader("Hello, World!") + + # Test reading from the reader. + var buffer = List[UInt8, True](capacity=16) + var bytes_read = reader.read(buffer) + buffer.append(0) + + testing.assert_equal(bytes_read[0], len(example)) + testing.assert_equal(String(buffer), "Hello, World!") + + +def test_read_slice(): + var example: String = "Hello, World!" + var reader = Reader("Hello, World!") + + # Test reading from the reader. + var buffer = List[UInt8, True](capacity=16) + var bytes_read = reader.read(buffer) + buffer.append(0) + + testing.assert_equal(bytes_read[0], len(example)) + testing.assert_equal(String(buffer), "Hello, World!") + + +def test_read_at(): + var example: String = "Hello, World!" + var reader = Reader("Hello, World!") + + # Test reading from the reader. + var buffer = List[UInt8, True](capacity=128) + var bytes_read = reader.read_at(buffer, 7) + buffer.append(0) + + testing.assert_equal(bytes_read[0], len(example[7:])) + testing.assert_equal(String(buffer), "World!") + + +def test_seek(): + var reader = Reader("Hello, World!") + + # Seek to the middle of the reader. + var position = reader.seek(5, io.SEEK_START) + testing.assert_equal(int(position[0]), 5) + + +def test_read_and_unread_byte(): + var example: String = "Hello, World!" + var reader = Reader("Hello, World!") + + # Read the first byte from the reader. + var byte = reader.read_byte() + testing.assert_equal(int(byte[0]), 72) + + # Unread the first byte from the reader. Remaining bytes to be read should be the same as the length of the example string. + _ = reader.unread_byte() + testing.assert_equal(len(reader), len(example)) + + +def test_write_to(): + var example: String = "Hello, World!" + var reader = Reader("Hello, World!") + + # Write from the string reader to a StringBuilder. + var builder = StringBuilder() + _ = reader.write_to(builder) + testing.assert_equal(str(builder), example) + + +def test_read_until_delimiter(): + var reader = Reader("Hello, World!") + + # Test reading from the reader. + var result = reader.read_until_delimiter(",") + testing.assert_equal(result, "Hello") diff --git a/tests/test_strings_stringbuilder.mojo b/test/test_strings_stringbuilder.mojo similarity index 59% rename from tests/test_strings_stringbuilder.mojo rename to test/test_strings_stringbuilder.mojo index ae52f1a..506280d 100644 --- a/tests/test_strings_stringbuilder.mojo +++ b/test/test_strings_stringbuilder.mojo @@ -1,56 +1,41 @@ -from tests.wrapper import MojoTest from gojo.strings import StringBuilder +import testing -fn test_write_string() raises: - var test = MojoTest("Testing strings.StringBuilder.write_string") - +def test_write_string(): # Create a string from the builder by writing strings to it. var builder = StringBuilder() for _ in range(3): _ = builder.write_string("Lorem ipsum dolor sit amet ") - test.assert_equal( + testing.assert_equal( str(builder), "Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet ", ) -fn test_big_write(): - var test = MojoTest("Testing strings.StringBuilder.write_string with big Write") - +def test_big_write(): # Create a string from the builder by writing strings to it. 
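For reference, the builder pattern these tests exercise looks like this in ordinary code (only constructors and methods shown in the tests are used):

```mojo
from gojo.strings import StringBuilder


def main():
    # Accumulate pieces without re-allocating a String on every concatenation.
    var builder = StringBuilder(capacity=64)
    for i in range(3):
        _ = builder.write_string("line " + str(i) + "\n")
    print(str(builder))
```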
var builder = StringBuilder(capacity=1) _ = builder.write_string("Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet") - test.assert_equal( + testing.assert_equal( str(builder), "Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet Lorem ipsum dolor sit amet", ) -fn test_write() raises: - var test = MojoTest("Testing strings.StringBuilder.write") - +def test_write(): # Create a string from the builder by writing bytes to it. var builder = StringBuilder() _ = builder.write(String("Hello").as_bytes_slice()) - test.assert_equal(str(builder), "Hello") + testing.assert_equal(str(builder), "Hello") -fn test_write_byte() raises: - var test = MojoTest("Testing strings.StringBuilder.write_byte") - +def test_write_byte(): # Create a string from the builder by writing bytes to it. var builder = StringBuilder() _ = builder.write_byte(ord("H")) - test.assert_equal(str(builder), "H") - - -fn main() raises: - test_write_string() - test_write() - test_write_byte() - test_big_write() + testing.assert_equal(str(builder), "H") diff --git a/test/test_unicode_width.mojo b/test/test_unicode_width.mojo new file mode 100644 index 0000000..c2496a1 --- /dev/null +++ b/test/test_unicode_width.mojo @@ -0,0 +1,29 @@ +from gojo.unicode import string_width, rune_width +import testing + + +def test_string_width_east_asian(): + var s: String = "𡨸漢𡨸漢" + + testing.assert_equal(string_width(s), 8, msg="The length of 𡨸漢𡨸漢 should be 8.") + for r in s: + testing.assert_equal(rune_width(ord(r)), 2, msg="The width of each character should be 2.") + testing.assert_equal(string_width(r), 2, msg="The width of each character should be 2.") + + +def test_string_width_ascii(): + var ascii: String = "Hello, World!" + + testing.assert_equal(string_width(ascii), 13) + for r in ascii: + testing.assert_equal(rune_width(ord(r)), 1, msg="The width of each character should be 1.") + testing.assert_equal(string_width(r), 1, msg="The width of each character should be 1.") + + +def test_string_width_emoji(): + var s: String = "🔥🔥🔥🔥" + + testing.assert_equal(string_width(s), 8) + for r in s: + testing.assert_equal(rune_width(ord(r)), 2, msg="The width of each character should be 2.") + testing.assert_equal(string_width(r), 2, msg="The width of each character should be 2.") diff --git a/tests/__init__.mojo b/tests/__init__.mojo deleted file mode 100644 index e69de29..0000000 diff --git a/tests/data/test.txt b/tests/data/test.txt deleted file mode 100644 index bd41cba..0000000 --- a/tests/data/test.txt +++ /dev/null @@ -1 +0,0 @@ -12345 \ No newline at end of file diff --git a/tests/data/test_big_read.csv b/tests/data/test_big_read.csv deleted file mode 100644 index a1cb236..0000000 --- a/tests/data/test_big_read.csv +++ /dev/null @@ -1,201 +0,0 @@ -a,b,c -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am 
here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here 
-Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here diff --git a/tests/data/test_read.csv b/tests/data/test_read.csv deleted file mode 100644 index 385ea37..0000000 --- a/tests/data/test_read.csv +++ /dev/null @@ -1,3 +0,0 @@ -Hello,World,I am here -Goodbye,World,I was here -Lorem,Ipsum,Dolor \ No newline at end of file diff --git a/tests/data/test_write.csv b/tests/data/test_write.csv deleted file mode 100644 index ee02d02..0000000 --- a/tests/data/test_write.csv +++ /dev/null @@ -1,11 +0,0 @@ -a,b,c -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here -Hello,World,I am here diff --git a/tests/data/test_write.txt b/tests/data/test_write.txt deleted file mode 100644 index bd41cba..0000000 --- a/tests/data/test_write.txt +++ /dev/null @@ -1 +0,0 @@ -12345 \ No newline at end of file diff --git a/tests/test_bufio.mojo b/tests/test_bufio.mojo deleted file mode 100644 index 8d02f3b..0000000 --- a/tests/test_bufio.mojo +++ /dev/null @@ -1,240 +0,0 @@ -from tests.wrapper import MojoTest -from gojo.bytes import buffer -from gojo.builtins.bytes import to_string -from gojo.bufio import Reader, Scanner, scan_words, scan_bytes, Writer, new_writer, new_reader -from gojo.io import read_all, FileWrapper -from gojo.strings import StringBuilder - - -fn test_read(): - var test = MojoTest("Testing bufio.Reader.read") - - # Create a reader from a string buffer - var s: String = "Hello" - var buf = buffer.new_buffer(s) - var reader = Reader(buf^) - - # Read the buffer into List[UInt8] and then add more to List[UInt8] - var dest = List[UInt8](capacity=256) - _ = reader.read(dest) - dest.extend(String(" World!").as_bytes()) - - test.assert_equal(to_string(dest), "Hello World!") - - -fn test_read_all(): - var test = MojoTest("Testing bufio.Reader with io.read_all") - - var s: String = "0123456789" - var buf = buffer.new_reader(s) - var reader = Reader(buf^) - var result = read_all(reader) - var bytes = result[0] - bytes.append(0) - test.assert_equal(String(bytes), "0123456789") - - -# fn test_write_to(): -# var test = MojoTest("Testing bufio.Reader.write_to") - -# var buf = buffer.new_buffer("0123456789") -# var reader = Reader(buf^) - -# # Create a new writer containing the content "Hello World" -# var writer = buffer.new_buffer("Hello World") - -# # Write the content of the reader to the writer -# _ = reader.write_to(writer) - -# # Check if the content of the writer is "Hello World0123456789" -# test.assert_equal(str(writer), "Hello World0123456789") - - -fn test_read_and_unread_byte(): - var test = MojoTest("Testing bufio.Reader.read_byte and bufio.Reader.unread_byte") - - # Read the first byte from the reader. - var example: String = "Hello, World!" - var buf = buffer.new_buffer(example^) - var reader = Reader(buf^) - var result = reader.read_byte() - test.assert_equal(int(result[0]), int(72)) - var post_read_position = reader.read_pos - - # Unread the first byte from the reader. 
Read position should be moved back by 1 - _ = reader.unread_byte() - test.assert_equal(reader.read_pos, post_read_position - 1) - - -fn test_read_slice(): - var test = MojoTest("Testing bufio.Reader.read_slice") - var buf = buffer.new_buffer("0123456789") - var reader = Reader(buf^) - - var result = reader.read_slice(ord("5")) - test.assert_equal(to_string(result[0]), "012345") - - -fn test_read_bytes(): - var test = MojoTest("Testing bufio.Reader.read_bytes") - var buf = buffer.new_buffer("01234\n56789") - var reader = Reader(buf^) - - var result = reader.read_bytes(ord("\n")) - test.assert_equal(to_string(result[0]), "01234") - - -fn test_read_line(): - var test = MojoTest("Testing bufio.Reader.read_line") - var buf = buffer.new_buffer("01234\n56789") - var reader = Reader(buf^) - - var line: List[UInt8] - var b: Bool - line, b = reader.read_line() - test.assert_equal(String(line), "01234") - - -fn test_peek(): - var test = MojoTest("Testing bufio.Reader.peek") - var buf = buffer.new_buffer("01234\n56789") - var reader = Reader(buf^) - - # Peek doesn't advance the reader, so we should see the same content twice. - var result = reader.peek(5) - var second_result = reader.peek(5) - test.assert_equal(to_string(result[0]), "01234") - test.assert_equal(to_string(second_result[0]), "01234") - - -fn test_discard(): - var test = MojoTest("Testing bufio.Reader.discard") - var buf = buffer.new_buffer("0123456789") - var reader = Reader(buf^) - - var result = reader.discard(5) - test.assert_equal(result[0], 5) - - # Peek doesn't advance the reader, so we should see the same content twice. - var second_result = reader.peek(5) - test.assert_equal(to_string(second_result[0]), "56789") - - -fn test_write(): - var test = MojoTest("Testing bufio.Writer.write and flush") - - # Create a new List[UInt8] Buffer Writer and use it to create the buffered Writer - # var buf = buffer.new_buffer() - var writer = new_writer(buffer.new_buffer()) - # var writer = Writer(buf^) - - # Write the content from src to the buffered writer's internal buffer and flush it to the List[UInt8] Buffer Writer. - var src = String("0123456789").as_bytes() - var result = writer.write(src) - _ = writer.flush() - - test.assert_equal(result[0], 10) - test.assert_equal(str(writer.writer), "0123456789") - - -fn test_several_writes(): - var test = MojoTest("Testing several bufio.Writer.write") - - # Create a new List[UInt8] Buffer Writer and use it to create the buffered Writer - var buf = buffer.new_buffer() - var writer = Writer(buf^) - - # Write the content from src to the buffered writer's internal buffer and flush it to the List[UInt8] Buffer Writer. - var src = String("0123456789").as_bytes() - for _ in range(100): - _ = writer.write(src) - _ = writer.flush() - - test.assert_equal(len(writer.writer), 1000) - var text = str(writer.writer) - test.assert_equal(text[0], "0") - test.assert_equal(text[999], "9") - - -fn test_big_write(): - var test = MojoTest("Testing a big bufio.Writer.write") - - # Create a new List[UInt8] Buffer Writer and use it to create the buffered Writer - var buf = buffer.new_buffer() - var writer = Writer(buf^) - - # Build a string larger than the size of the Bufio struct's internal buffer. - var builder = StringBuilder(capacity=5000) - for _ in range(500): - _ = builder.write_string("0123456789") - - # When writing, it should bypass the Bufio struct's buffer and write directly to the underlying bytes buffer writer. So, no need to flush. 
- var text = str(builder) - _ = writer.write(text.as_bytes()) - test.assert_equal(len(writer.writer), 5000) - test.assert_equal(text[0], "0") - test.assert_equal(text[4999], "9") - - -fn test_write_byte(): - var test = MojoTest("Testing bufio.Writer.write_byte") - - # Create a new List[UInt8] Buffer Writer and use it to create the buffered Writer - var buf = buffer.new_buffer("Hello") - var writer = Writer(buf^) - - # Write a byte with the value of 32 to the writer's internal buffer and flush it to the List[UInt8] Buffer Writer. - var result = writer.write_byte(32) - _ = writer.flush() - - test.assert_equal(result[0], 1) - test.assert_equal(str(writer.writer), "Hello ") - - -fn test_write_string(): - var test = MojoTest("Testing bufio.Writer.write_string") - - # Create a new List[UInt8] Buffer Writer and use it to create the buffered Writer - var buf = buffer.new_buffer("Hello") - var writer = Writer(buf^) - - # Write a string to the writer's internal buffer and flush it to the List[UInt8] Buffer Writer. - var result = writer.write_string(" World!") - _ = writer.flush() - - test.assert_equal(result[0], 7) - test.assert_equal(str(writer.writer), "Hello World!") - - -fn test_read_from(): - var test = MojoTest("Testing bufio.Writer.read_from") - - # Create a new List[UInt8] Buffer Writer and use it to create the buffered Writer - var buf = buffer.new_buffer("Hello") - var writer = Writer(buf^) - - # Read from a ReaderFrom struct into the Buffered Writer's internal buffer and flush it to the List[UInt8] Buffer Writer. - var src = String(" World!").as_bytes() - var reader_from = buffer.new_buffer(src) - var result = writer.read_from(reader_from) - _ = writer.flush() - - test.assert_equal(int(result[0]), 7) - test.assert_equal(str(writer.writer), "Hello World!") - - -# TODO: Add big file read/write to make sure buffer usage is correct -fn main(): - test_read() - test_read_all() - # test_write_to() - test_read_and_unread_byte() - test_read_slice() - test_peek() - test_discard() - test_write() - test_several_writes() - test_big_write() - test_write_byte() - test_write_string() - test_read_from() diff --git a/tests/test_bufio_scanner.mojo b/tests/test_bufio_scanner.mojo deleted file mode 100644 index 7a035b0..0000000 --- a/tests/test_bufio_scanner.mojo +++ /dev/null @@ -1,152 +0,0 @@ -from tests.wrapper import MojoTest -from gojo.bytes import buffer -from gojo.io import FileWrapper -from gojo.bufio import Reader, Scanner, scan_words, scan_bytes, scan_runes - - -fn test_scan_words(): - var test = MojoTest("Testing bufio.scan_words") - - # Create a reader from a string buffer - var s: String = "Testing this string!" - var buf = buffer.new_buffer(s) - var r = Reader(buf^) - - # Create a scanner from the reader - var scanner = Scanner[split=scan_words](r^) - - var expected_results = List[String]("Testing", "this", "string!") - var i = 0 - - while scanner.scan(): - test.assert_equal(scanner.current_token(), expected_results[i]) - i += 1 - - -fn test_scan_lines(): - var test = MojoTest("Testing bufio.scan_lines") - - # Create a reader from a string buffer - var s: String = "Testing\nthis\nstring!" 
- var buf = buffer.new_buffer(s) - var r = Reader(buf^) - - # Create a scanner from the reader - var scanner = Scanner(r^) - - var expected_results = List[String]("Testing", "this", "string!") - var i = 0 - - while scanner.scan(): - test.assert_equal(scanner.current_token(), expected_results[i]) - i += 1 - - -fn scan_no_newline_test(test_case: String, result_lines: List[String], test: MojoTest): - # Create a reader from a string buffer - var buf = buffer.new_buffer(test_case) - var r = Reader(buf^) - - # Create a scanner from the reader - var scanner = Scanner(r^) - var i = 0 - while scanner.scan(): - test.assert_equal(scanner.current_token(), result_lines[i]) - i += 1 - - -fn test_scan_lines_no_newline(): - var test = MojoTest("Testing bufio.scan_lines with no final newline") - var test_case = "abcdefghijklmn\nopqrstuvwxyz" - var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz") - - scan_no_newline_test(test_case, result_lines, test) - - -fn test_scan_lines_cr_no_newline(): - var test = MojoTest("Testing bufio.scan_lines with no final newline but carriage return") - var test_case = "abcdefghijklmn\nopqrstuvwxyz\r" - var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz") - - scan_no_newline_test(test_case, result_lines, test) - - -fn test_scan_lines_empty_final_line(): - var test = MojoTest("Testing bufio.scan_lines with an empty final line") - var test_case = "abcdefghijklmn\nopqrstuvwxyz\n\n" - var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz", "") - - scan_no_newline_test(test_case, result_lines, test) - - -fn test_scan_lines_cr_empty_final_line(): - var test = MojoTest("Testing bufio.scan_lines with an empty final line and carriage return") - var test_case = "abcdefghijklmn\nopqrstuvwxyz\n\r" - var result_lines = List[String]("abcdefghijklmn", "opqrstuvwxyz", "") - - scan_no_newline_test(test_case, result_lines, test) - - -fn test_scan_bytes(): - var test = MojoTest("Testing bufio.scan_bytes") - - var test_cases = List[String]("", "a", "abc", "abc def\n\t\tgh ") - - for i in range(len(test_cases)): - var test_case = test_cases[i] - # Create a reader from a string buffer - var buf = buffer.new_buffer(test_case) - var reader = Reader(buf^) - - # Create a scanner from the reader - var scanner = Scanner[split=scan_bytes](reader^) - - var j = 0 - while scanner.scan(): - test.assert_equal(scanner.current_token(), test_case[j]) - j += 1 - - -fn test_file_wrapper_scanner() raises: - var test = MojoTest("testing io.FileWrapper and bufio.Scanner") - var file = FileWrapper("tests/data/test_multiple_lines.txt", "r") - - # Create a scanner from the reader - var scanner = Scanner(file^) - var expected_results = List[String]("11111", "22222", "33333", "44444", "55555") - var i = 0 - - while scanner.scan(): - test.assert_equal(scanner.current_token(), expected_results[i]) - i += 1 - - -fn test_scan_runes(): - var test = MojoTest("Testing bufio.scan_runes") - - # Create a reader from a string buffer - var s: String = "🔪🔥🔪" - var buf = buffer.new_buffer(s) - var r = Reader(buf^) - - # Create a scanner from the reader - var scanner = Scanner[split=scan_runes](r^) - - var expected_results = List[String]("🔪", "🔥", "🔪") - var i = 0 - - while scanner.scan(): - test.assert_equal(scanner.current_token(), expected_results[i]) - i += 1 - - -fn main() raises: - test_scan_words() - test_scan_lines() - test_scan_lines_no_newline() - test_scan_lines_cr_no_newline() - test_scan_lines_empty_final_line() - test_scan_lines_cr_empty_final_line() - test_scan_bytes() - 
test_file_wrapper_scanner() - test_scan_runes() diff --git a/tests/test_builtins_bytes.mojo b/tests/test_builtins_bytes.mojo deleted file mode 100644 index ae8e851..0000000 --- a/tests/test_builtins_bytes.mojo +++ /dev/null @@ -1,24 +0,0 @@ -from tests.wrapper import MojoTest -from testing import testing -from gojo.builtins.bytes import Byte, index_byte - - -fn test_index_byte(): - var test = MojoTest("Testing builtins.List[Byte] slice") - var bytes = String("hello\n").as_bytes() - test.assert_equal(index_byte(bytes, ord("\n")), 5) - - -fn test_size_and_len(): - var test = MojoTest("Testing builtins.List[Byte].size and builtins.List[Byte].__len__") - var bytes = List[Byte](capacity=16) - - # Size is the number of bytes used, len is the number of bytes allocated. - test.assert_equal(bytes.capacity, 16) - test.assert_equal(len(bytes), 0) - - -fn main(): - # test_slice_out_of_bounds() - test_index_byte() - test_size_and_len() diff --git a/tests/test_bytes_buffer.mojo b/tests/test_bytes_buffer.mojo deleted file mode 100644 index 75e0bc2..0000000 --- a/tests/test_bytes_buffer.mojo +++ /dev/null @@ -1,121 +0,0 @@ -from tests.wrapper import MojoTest -from gojo.bytes import new_buffer -from gojo.bytes.buffer import Buffer - - -fn test_read() raises: - var test = MojoTest("Testing bytes.Buffer.read") - var s: String = "Hello World!" - var buf = new_buffer(s) - var dest = List[UInt8](capacity=16) - _ = buf.read(dest) - dest.append(0) - test.assert_equal(String(dest), s) - - -fn test_read_byte() raises: - var test = MojoTest("Testing bytes.Buffer.read_byte") - var s: String = "Hello World!" - var buf = new_buffer(s) - var result = buf.read_byte() - test.assert_equal(int(result[0]), 72) - - -fn test_unread_byte() raises: - var test = MojoTest("Testing bytes.Buffer.unread_byte") - var s: String = "Hello World!" - var buf = new_buffer(s) - var result = buf.read_byte() - test.assert_equal(int(result[0]), 72) - test.assert_equal(buf.offset, 1) - - _ = buf.unread_byte() - test.assert_equal(buf.offset, 0) - - -fn test_read_bytes() raises: - var test = MojoTest("Testing bytes.Buffer.read_bytes") - var s: String = "Hello World!" - var buf = new_buffer(s) - var result = buf.read_bytes(ord("o")) - var text = result[0] - text.append(0) - test.assert_equal(String(text), String("Hello")) - - -fn test_read_slice() raises: - var test = MojoTest("Testing bytes.Buffer.read_slice") - var s: String = "Hello World!" - var buf = new_buffer(s) - var result = buf.read_slice(ord("o")) - var text = List[UInt8](result[0]) - text.append(0) - test.assert_equal(String(text), String("Hello")) - - -fn test_read_string() raises: - var test = MojoTest("Testing bytes.Buffer.read_string") - var s: String = "Hello World!" 
- var buf = new_buffer(s) - var result = buf.read_string(ord("o")) - test.assert_equal(String(result[0]), String("Hello")) - - -fn test_next() raises: - var test = MojoTest("Testing bytes.Buffer.next") - var buf = new_buffer("Hello World!") - var text = List[UInt8](buf.next(5)) - text.append(0) - test.assert_equal(String(text), String("Hello")) - - -fn test_write() raises: - var test = MojoTest("Testing bytes.Buffer.write") - var b = List[UInt8](capacity=16) - var buf = new_buffer(b^) - _ = buf.write(String("Hello World!").as_bytes_slice()) - test.assert_equal(str(buf), String("Hello World!")) - - -fn test_write_string() raises: - var test = MojoTest("Testing bytes.Buffer.write_string") - var b = List[UInt8](capacity=16) - var buf = new_buffer(b^) - - _ = buf.write_string("\nGoodbye World!") - test.assert_equal(str(buf), String("\nGoodbye World!")) - - -fn test_write_byte() raises: - var test = MojoTest("Testing bytes.Buffer.write_byte") - var b = List[UInt8](capacity=16) - var buf = new_buffer(b^) - _ = buf.write_byte(0x41) - test.assert_equal(str(buf), String("A")) - - -fn test_new_buffer() raises: - var test = MojoTest("Testing bytes.new_buffer") - var b = String("Hello World!").as_bytes() - var buf = new_buffer(b^) - test.assert_equal(str(buf), "Hello World!") - - buf = new_buffer("Goodbye World!") - test.assert_equal(str(buf), "Goodbye World!") - - buf = new_buffer() - test.assert_equal(str(buf), "") - - -fn main() raises: - test_read() - test_read_byte() - test_unread_byte() - test_read_slice() - test_read_bytes() - test_read_string() - test_next() - test_write() - test_write_string() - test_write_byte() - test_new_buffer() diff --git a/tests/test_bytes_reader.mojo b/tests/test_bytes_reader.mojo deleted file mode 100644 index 1075061..0000000 --- a/tests/test_bytes_reader.mojo +++ /dev/null @@ -1,152 +0,0 @@ -from tests.wrapper import MojoTest -from gojo.bytes import reader, buffer -import gojo.io - - -fn test_read() raises: - var test = MojoTest("Testing bytes.Reader.read") - var reader = reader.new_reader("0123456789") - var dest = List[UInt8](capacity=16) - _ = reader.read(dest) - dest.append(0) - test.assert_equal(String(dest), "0123456789") - - # Test negative seek - alias NEGATIVE_POSITION_ERROR = "bytes.Reader.seek: negative position" - var position: Int - var err: Error - position, err = reader.seek(-1, io.SEEK_START) - - if not err: - raise Error("Expected error not raised while testing negative seek.") - - if str(err) != NEGATIVE_POSITION_ERROR: - raise err - - test.assert_equal(str(err), NEGATIVE_POSITION_ERROR) - - -fn test_read_after_big_seek() raises: - var test = MojoTest("Testing bytes.Reader.read after big seek") - var reader = reader.new_reader("0123456789") - _ = reader.seek(123456789, io.SEEK_START) - var dest = List[UInt8](capacity=16) - - var bytes_read: Int - var err: Error - bytes_read, err = reader.read(dest) - if not err: - raise Error("Expected error not raised while testing big seek.") - - if str(err) != str(io.EOF): - raise err - - test.assert_equal(str(err), str(io.EOF)) - - -fn test_read_at() raises: - var test = MojoTest("Testing bytes.Reader.read_at") - var reader = reader.new_reader("0123456789") - - var dest = List[UInt8](capacity=16) - var pos = reader.read_at(dest, 0) - dest.append(0) - test.assert_equal(String(dest), "0123456789") - - dest = List[UInt8](capacity=16) - pos = reader.read_at(dest, 1) - dest.append(0) - test.assert_equal(String(dest), "123456789") - - -fn test_seek() raises: - var test = MojoTest("Testing bytes.Reader.seek") - var 
reader = reader.new_reader("0123456789") - var pos = reader.seek(5, io.SEEK_START) - - var dest = List[UInt8](capacity=16) - _ = reader.read(dest) - dest.append(0) - test.assert_equal(String(dest), "56789") - - # Test SEEK_END relative seek - pos = reader.seek(-2, io.SEEK_END) - dest = List[UInt8](capacity=16) - _ = reader.read(dest) - dest.append(0) - test.assert_equal(String(dest), "89") - - # Test SEEK_CURRENT relative seek (should be at the end of the reader, ie [:-4]) - pos = reader.seek(-4, io.SEEK_CURRENT) - dest = List[UInt8](capacity=16) - _ = reader.read(dest) - dest.append(0) - test.assert_equal(String(dest), "6789") - - -fn test_read_all() raises: - var test = MojoTest("Testing io.read_all with bytes.Reader") - var reader = reader.new_reader("0123456789") - var result = io.read_all(reader) - var bytes = result[0] - bytes.append(0) - test.assert_equal(String(bytes), "0123456789") - - -# fn test_write_to() raises: -# var test = MojoTest("Testing bytes.Reader.write_to") - -# # Create a new reader containing the content "0123456789" -# var reader = reader.new_reader("0123456789") - -# # Create a new writer containing the content "Hello World" -# var test_string: String = "Hello World" -# var w = buffer.new_buffer(test_string) - -# # Write the content of the reader to the writer -# _ = reader.write_to(w) - -# # Check if the content of the writer is "Hello World0123456789" -# test.assert_equal(str(w), String("Hello World0123456789")) - - -fn test_read_and_unread_byte() raises: - var test = MojoTest("Testing bytes.Reader.read_byte and bytes.Reader.unread_byte") - var reader = reader.new_reader("0123456789") - - # Read the first byte from the reader. - var byte: UInt8 - var err: Error - byte, err = reader.read_byte() - test.assert_equal(int(byte), 48) - var post_read_position = reader.index - - # Unread the first byte from the reader. 
Read position should be moved back by 1 - err = reader.unread_byte() - if err: - raise err - test.assert_equal(int(reader.index), int(post_read_position - 1)) - - -fn test_unread_byte_at_beginning() raises: - var test = MojoTest("Testing bytes.Reader.unread_byte before reading any bytes") - var reader = reader.new_reader("0123456789") - - alias AT_BEGINNING_ERROR = "bytes.Reader.unread_byte: at beginning of slice" - - var err = reader.unread_byte() - if str(err) != AT_BEGINNING_ERROR: - raise err - - test.assert_equal(str(err), AT_BEGINNING_ERROR) - - -fn main() raises: - test_read() - test_read_after_big_seek() - test_read_at() - test_read_all() - test_read_and_unread_byte() - test_unread_byte_at_beginning() - test_seek() - # test_write_to() diff --git a/tests/test_file.mojo b/tests/test_file.mojo deleted file mode 100644 index 610d3bd..0000000 --- a/tests/test_file.mojo +++ /dev/null @@ -1,63 +0,0 @@ -from tests.wrapper import MojoTest -from gojo.io import read_all, FileWrapper - - -fn test_read() raises: - var test = MojoTest("Testing FileWrapper.read") - var file = FileWrapper("tests/data/test.txt", "r") - var dest = List[UInt8](capacity=16) - _ = file.read(dest) - dest.append(0) - test.assert_equal(String(dest), "12345") - - -fn test_read_all() raises: - var test = MojoTest("Testing FileWrapper.read_all") - var file = FileWrapper("tests/data/test_big_file.txt", "r") - var result = file.read_all() - var bytes = result[0] - test.assert_equal(len(bytes), 15358) - bytes.append(0) - - with open("tests/data/test_big_file.txt", "r") as f: - var expected = f.read() - test.assert_equal(String(bytes), expected) - - -fn test_io_read_all() raises: - var test = MojoTest("Testing io.read_all with FileWrapper") - var file = FileWrapper("tests/data/test_big_file.txt", "r") - var result = read_all(file) - var bytes = result[0] - test.assert_equal(len(bytes), 15358) - bytes.append(0) - - with open("tests/data/test_big_file.txt", "r") as f: - var expected = f.read() - test.assert_equal(String(bytes), expected) - - -fn test_read_byte() raises: - var test = MojoTest("Testing FileWrapper.read_byte") - var file = FileWrapper("tests/data/test.txt", "r") - test.assert_equal(int(file.read_byte()[0]), 49) - - -fn test_write() raises: - var test = MojoTest("Testing FileWrapper.write") - var file = FileWrapper("tests/data/test_write.txt", "w") - var content = String("12345") - var bytes_written = file.write(content.as_bytes()) - test.assert_equal(bytes_written[0], 5) - - with open("tests/data/test_write.txt", "r") as f: - var expected = f.read() - test.assert_equal(content, expected) - - -fn main() raises: - test_read() - test_read_all() - test_io_read_all() - test_read_byte() - test_write() diff --git a/tests/test_get_addr.mojo b/tests/test_get_addr.mojo deleted file mode 100644 index 2f481d2..0000000 --- a/tests/test_get_addr.mojo +++ /dev/null @@ -1,86 +0,0 @@ -from gojo.net import Socket, TCPAddr, get_ip_address, listen_tcp, dial_tcp -from gojo.syscall import SocketOptions, ProtocolFamily - - -fn test_dial() raises: - # Connect to example.com on port 80 and send a GET request - var connection = dial_tcp("tcp", TCPAddr(get_ip_address("www.example.com"), 80)) - var bytes_written: Int = 0 - var err = Error() - bytes_written, err = connection.write( - String("GET / HTTP/1.1\r\nHost: www.example.com\r\nConnection: close\r\n\r\n").as_bytes_slice() - ) - if err: - raise err - - if bytes_written == 0: - print("No bytes sent to peer.") - return - - # Read the response from the connection - var response = 
List[UInt8](capacity=4096) - var bytes_read: Int = 0 - bytes_read, err = connection.read(response) - if err: - raise err - - if bytes_read == 0: - print("No bytes received from peer.") - return - - print(String(response)) - - # Cleanup the connection - err = connection.close() - if err: - raise err - - -fn test_listener() raises: - var listener = listen_tcp("tcp", TCPAddr("0.0.0.0", 8081)) - while True: - var conn = listener.accept() - print("Accepted connection from", conn.remote_address()) - var err = conn.close() - if err: - raise err - - -# fn test_stuff() raises: -# # TODO: context manager not working yet -# # with Socket() as socket: -# # socket.bind("0.0.0.0", 8080) - -# var socket = Socket(protocol=ProtocolFamily.PF_UNIX) -# socket.bind("0.0.0.0", 8080) -# socket.connect(get_ip_address("www.example.com"), 80) -# print("File number", socket.file_no()) -# var local = socket.get_sock_name() -# var remote = socket.get_peer_name() -# print("Local address", str(local), socket.local_address) -# print("Remote address", str(remote), socket.remote_address) -# socket.set_socket_option(SocketOptions.SO_REUSEADDR, 1) -# print("REUSE_ADDR value", socket.get_socket_option(SocketOptions.SO_REUSEADDR)) -# var timeout = 30 -# # socket.set_timeout(timeout) -# # print(socket.get_timeout()) -# socket.shutdown() -# print("closing") -# var err = socket.close() -# print("closed") -# if err: -# print("err returned") -# raise err -# # var option_value = socket.get_sock_opt(SocketOptions.SO_REUSEADDR) -# # print(option_value) -# # socket.connect(self.ip, self.port) -# # socket.send(message) -# # var response = socket.receive() # TODO: call receive until all data is fetched, receive should also just return bytes -# # socket.shutdown() -# # socket.close() - - -fn main() raises: - # test_stuff() - # test_listener() - test_dial() diff --git a/tests/test_std.mojo b/tests/test_std.mojo deleted file mode 100644 index fa5aaf5..0000000 --- a/tests/test_std.mojo +++ /dev/null @@ -1,13 +0,0 @@ -from tests.wrapper import MojoTest -from gojo.syscall import FD -from gojo.io import STDWriter - - -fn test_writer() raises: - var test = MojoTest("Testing STDWriter.write") - var writer = STDWriter[FD.STDOUT]() - _ = writer.write_string("") - - -fn main() raises: - test_writer() diff --git a/tests/test_strings_reader.mojo b/tests/test_strings_reader.mojo deleted file mode 100644 index feb5be0..0000000 --- a/tests/test_strings_reader.mojo +++ /dev/null @@ -1,88 +0,0 @@ -from tests.wrapper import MojoTest -from gojo.strings import StringBuilder, Reader, new_reader -import gojo.io - - -fn test_read() raises: - var test = MojoTest("Testing strings.Reader.read") - var example: String = "Hello, World!" - var reader = new_reader("Hello, World!") - - # Test reading from the reader. - var buffer = List[UInt8](capacity=16) - var bytes_read = reader.read(buffer) - buffer.append(0) - - test.assert_equal(bytes_read[0], len(example)) - test.assert_equal(String(buffer), "Hello, World!") - - -fn test_read_slice() raises: - var test = MojoTest("Testing strings.Reader.read") - var example: String = "Hello, World!" - var reader = new_reader("Hello, World!") - - # Test reading from the reader. - var buffer = List[UInt8](capacity=16) - var bytes_read = reader.read(buffer) - buffer.append(0) - - test.assert_equal(bytes_read[0], len(example)) - test.assert_equal(String(buffer), "Hello, World!") - - -fn test_read_at() raises: - var test = MojoTest("Testing strings.Reader.read_at") - var example: String = "Hello, World!" 
- var reader = new_reader("Hello, World!") - - # Test reading from the reader. - var buffer = List[UInt8](capacity=128) - var bytes_read = reader.read_at(buffer, 7) - buffer.append(0) - - test.assert_equal(bytes_read[0], len(example[7:])) - test.assert_equal(String(buffer), "World!") - - -fn test_seek() raises: - var test = MojoTest("Testing strings.Reader.seek") - var reader = new_reader("Hello, World!") - - # Seek to the middle of the reader. - var position = reader.seek(5, io.SEEK_START) - test.assert_equal(int(position[0]), 5) - - -fn test_read_and_unread_byte() raises: - var test = MojoTest("Testing strings.Reader.read_byte and strings.Reader.unread_byte") - var example: String = "Hello, World!" - var reader = new_reader("Hello, World!") - - # Read the first byte from the reader. - var byte = reader.read_byte() - test.assert_equal(int(byte[0]), 72) - - # Unread the first byte from the reader. Remaining bytes to be read should be the same as the length of the example string. - _ = reader.unread_byte() - test.assert_equal(len(reader), len(example)) - - -# fn test_write_to() raises: -# var test = MojoTest("Testing strings.Reader.write_to") -# var example: String = "Hello, World!" -# var reader = new_reader("Hello, World!") - -# # Write from the string reader to a StringBuilder. -# var builder = StringBuilder() -# _ = reader.write_to(builder) -# test.assert_equal(str(builder), example) - - -fn main() raises: - test_read() - test_read_at() - test_seek() - test_read_and_unread_byte() - # test_write_to() - test_read_slice() diff --git a/tests/test_unicode_string.mojo b/tests/test_unicode_string.mojo deleted file mode 100644 index c434bcd..0000000 --- a/tests/test_unicode_string.mojo +++ /dev/null @@ -1,23 +0,0 @@ -from gojo.unicode import UnicodeString -from tests.wrapper import MojoTest - - -fn test_unicode_string(): - var test = MojoTest("Testing unicode.UnicodeString") - var s = UnicodeString("𡨸漢𡨸漢") - test.assert_equal(s.bytecount(), 14) - test.assert_equal(len(s), 4) - - var i = 0 - var results = List[String]("𡨸", "漢", "𡨸", "漢") - for c in s: - test.assert_equal(String(c), results[i]) - i += 1 - - test.assert_equal(String(s[:1]), "𡨸") - test.assert_equal(String(s[:2]), "𡨸漢") - # test.assert_equal(String(s[:-1]), "𡨸漢𡨸漢") - - -fn main(): - test_unicode_string() diff --git a/tests/test_unicode_width.mojo b/tests/test_unicode_width.mojo deleted file mode 100644 index c223038..0000000 --- a/tests/test_unicode_width.mojo +++ /dev/null @@ -1,20 +0,0 @@ -from gojo.unicode import string_width, rune_width, UnicodeString -from tests.wrapper import MojoTest - - -fn test_string_width(): - var test = MojoTest("Testing unicode.string_width and unicode.rune_width") - var ascii = "Hello, World!" - var s: String = "𡨸漢𡨸漢" - test.assert_equal(string_width(s), 8) - test.assert_equal(string_width(ascii), 13) - - for r in UnicodeString(s): - test.assert_equal(rune_width(ord(String(r))), 2) - - for r in UnicodeString(ascii): - test.assert_equal(rune_width(ord(String(r))), 1) - - -fn main(): - test_string_width() diff --git a/tests/wrapper.mojo b/tests/wrapper.mojo deleted file mode 100644 index bdf3f87..0000000 --- a/tests/wrapper.mojo +++ /dev/null @@ -1,38 +0,0 @@ -from testing import testing - - -@value -struct MojoTest: - """ - A utility struct for testing. 
- """ - - var test_name: String - - fn __init__(inout self, test_name: String): - self.test_name = test_name - print("# " + test_name) - - fn assert_true(self, cond: Bool, message: String = ""): - try: - if message == "": - testing.assert_true(cond) - else: - testing.assert_true(cond, message) - except e: - print(e) - - fn assert_false(self, cond: Bool, message: String = ""): - try: - if message == "": - testing.assert_false(cond) - else: - testing.assert_false(cond, message) - except e: - print(e) - - fn assert_equal[T: testing.Testable](self, left: T, right: T): - try: - testing.assert_equal(left, right) - except e: - print(e)