FlatFile.pm 3.4 KB

  1. package Trog::Data::FlatFile;
  2. use strict;
  3. use warnings;
  4. no warnings 'experimental';
  5. use feature qw{signatures};
  6. use Carp qw{confess};
  7. use JSON::MaybeXS;
  8. use File::Slurper;
  9. use File::Copy;
  10. use Mojo::File;
  11. use IO::AIO 2;
  12. use parent qw{Trog::DataModule};
  13. our $datastore = 'data/files';
  14. sub lang { 'Perl Regex in Quotemeta' }
  15. sub help { 'https://perldoc.perl.org/functions/quotemeta.html' }
  16. =head1 Trog::Data::FlatFile
This data model has multiple drawbacks, but is "good enough" for most low-content applications with few editors.

You can only post once per second, because each post is stored in a file named after its timestamp.
  19. =cut
# Shared JSON codec for every (de)serialization in this module.
our $parser = JSON::MaybeXS->new();
  21. sub read ($self, $query={}) {
  22. #Optimize direct ID
  23. my @index;
  24. if ($query->{id}) {
  25. @index = ("$datastore/$query->{id}");
  26. } else {
  27. @index = $self->_index();
  28. }
  29. $query->{limit} //= 25;
  30. my $done = 0;
  31. my $grp = aio_group sub {
  32. $done = 1;
  33. };
  34. #TODO up the limit of group appropriately
  35. my $contents = {};
  36. my $num_read = 0;
  37. @index = grep { -f } @index;
  38. my @items;
  39. feed $grp sub {
  40. my $file = shift @index or return;
  41. add $grp (aio_slurp $file, 0, 0, $contents->{$file}, sub {
  42. #Don't waste any time if we dont have to
  43. return if scalar(@items) >= $query->{limit};
  44. my $parsed = $parser->decode($contents->{$file});
  45. #XXX this imposes an inefficiency in itself, get() will filter uselessly again here later
  46. my @filtered = $self->filter($query,@$parsed);
  47. push(@items,@filtered) if @filtered;
  48. });
  49. };
  50. while (@index && !$done) {
  51. IO::AIO::poll_cb();
  52. last if scalar(@items) == $query->{limit};
  53. }
  54. $grp->cancel();
  55. @items = sort {$b->{created} <=> $a->{created} } @items;
  56. return \@items;
  57. foreach my $item (@index) {
  58. my $slurped = eval { File::Slurper::read_text($item) };
  59. if (!$slurped) {
  60. print "Failed to Read $item:\n$@\n";
  61. next;
  62. }
  63. my $parsed = $parser->decode($slurped);
  64. #XXX this imposes an inefficiency in itself, get() will filter uselessly again here
  65. my @filtered = $self->filter($query,@$parsed);
  66. push(@items,@filtered) if @filtered;
  67. last if scalar(@items) == $query->{limit};
  68. }
  69. return \@items;
  70. }
  71. sub _index ($self) {
  72. confess "Can't find datastore!" unless -d $datastore;
  73. opendir(my $dh, $datastore) or confess;
  74. my @index = grep { -f } map { "$datastore/$_" } readdir $dh;
  75. closedir $dh;
  76. return sort { $b cmp $a } @index;
  77. }
  78. sub write($self,$data) {
  79. foreach my $post (@$data) {
  80. my $file = "$datastore/$post->{id}";
  81. my $update = [$post];
  82. if (-f $file) {
  83. my $slurped = File::Slurper::read_text($file);
  84. my $parsed = $parser->decode($slurped);
  85. $update = [(@$parsed, $post)];
  86. }
  87. open(my $fh, '>', $file) or confess;
  88. print $fh $parser->encode($update);
  89. close $fh;
  90. }
  91. }
  92. sub count ($self) {
  93. my @index = $self->_index();
  94. return scalar(@index);
  95. }
  96. sub add ($self,@posts) {
  97. my $ctime = time();
  98. @posts = map {
  99. $_->{id} //= $ctime;
  100. $_->{created} = $ctime;
  101. $_
  102. } @posts;
  103. return $self->SUPER::add(@posts);
  104. }
  105. sub delete($self, @posts) {
  106. foreach my $update (@posts) {
  107. unlink "$datastore/$update->{id}" or confess;
  108. }
  109. return 0;
  110. }
  111. 1;