Commit 08cc78e

feat: support primjs
1 parent 7b7e8e1 commit 08cc78e

42 files changed, with 15846 additions and 0 deletions.
Lines changed: 261 additions & 0 deletions
@@ -0,0 +1,261 @@
/**
 * Copyright (c) 2017 Node.js API collaborators. All Rights Reserved.
 *
 * Use of this source code is governed by a MIT license that can be
 * found in the LICENSE file in the root of the source tree.
 */

// Copyright 2024 The Lynx Authors. All rights reserved.
// Licensed under the Apache License Version 2.0 that can be found in the
// LICENSE file in the root directory of this source tree.

#include "code_cache.h"

#include <stdio.h>

#include <algorithm>
#include <utility>

#if OS_ANDROID
#include "basic/log/logging.h"
#else
#define VLOGD(...) void((__VA_ARGS__))
#endif  // OS_ANDROID

#ifdef PROFILE_CODECACHE
#define INCREASE(target) ++(target)
#else
#define INCREASE(target)
#endif  // PROFILE_CODECACHE

constexpr double CacheBlob::MAGIC;

bool CacheBlob::insert(const std::string& filename, const uint8_t* data,
                       int length) {
  // too large (more than half of MAX) cached data must be discarded
  if (length == 0 || data == nullptr) {
    INCREASE(expired_query_);
    return false;
  }
  CachedData* target = nullptr;
  const uint8_t* old_data = nullptr;
  {
    std::unique_lock<std::mutex> lock(write_mutex_);
    auto it = cache_map_.find(filename);
    if (it != cache_map_.end()) {
      target = it->second;
      current_size_ -= target->length_;
      remove_from_ranking_list(target);

      if (get_enough_space(length)) {
        if (mode_ == kAppending) mode_ = kWriting;
        target->length_ = length;
      } else {
        cache_map_.erase(it);
        return false;
      }

    } else if (get_enough_space(length)) {
      target = new CachedData(length, nullptr, filename);
      cache_map_[filename] = target;
      if (mode_ == kAppending) {
        if (append_vec_ == nullptr) {
          append_vec_ = new CacheVector();
        }
        append_vec_->push_back(target);
      }
    } else {
      return false;
    }

    old_data = target->data_;
    target->data_ = data;
  }

  heat_ranking_.push_back(target);
  current_size_ += length;
  if (old_data) {
    delete[] old_data;
  }
  INCREASE(target->used_times_);

  return true;
}

// The context guarantees that len is not nullptr.
const CachedData* CacheBlob::find(const std::string& filename, int* len) const {
  if (!write_mutex_.try_lock()) {
    INCREASE(missed_query_);
    INCREASE(total_query_);
    return empty_cache_.get();
  }
  auto it = cache_map_.find(filename);
  write_mutex_.unlock();
  CachedData* result = nullptr;
  if (it != cache_map_.end()) {
    // every time a cache is searched, its heat ranking
    // rises and thus the ranking list changes
    result = it->second;
    *len = result->length_;
    INCREASE(result->used_times_);
  } else {
    *len = 0;
    INCREASE(missed_query_);
  }
  INCREASE(total_query_);

  // NOTE:
  // The returned cached data is safe beyond this scope, because the JS
  // engine takes this data and then uses it consecutively on one thread.
  // Tasks that insert new data for the same filename are only posted
  // after that cached data has been fully consumed.
  return result;
}

void CacheBlob::remove(const std::string& filename) {
  auto it = cache_map_.find(filename);
  if (it != cache_map_.end()) {
    current_size_ -= it->second->length_;
    delete it->second;
    cache_map_.erase(it);
    if (mode_ == kAppending) mode_ = kWriting;
  }
}

// cache file structure:
// Header:
//   | magic number       | --> 8 bytes
// Body:
//   | file name size     | --> 2 bytes
//   | file name          | --> x bytes
//   | cache data length  | --> 4 bytes
//   | cache data         | --> y bytes
//   ...
void CacheBlob::output() {
  if (mode_ == kAppending && append_vec_ == nullptr) return;
  bool appending = mode_ == kAppending && append_vec_ != nullptr;

  FILE* file_out = appending ? fopen(target_path_.c_str(), "ab")
                             : fopen(target_path_.c_str(), "wb");
  if (file_out) {
    if (appending) {
      for (auto it : *append_vec_) write_cache_unit(file_out, it);
    } else {
      fwrite(&MAGIC, DOUBLE_SIZE, 1, file_out);
      for (auto& it : cache_map_) write_cache_unit(file_out, it.second);
    }
    fclose(file_out);
    VLOGD("codecache: output cache file %s succeed.\n", target_path_.c_str());
  }
}

// steps to rebuild blob:
// 1. read and check magic number;
// 2. read 2 bytes to get the size (x) of file name;
// 3. read x bytes to get the file name;
// 4. read 4 bytes to get the size (y) of cache data;
// 5. read y bytes to get the actual content of cache data;
// 6. go back to step 2 until EOF
bool CacheBlob::input() {
  FILE* file_in = fopen(target_path_.c_str(), "rb");

  bool succeeded = false;
  if (file_in) {
    double maybe_magic;
    fread(reinterpret_cast<char*>(&maybe_magic), DOUBLE_SIZE, 1, file_in);
    // check whether the file is valid
    if (maybe_magic == MAGIC) {
      int c;
      while ((c = fgetc(file_in)) != EOF) {
        ungetc(c, file_in);
        read_cache_unit(file_in);
      }
      mode_ = kAppending;
      succeeded = true;
    }
    fclose(file_in);
  }
  return succeeded;
}

#ifdef PROFILE_CODECACHE
void CacheBlob::dump_status(void* p) {
  std::vector<std::pair<std::string, int> >* status_vec =
      reinterpret_cast<std::vector<std::pair<std::string, int> >*>(p);
  std::sort(heat_ranking_.begin(), heat_ranking_.end(), CachedData::compare);
  status_vec->push_back(std::pair<std::string, int>("Total", total_query_));
  status_vec->push_back(std::pair<std::string, int>("Missed", missed_query_));
  status_vec->push_back(std::pair<std::string, int>("Expired", expired_query_));
  status_vec->push_back(std::pair<std::string, int>(
      "Updated", mode_ == kAppending && append_vec_ == nullptr ? 0 : 1));
  status_vec->push_back(std::pair<std::string, int>("Size", current_size_));
  status_vec->push_back(std::pair<std::string, int>("Heat Ranking, total ",
                                                    heat_ranking_.size()));
  for (size_t i = 0; i < heat_ranking_.size(); ++i) {
    CachedData* dt = heat_ranking_[i];
    status_vec->push_back(
        std::pair<std::string, int>(dt->file_name_, dt->used_times_));
  }
}
#endif  // PROFILE_CODECACHE

void CacheBlob::write_cache_unit(FILE* file_out, const CachedData* unit) {
  // write file name
  uint16_t size = static_cast<uint16_t>(unit->file_name_.size());
  fwrite(static_cast<void*>(&size), SHORT_SIZE, 1, file_out);
  fwrite(unit->file_name_.c_str(), 1, unit->file_name_.size(), file_out);

  uint32_t length = unit->length_;
  fwrite(static_cast<void*>(&length), INT_SIZE, 1, file_out);
  fwrite(unit->data_, 1, unit->length_, file_out);
}

void CacheBlob::read_cache_unit(FILE* file_in) {
  uint16_t filename_length;
  fread(&filename_length, SHORT_SIZE, 1, file_in);
  std::string name(filename_length, '\0');
  fread(&name[0], 1, filename_length, file_in);

  uint32_t data_length;
  fread(&data_length, INT_SIZE, 1, file_in);
  uint8_t* data = new uint8_t[data_length];
  fread(data, 1, data_length, file_in);

  CachedData* cd = new CachedData(static_cast<int>(data_length), data, name);

  current_size_ += data_length;
  heat_ranking_.push_back(cd);
  cache_map_[name] = cd;
}

void CacheBlob::remove_from_ranking_list(CachedData* target) {
  for (size_t i = 0; i < heat_ranking_.size(); ++i) {
    if (target == heat_ranking_[i]) {
      heat_ranking_.erase(heat_ranking_.begin() + i);
      break;
    }
  }
}

// This function can be time-consuming.
bool CacheBlob::get_enough_space(int data_size) {
  // 0. check whether the available space is enough
  if (current_size_ + data_size <= max_capacity_) return true;
  int size_needed = data_size + current_size_ - max_capacity_;
  // 1. sort the heat_ranking_
  std::sort(heat_ranking_.begin(), heat_ranking_.end(), CachedData::compare);
  // 2. evict the least-used CachedDatas until enough space is freed
  for (int i = heat_ranking_.size() - 1; i >= 0; --i) {
    size_needed -= heat_ranking_[i]->length_;
    if (size_needed <= 0) {
      for (size_t j = i; j < heat_ranking_.size(); ++j) {
        CachedData* it = heat_ranking_[j];
        remove(it->file_name_);
      }
      heat_ranking_.erase(heat_ranking_.begin() + i, heat_ranking_.end());
      return true;
    }
  }
  return false;
}
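
The on-disk layout documented above output() is simple enough to inspect outside of CacheBlob. The following is a minimal standalone sketch of a reader for that format, assuming the same 8-byte magic double (3.14159265), 2-byte name-length, and 4-byte data-length fields as in the commit; the file path dump_codecache.bin is a hypothetical example.

// Minimal inspector sketch for the cache file format described above:
// prints each cached file name and its data length, skipping the payload.
#include <cstdint>
#include <cstdio>
#include <string>

int main() {
  const char* path = "dump_codecache.bin";  // hypothetical path
  FILE* in = fopen(path, "rb");
  if (!in) return 1;

  double magic = 0.0;
  if (fread(&magic, sizeof(double), 1, in) != 1 || magic != 3.14159265) {
    fclose(in);
    return 1;  // missing or wrong magic number: not a cache blob
  }

  uint16_t name_len = 0;
  while (fread(&name_len, sizeof(uint16_t), 1, in) == 1) {
    std::string name(name_len, '\0');
    uint32_t data_len = 0;
    if (fread(&name[0], 1, name_len, in) != name_len ||
        fread(&data_len, sizeof(uint32_t), 1, in) != 1) {
      break;  // truncated unit
    }
    printf("%s: %u bytes of cached code\n", name.c_str(), data_len);
    fseek(in, static_cast<long>(data_len), SEEK_CUR);  // skip the payload
  }
  fclose(in);
  return 0;
}
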
Lines changed: 108 additions & 0 deletions
@@ -0,0 +1,108 @@
/**
 * Copyright (c) 2017 Node.js API collaborators. All Rights Reserved.
 *
 * Use of this source code is governed by a MIT license that can be
 * found in the LICENSE file in the root of the source tree.
 */

// Copyright 2024 The Lynx Authors. All rights reserved.
// Licensed under the Apache License Version 2.0 that can be found in the
// LICENSE file in the root directory of this source tree.

#ifndef SRC_NAPI_COMMON_CODE_CACHE_H_
#define SRC_NAPI_COMMON_CODE_CACHE_H_

#include <cstdint>
#include <cstdio>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

struct CachedData {
  CachedData() : length_(0), data_(nullptr), file_name_("") {}

  CachedData(int length, const uint8_t* data, const std::string& name)
      : length_(length), data_(data), file_name_(name) {}

  // Copy and move objects of this class with care: the destructor owns
  // and frees data_.
  ~CachedData() {
    if (data_) delete[] data_;
  }

  static bool compare(CachedData* left, CachedData* right) {
    if (left->used_times_ > right->used_times_) {
      return true;
    } else if (left->used_times_ < right->used_times_) {
      return false;
    } else {
      return left->length_ > right->length_;
    }
  }

  int used_times_ = 0;
  int length_;
  const uint8_t* data_;
  const std::string file_name_;
};

typedef std::unordered_map<std::string, CachedData*> CacheMap;
typedef std::vector<CachedData*> CacheVector;
#define SHORT_SIZE 2
#define INT_SIZE 4
#define DOUBLE_SIZE 8

class CacheBlob {
 private:
  enum CacheMode { kWriting, kAppending };
  static constexpr double MAGIC = 3.14159265;

 public:
  explicit CacheBlob(const std::string& path, int max_cap = 1 << 20)
      : current_size_(0),
        target_path_(path),
        max_capacity_(max_cap),
        empty_cache_(new CachedData) {}

  virtual ~CacheBlob() {
    for (auto it : cache_map_) delete it.second;
    if (append_vec_) delete append_vec_;
  }

  // NOTE: modifications to CacheBlob may only happen on the worker thread.
  bool insert(const std::string& filename, const uint8_t* data, int length);
  const CachedData* find(const std::string& filename, int* len) const;
  void remove(const std::string& filename);
  void output();
  bool input();
  int size() const { return current_size_; }

#ifdef PROFILE_CODECACHE
  void dump_status(void* p);
#endif  // PROFILE_CODECACHE

 private:
  void write_cache_unit(FILE* file_out, const CachedData* unit);
  void read_cache_unit(FILE* file_in);
  void remove_from_ranking_list(CachedData* target);
  // This function can be time-consuming.
  bool get_enough_space(int data_size);

  CacheMap cache_map_;
  // ranking list for how frequently each cache entry is used.
  CacheVector heat_ranking_;
  int current_size_;
  const std::string target_path_;
  int max_capacity_;
  mutable std::mutex write_mutex_;
  std::unique_ptr<CachedData> empty_cache_;

#ifdef PROFILE_CODECACHE
  mutable int total_query_ = 0;
  mutable int missed_query_ = 0;
  mutable int expired_query_ = 0;
#endif  // PROFILE_CODECACHE
  CacheMode mode_ = kWriting;
  CacheVector* append_vec_ = nullptr;
};

#endif  // SRC_NAPI_COMMON_CODE_CACHE_H_
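
For context, here is a minimal usage sketch of the CacheBlob API declared above, run on a single worker thread as the note in the header requires. The cache path and bundle name are hypothetical, and the inserted buffer comes from new[] because insert() hands ownership to CachedData, whose destructor delete[]s it.

// Sketch only: load an existing blob, look up one entry, insert a fresh
// one, and flush the blob back to disk.
#include <cstdint>
#include <cstring>

#include "code_cache.h"

void UseCodeCache() {
  CacheBlob blob("/data/cache/codecache.bin", 1 << 20);  // hypothetical path
  blob.input();  // rebuild the in-memory map from disk, if the file exists

  int len = 0;
  const CachedData* hit = blob.find("bundle.js", &len);
  if (hit != nullptr && len > 0) {
    // hit->data_ points at len bytes of previously generated code cache.
  }

  // The blob takes ownership of this buffer and will delete[] it later,
  // so it must be allocated with new[].
  const int size = 128;
  uint8_t* bytes = new uint8_t[size];
  std::memset(bytes, 0, size);
  blob.insert("bundle.js", bytes, size);

  blob.output();  // write (or append) the blob to its target path
  // blob's destructor frees every CachedData it still owns.
}
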
