Tuesday 31 December 2019

How can I convert pytorch cpu-based transformation to cuda-based?

The original issue for the code is available here.

I am using this repository for a line segmentation project and I developed this code to take an input (whether image or video), draw road lines on it, and give it as output:

import argparse
import sys
from time import time, clock
from os.path import splitext, basename, exists

from model import SCNN
from utils.check_extension import is_video, is_image
from utils.transforms import *
# I will put all the necessary code for utils.transforms after this

# ------------------------------------------------  SCNN parameters
# Module-level setup: build the SCNN model and the preprocessing pipeline once
# so they can be reused for every frame/image processed in main().
time1 = time()  # wall-clock reference; NOTE(review): appears unused below — confirm
net = SCNN(input_size=(800, 288), pretrained=False)
mean = (0.3598, 0.3653, 0.3662)  # CULane mean, std
std = (0.2573, 0.2663, 0.2756)
transform_img = Resize((800, 288))  # cv2-based resize to the network input size (W, H)
transform_to_net = Compose(ToTensor(), Normalize(mean=mean, std=std))  # HWC uint8 -> normalized CHW float tensor


# ------------------------------------------------  Arguments


def parse_args():
    """Parse command-line options: model weights path, input file, output dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str,
                        default='models/vgg_SCNN_DULR_w9.pth',
                        help='path to vgg models')
    parser.add_argument('--input', type=str, default='demo/line_3.mp4',
                        help='path to image file')
    parser.add_argument('--output', type=str, default='public/',
                        help='path to the output directory')
    return parser.parse_args()


def main():
    """Load the input file, run SCNN lane detection, and write the result.

    Videos are re-encoded frame by frame into an XVID .avi; single images
    are written out as .jpg. Unsupported extensions abort with a message.
    """
    args = parse_args()
    filename, extension = splitext(basename(args.input))

    print("Loading file [{}] ....".format(filename))
    if not exists(args.input):
        print("file [{}] is not recognized".format(args.input))
        sys.exit()


    if is_video(extension):
        video_capture = cv2.VideoCapture()
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        output = args.output + filename + '.avi'

        if video_capture.open(args.input):
            property_id = int(cv2.CAP_PROP_FRAME_COUNT)
            total_frames = int(cv2.VideoCapture.get(video_capture, property_id))
            frame_no = 1
            width, height = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)), \
                            int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = video_capture.get(cv2.CAP_PROP_FPS)
            # Load the checkpoint onto GPU when available. NOTE(review): the
            # input tensor x below is never moved to this device (and neither
            # is net), so inference still runs on CPU — consistent with the
            # ~1.4 s forward pass the author reports.
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            save_dict = torch.load(args.weights, map_location=device)
            net.load_state_dict(save_dict['net'])
            net.eval()

            # can't write out mp4, so try to write into an AVI file
            video_writer = cv2.VideoWriter(output, fourcc, fps, (width, height))
            while video_capture.isOpened():
                start = time()
                ret, frame = video_capture.read()
                if not ret:
                    break
                # Preprocess: BGR->RGB, resize to (800, 288), then build a
                # normalized CHW tensor and add a leading batch dimension.
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = transform_img({'img': frame})['img']
                x = transform_to_net({'img': frame})['img']
                x.unsqueeze_(0)

                stop1 = time()
                print('stop1: ', stop1 - start)

                # Forward pass: per-pixel lane segmentation map plus per-lane
                # existence probabilities (first two outputs of the network).
                seg_pred, exist_pred = net(x)[:2]
                seg_pred = seg_pred.detach().cpu().numpy()
                exist_pred = exist_pred.detach().cpu().numpy()
                seg_pred = seg_pred[0]

                stop2 = time()
                print('stop2: ', stop2 - stop1)

                # Overlay: paint each lane whose existence score > 0.5 in its
                # own color, alpha-blend onto the frame, restore original size.
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                lane_img = np.zeros_like(frame)
                color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
                coord_mask = np.argmax(seg_pred, axis=0)
                for i in range(0, 4):
                    if exist_pred[0, i] > 0.5:
                        lane_img[coord_mask == (i + 1)] = color[i]
                img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=frame, beta=1., gamma=0.)
                img = cv2.resize(img, (width, height))

                stop3 = time()
                print('stop3: ', stop3 - stop2)

                # if frame_no % 20 == 0:
                #     print('# {}/{} frames processed!'.format(frame_no, total_frames))
                frame_no += 1
                video_writer.write(img)
                end = time()
                print('Whole loop: {} seconds'.format(end - start))
                print('------------')
                print('------------')

            print('# All frames processed ')

            video_capture.release()
            video_writer.release()


    elif is_image(extension):
        # Single-image path: same preprocess / predict / overlay pipeline,
        # but the checkpoint is loaded on CPU and the result saved as .jpg.
        img = cv2.imread(args.input)
        height, width, _ = img.shape
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = transform_img({'img': img})['img']
        x = transform_to_net({'img': img})['img']
        x.unsqueeze_(0)


        save_dict = torch.load(args.weights, map_location='cpu')
        net.load_state_dict(save_dict['net'])
        net.eval()

        seg_pred, exist_pred = net(x)[:2]
        seg_pred = seg_pred.detach().cpu().numpy()
        exist_pred = exist_pred.detach().cpu().numpy()
        seg_pred = seg_pred[0]


        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        lane_img = np.zeros_like(img)
        color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
        coord_mask = np.argmax(seg_pred, axis=0)
        for i in range(0, 4):
            if exist_pred[0, i] > 0.5:
                lane_img[coord_mask == (i + 1)] = color[i]
        img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
        img = cv2.resize(img, (width, height))
        output = args.output + filename + '.jpg'

        cv2.imwrite(output, img)

    else:
        print("file format [{}] is not supported".format(args.input))
        sys.exit()


if __name__ == '__main__':
    main()

The code which belongs to Resize, ToTensor, Normalize, Compose is here:

class Compose(CustomTransform):
    """Chain several transforms: calling the composite applies each in order.

    Every transform held by a Compose must accept and return a sample dict.
    """

    def __init__(self, *transforms):
        self.transforms = list(transforms)

    def __call__(self, sample):
        result = sample
        for transform in self.transforms:
            result = transform(result)
        return result

    def __iter__(self):
        yield from self.transforms

    def modules(self):
        # Depth-first enumeration: this composite first, then each child,
        # recursing into nested Compose instances.
        yield self
        for transform in self.transforms:
            if isinstance(transform, Compose):
                yield from transform.modules()
            else:
                yield transform


class Normalize(CustomTransform):
    """Apply channel normalization (via Normalize_th) to sample['img']."""

    def __init__(self, mean, std):
        self.transform = Normalize_th(mean, std)

    def __call__(self, sample):
        # Normalize the image, returning a shallow copy of the sample so
        # the caller's dict is never mutated in place.
        normalized = self.transform(sample.get('img'))
        out = sample.copy()
        out['img'] = normalized
        return out


class ToTensor(CustomTransform):
    """Convert the sample's numpy arrays (image, labels) to torch tensors."""

    def __init__(self, dtype=torch.float):
        self.dtype = dtype

    def __call__(self, sample):
        image = sample.get('img')
        seg_label = sample.get('segLabel', None)
        exist = sample.get('exist', None)

        # HWC -> CHW, then scale 8-bit pixel values into [0, 1].
        tensor_img = torch.from_numpy(image.transpose(2, 0, 1)).type(self.dtype) / 255.
        if seg_label is not None:
            seg_label = torch.from_numpy(seg_label).type(torch.long)
        if exist is not None:
            exist = torch.from_numpy(exist).type(torch.float32)  # BCEloss requires float tensor

        out = sample.copy()
        out['img'] = tensor_img
        out['segLabel'] = seg_label
        out['exist'] = exist
        return out


class Resize(CustomTransform):
    """Resize sample['img'] (cubic) and sample['segLabel'] (nearest) to a fixed size."""

    def __init__(self, size):
        if isinstance(size, int):
            size = (size, size)
        self.size = size  #(W, H)

    def __call__(self, sample):
        out = sample.copy()
        seg_label = sample.get('segLabel', None)
        # Cubic interpolation for the image; nearest-neighbour for the label
        # map so class ids are never blended.
        out['img'] = cv2.resize(sample.get('img'), self.size,
                                interpolation=cv2.INTER_CUBIC)
        if seg_label is not None:
            seg_label = cv2.resize(seg_label, self.size,
                                   interpolation=cv2.INTER_NEAREST)
        out['segLabel'] = seg_label
        return out

    def reset_size(self, size):
        """Change the target size in place; an int means a square (size, size)."""
        self.size = (size, size) if isinstance(size, int) else size

The code works fine, but I found out that it's too slow for testing in a real-time application. I added some time measurements to see if I could find the bottlenecks, and this is the output for one loop:

------------
stop1:  0.002989053726196289
stop2:  1.4032211303710938
stop3:  0.004946708679199219
Whole loop: 1.41636061668396 seconds

These lines happened to be the most computationally expensive lines:

seg_pred, exist_pred = net(x)[:2]
seg_pred = seg_pred.detach().cpu().numpy()
exist_pred = exist_pred.detach().cpu().numpy()
seg_pred = seg_pred[0]

Now I am stuck with this issue that how I can modify the code to improve the computation speed.

Initially I thought of modifying the code to allow cuda computation. I asked the main author how I can modify the code for cuda version in here and he pointed out to these lines:

frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = transform_img({'img': frame})['img']
x = transform_to_net({'img': frame})['img']
x.unsqueeze_(0)

Unfortunately my experience with pytorch is not much, so I am asking for help now.

I hope the information I shared suffices for the readers. Any help would be appreciated

Thanks



from How can I convert pytorch cpu-based transformation to cuda-based?

Laravel - save many related models in saved event not work on update

I have three models to manage roles in my application: Role, User, UserRole.

In User model class there is an HasMany relation and in the database there is a foreign key in UserRole related table on user_code field:

/**
 * One-to-many relation to the user's role assignments, joined on the
 * non-primary-key column pair (UserRole.user_code -> users.code).
 */
public function userRoles(): HasMany
{
    return $this->hasMany('App\Models\UserRole', 'user_code', 'code');
}

I have a method for assign roles after saving an User model:

/**
 * Queue role assignments to be persisted right after this model is saved.
 *
 * Accepts Role instances and/or plain role codes (nested arrays allowed);
 * the actual delete + saveMany runs inside a "saved" model-event listener.
 */
public function assignRole(...$roles)
{
    // Normalize every argument (Role object or plain code) into a UserRole.
    $userRoles = collect($roles)->flatten()->map(function($role) {
        return new UserRole(['code' => $role instanceof Role ? $role->code: $role]);
    });

    // NOTE(review): $this::saved() registers a listener on the CLASS, for
    // every subsequent save of any instance — not just this one. Verify
    // this is intended; it is a plausible source of the reported issue.
    $this::saved(
        function ($object) use ($userRoles) {
            $object->userRoles()->delete();
            $object->userRoles()->saveMany($userRoles);
            $object->load('userRoles');
        }
    );

    return $this;
}

In UserController there is an update action for change roles to an User:

public function update(UpdateUser $request, User $user)
{
    $data = $request->all(); 
    $user->fill($data);
    $user->assignRole($data['roles']);

    if (!$user->save()) {
    ...

The problem is that when I update the $user I get an exception:

Illuminate\Database\QueryException

SQLSTATE[23503]: Foreign key violation...

This is caused by $object->userRoles()->saveMany($userRoles); in saved event.

I think it's due to the fact that when it saves the related models, the updated $user is not yet persisted to the database, so the foreign key constraints on user_code are violated. But the saved event callback **works well on user model create**. Why? And how can I resolve it?



from Laravel - save many related models in saved event not work on update

Submit form without uploading image

I am using this pluging to image upload using ajax http://hayageek.com/docs/jquery-upload-file.php

But here in this pluging there isn't any option to make uploader optional with form submit.

Here is my form code i want to make upload field optional.

currently it is not submitting form when i am not uploading any image to uploader.

    <script type="text/javascript">
    //<![CDATA[


    // Initialize the AJAX file uploader on the warranty comment form.
    jQuery(document).ready(function() {
    var extraObj = jQuery("#warranty-comment-image-upload").uploadFile({
        url: "<?php echo $this->getUrl('warranty/index/savecomment',array('id'=>$params['id'],'key'=>$params['key']));?>",
        fileName: "img",
        showPreview: true,
        // Merge the rest of the form fields into each upload request.
        dynamicFormData: function()
        {
            var data = jQuery('#commentForm').serialize();
            return data;
        },
        previewHeight: "128px",
        returnType:'json',
        uploadStr:"<?php echo  $this->__('Upload files') ?>",
        previewWidth: "128px",
        allowedTypes: "jpg,jpeg,gif,png,pdf",
        autoSubmit: false,
        showDelete: true,

    });
    // Validate with Prototype's Validation (note: $() here is Prototype's
    // by-id lookup, not jQuery), then trigger the upload.
    // NOTE(review): startUpload() only fires the uploader queue — when no
    // file was selected nothing is submitted, which is the reported problem.
    jQuery("#warranty-comment-image-upload-button").click(function() {
         var formToValidate = $('commentForm');
        var validator = new Validation(formToValidate);
        if(validator.validate()) {

            extraObj.startUpload();

        }

    });

});


    //]]>
</script>

Form HTML

<form action="<?php echo $this->getUrl('warranty/index/savecomment',array('id'=>$params['id'],'key'=>$params['key']));?>"  id="commentForm" method="post" enctype="multipart/form-data">

<input type="hidden" name="comment_type" value="<?php echo Mage::helper('warranty')->getCommentType();?>">
<input type="hidden" name="warranty_id" value="<?php echo $this->getRequest()->getParam('id'); ?>">
<div class="fieldset">
    <h2 class="legend"><?php echo $this->__('Warranty Information') ?></h2>

    <?php $warrantyweek = Mage::getStoreConfig('warrantytext/general/warrantytext',Mage::app()->getStore()->getStoreId()); 
         $warrantyMessage = Mage::getStoreConfig('warrantytext/general/warrantymessage',Mage::app()->getStore()->getStoreId()); 
    if($warrantyweek){
    ?>
    <div class="warranty-text"><?php echo $warrantyMessage ?></div>
    <?php } ?>
    <?php if($warrantyExtraMessage = Mage::getStoreConfig('warrantytext/general/warrantyextramessage',Mage::app()->getStore()->getStoreId())): ?>
    <div class="warranty-extra-text"><?php echo $warrantyExtraMessage ?></div>
    <?php endif; ?>

    <ul class="form-list">                                                                 

        <li class="fields">
           <h3 class="tittle"><?php echo $this->__($sectionData['title']) ?></h3>
            <div id="warranty-comment-image-upload" class="extraupload"><?php echo $this->__('Upload Files Here') ?></div>
        </li>
        <li class="wide">
            <label for="comment" class="required"><em>*</em><?php echo $this->__('Comment') ?></label>
            <div class="input-box">
                <textarea name="comment" id="comment" title="<?php echo $this->__('Comment') ?>" class="required-entry input-text" cols="5" rows="3"></textarea>
            </div>
        </li>
    </ul>
</div>
<div class="buttons-set">
    <p class="back-link"><a href="<?php echo $this->escapeUrl($this->getUrl('warranty')) ?>"><small>&laquo; </small><?php echo $this->__('Back') ?></a></p>
    <p class="required"><?php echo $this->__('* Required Fields') ?></p>
    <button id="warranty-comment-image-upload-button" type="button"  title="<?php echo $this->__('Submit') ?>" class="button"><span><span><?php echo $this->__('Submit') ?></span></span></button>
</div>


from Submit form without uploading image

Effective methods for planning new projects (or 3rd party applications)

I often have an issue when starting a project where I don't plan things correctly; it's one thing that really slows me down. Is there a standard or most commonly used method for planning Web Applications specifically (I write in PHP, if that makes a difference)? I know there are many ways to plan a project, but I was hoping to get some advice or a methodology for planning, or if anyone can recommend Project Planning Software that might be useful?

Due to not getting enough upvotes I can't ask anymore questions on this totalitarian like platform. So I had to edit an old question (I have nothing to lose at this point)



from Effective methods for planning new projects (or 3rd party applications)

snakemake cluster script ImportError snakemake.utils

I have a strange issue that comes and goes randomly and I really can't figure out when and why.
I am running a snakemake pipeline like this:

conda activate $myEnv    
snakemake -s $snakefile --configfile test.conf.yml --cluster "python $qsub_script" --latency-wait 60 --use-conda -p -j 10 --jobscript "$job_script"

I installed snakemake 5.9.1 (also tried downgrading to 5.5.4) within a conda environment.
This works fine if I just run this command, but when I qsub this command to the PBS cluster I'm using, I get an error. My qsub script looks like this:

#PBS stuff...

source ~/.bashrc
hostname
conda activate PGC_de_novo

cd $workDir
snakefile="..."
qsub_script="pbs_qsub_snakemake_wrapper.py"
job_script="..."
snakemake -s $snakefile --configfile test.conf.yml --cluster "python $qsub_script" --latency-wait 60 --use-conda -p -j 10 --jobscript "$job_script" >out 2>err

And the error message I get is:

...
Traceback (most recent call last):
  File "/path/to/pbs_qsub_snakemake_wrapper.py", line 6, in <module>
    from snakemake.utils import read_job_properties
ImportError: No module named snakemake.utils
Error submitting jobscript (exit code 1):
...

So it looks like for some reason my cluster script doesn't find snakemake, although snakemake is clearly installed. As I said, this problem keeps coming and going. It'd stay for a few hours, then go away for no apparent reason. I guess this indicates an environment problem, but I really can't figure out what, and I've run out of ideas. I've tried:

  • different conda versions
  • different snakemake versions
  • different nodes on the cluster
  • ssh to the node it just failed on and try to reproduce the error

but nothing. Any ideas where to look? Thanks!



from snakemake cluster script ImportError snakemake.utils

Android App Bundle from React Native: You uploaded an APK or Android App Bundle with invalid or missing signing information for some of its files

I'm trying to publish an app for the first time to the Google Play Store. I've opted in to Google Play Signing. I know for a fact that I'm signing the Android App Bundle with the right key since, when I'm uploading using another key, the Console will tell me to upload using the other key with the specific SHA1 identifier. However, when I do upload with the correct key, I get this error:

You uploaded an APK or Android App Bundle with invalid or missing signing information for some of its files.

I'm building the app using Android Studio like so: Build > Generate Signed Bundle / APK > Android App Bundle > Choosing my keystore and entering the password > release > Finish

The app is a React Native app built with detached Expo / ExpoKit. Uploading to the Apple App Store works just fine, I only have problems with the Play Store. What am I missing?

Update 1: I now completely deleted the app from Google Play and created a new one. Did not opt in for Google Play Signing this time and uploaded a signed APK. Still the same error.

Update 2: Tried with a completely new keystore and key. Still the same.



from Android App Bundle from React Native: You uploaded an APK or Android App Bundle with invalid or missing signing information for some of its files

Dynamic Component inside ngx-datatable row-detail

I'm creating a reusable datatable using ngx-datatable and I would like to have dynamic components rendered inside the row detail. The datatable component receives a component class as an argument from a parent module and I use ComponentFactory to createComponent. I can see that the constructor and the onInit methods are running for the dynamic component but it is not being attached to the DOM.

This is what the datatable html looks like for the row-detail :

 <!-- [Row Detail Template] -->
        <ngx-datatable-row-detail rowHeight="100" #myDetailRow (toggle)="onDetailToggle($event)">
          <ng-template let-row="row" #dynamicPlaceholder let-expanded="expanded" ngx-datatable-row-detail-template>
          </ng-template>
        </ngx-datatable-row-detail>
 <!-- [/Row Detail Template] -->

And this is what my .ts file looks like :

@ViewChild('myDetailRow', {static: true, read: ViewContainerRef}) myDetailRow: ViewContainerRef;
@ViewChild('dynamicPlaceholder', {static: true, read: ViewContainerRef}) dynamicPlaceholder: ViewContainerRef;

renderDynamicComponent(component) {
        var componentFactory = this.componentFactoryResolver.resolveComponentFactory(component);
        var hostViewConRef1 = this.myDetailRow;
        var hostViewConRef2 = this.dynamicPlaceholder;
        hostViewConRef1.createComponent(componentFactory);
        hostViewConRef2.createComponent(componentFactory);
}

Another point is that if my #dynamicPlaceholder ng-template is placed outside of ngx-datatable, it works and the dynamic module is rendered and displayed.



from Dynamic Component inside ngx-datatable row-detail

QFileDialog always opens behind main window

I'm trying to open a file in my PySide2 application, but the file dialog always opens below the main window and appears as another application in the launcher. The application's name is "Portal".

I see other answers where the solution is to pass the main window as the first parameter to getOpenFileName(), but that doesn't work for me.

Here's a simple demonstration of the problem:

import sys
from PySide2.QtWidgets import QPushButton, QFileDialog, QApplication


class DemoButton(QPushButton):
    def __init__(self, text):
        super().__init__(text)
        self.clicked.connect(self.on_click)

    def on_click(self):
        file_name, _ = QFileDialog.getOpenFileName(
            self,
            "Open a text file.",
            filter='Text file (*.txt)')
        print(file_name)


def main():
    app = QApplication(sys.argv)
    button = DemoButton("Hello World")
    button.show()
    app.exec_()
    sys.exit()


main()

I thought maybe the parent had to be a QMainWindow, so I tried that:

import sys

from PySide2 import QtWidgets


class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        main_widget = QtWidgets.QWidget(self)
        self.setCentralWidget(main_widget)

        # layout initialize
        g_layout = QtWidgets.QVBoxLayout()
        layout = QtWidgets.QFormLayout()
        main_widget.setLayout(g_layout)

        # Add Widgets
        self.exec_btn = QtWidgets.QPushButton('Execute')
        self.exec_btn.clicked.connect(self.find_file)

        # global layout setting
        g_layout.addLayout(layout)
        g_layout.addWidget(self.exec_btn)

    def find_file(self):
        file_name, _ = QtWidgets.QFileDialog.getOpenFileName(
            self,
            "Open a text file.",
            filter='Text file (*.txt)')
        print(file_name)


def main():
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    app.exec_()
    sys.exit()


main()

The file dialog behaved exactly the same.

I'm using PySide2 5.12.2, Python 3.6.7, and running on Ubuntu 18.04.



from QFileDialog always opens behind main window

Deserialize nested JSON structures to Django model objects

I need to consume a service that sends JSON responses containing JSON-serialized nested structures, which I would like to deserialize and store in my database - my application uses Django.

Business rules are the following:

  1. The query returns objects which always have an id property which is a unique integer, often a createdAt property and an updatedAt property, both with datetime data, and then several other properties which are primitive types (int, float, str, datetime, etc.), and several properties that can be another object or an array of objects.

  2. In case the property value is an object, then the parent relates to it through a 'foreign key'. In case it's an array of objects, then we have two scenarios: either the objects of the array relate to the parent through a 'foreign key', or the parent and each member of the array are related through a 'many-to-many' relation.

  3. I need to mirror each of those objects in my database, so each model has an id field which is the primary key, but it's not autogenerated, because the real ids will be provided with the imported data.

  4. The relations between all those entities are already mirrored in my model schema. I adopted this approach (mirroring data structure) because if I flatten the received data to save it all into a single table, there will be horrendous replication, defying all data normalization rules.

  5. For every root object, I need to do this:

    • check whether there is already a record in database for that id
    • create a new record in case there isn't
    • update the existing record in case there is already one (update might be skipped if updatedAt values are the same for both the record and the incoming data
    • recursively repeat these same steps for each nested object that is the provided value for one of its parent's properties.

Below I'm reproducing a very simplified sample of the data I receive from the service and the models I in which I want to store it. The real thing is much, much more bulky and complex than that, and that's why I'm so wanting to learn a way of letting the ORM take care of the problem, should it be able to. Hard-coding the whole thing is taking forever, aside of being pretty error-prone and creating a maintenance hell should the data schema change in the future.

JSON sample:

{
  "records": [
    {
      "id": 25228371,
      "createdAt": "2018-07-08 23:00:00",
      "updatedAt": "2019-03-08 09:45:52",
      "field1": "foo",
      "field2": 2,
      "field3": {
        "date": "2019-03-08 09:45:52"
      },
      "entityA": {
        "id": 174,
        "createdAt": "2018-07-08 23:00:00",
        "updatedAt": "2019-03-08 09:45:52",
        "field4": "bar",
        "field5": 1
      },
      "entityB": {
        "id": 6059889,
        "field6": "zoot",
        "field7": {
          "date": "2015-05-11 00:00:00"
        },
        "entityC": {
          "id": 3,
          "field8": "ni"
        },
        "entityD": {
          "id": 20879,
          "createdAt": "2018-07-08 23:00:00",
          "updatedAt": "2019-03-08 09:45:52",
          "field9": "aah",
          "entityE": {
            "id": 7,
            "field10": 4
          },
          "entityE_id": 7
        },
        "entityC_id": 3,
        "entityD_id": 20879
      },
      "entityFvinculations": [
        {
          "id": 3423557,
          "field11": "a newt",
          "field12": {
            "date": "2019-03-08 10:29:19"
          },
          "entityG": {
            "id": 416038854,
            "field13": 0,
            "field14": {
              "date": "2019-03-07 14:45:53"
            }
          }
        },
        {
          "id": 3423579,
          "field11": "a witch",
          "field12": {
            "date": "2019-03-08 10:29:19"
          },
          "entityG": {
            "id": 4160521578,
            "field13": 0,
            "field14": {
              "date": "2019-03-12 11:24:07"
            }
          }
        }
      ],
      "entityA_id": 174,
      "entityB_id": 6059889
    }
  ]
}

Models.py sample:

from django.db.models import *


class EntityRoot(Model):
    # Mirror of the service's root record. The id comes from the remote
    # system, so it is declared explicitly and is NOT auto-generated.
    id = PositiveIntegerField(primary_key=True)
    createdAt = DateTimeField(null=True)
    updatedAt = DateTimeField(null=True)
    field1 = CharField(max_length=100, null=True)
    field2 = PositiveIntegerField(null=True)
    field3 = DateTimeField(null=True)
    entityA = ForeignKey('EntityA', null=True, on_delete=SET_NULL)
    entityB = ForeignKey('EntityB', null=True, on_delete=SET_NULL)


class EntityA(Model):
    # Mirrored entity; primary key supplied by the remote service.
    id = PositiveIntegerField(primary_key=True)
    createdAt = DateTimeField(null=True)
    updatedAt = DateTimeField(null=True)
    field4 = CharField(max_length=100, null=True)
    field5 = PositiveIntegerField(null=True)


class EntityB(Model):
    # Mirrored entity with two nullable foreign keys, matching the nested
    # entityC / entityD objects in the incoming JSON.
    id = PositiveIntegerField(primary_key=True)
    field6 = CharField(max_length=100, null=True)
    field7 = DateTimeField(null=True)
    entityC = ForeignKey('EntityC', null=True, on_delete=SET_NULL)
    entityD = ForeignKey('EntityD', null=True, on_delete=SET_NULL)


class EntityC(Model):
    # Leaf entity; primary key supplied by the remote service.
    id = PositiveIntegerField(primary_key=True)
    field8 = CharField(max_length=100, null=True)


class EntityD(Model):
    # Mirrored entity with a nullable FK to EntityE (nested entityE object).
    id = PositiveIntegerField(primary_key=True)
    createdAt = DateTimeField(null=True)
    updatedAt = DateTimeField(null=True)
    field9 = CharField(max_length=100, null=True)
    entityE = ForeignKey('EntityE', null=True, on_delete=SET_NULL)


class EntityE(Model):
    # Leaf entity; primary key supplied by the remote service.
    id = PositiveIntegerField(primary_key=True)
    field10 = PositiveIntegerField(null=True)


class VinculationEntitiesRootAndF(Model):
    # Explicit join table for the many-to-many between EntityRoot and
    # EntityF (the "entityFvinculations" array in the payload).
    entityRoot = ForeignKey('EntityRoot', on_delete=CASCADE)
    # NOTE(review): field is named entityE but targets 'EntityF' — probable
    # copy-paste slip; renaming would change the DB column, so only flagged.
    entityE = ForeignKey('EntityF', on_delete=CASCADE)


class EntityF(Model):
    # Mirrored entity with a nullable FK to EntityG (nested entityG object).
    id = PositiveIntegerField(primary_key=True)
    field11 = CharField(max_length=100, null=True)
    field12 = DateTimeField(null=True)
    entityG = ForeignKey('EntityG', null=True, on_delete=SET_NULL)


class EntityG(Model):
    """Leaf entity referenced by EntityF; primary key supplied by the service."""

    # BUG FIX: the original line ended with a trailing comma, which made
    # `id` a 1-tuple containing the field instead of a model field — Django
    # would then silently add its own auto `id`, losing the declared
    # service-supplied primary key.
    id = PositiveIntegerField(primary_key=True)
    field13 = PositiveIntegerField(null=True)
    field14 = DateTimeField(null=True)

EDIT (ref. DRF Serializers):

I'm trying to follow Max Malysh I Reinstate Monica's suggestion, and I started to work on a recursive serializer:

from django.db.models import Manager, Model, Field, DateTimeField, ForeignKey
from rest_framework.serializers import ModelSerializer


class RecursiveSerializer(ModelSerializer):
    """DRF serializer that walks a nested payload, recursing into FK sub-dicts.

    The target model is supplied at runtime via its manager, so one class
    serves every entity in the mirrored schema.
    """

    manager: Manager      # default manager of the model this instance targets
    field_dict: dict      # model field name -> Field, used for type dispatch

    def __init__(self, target_manager: Manager, data: dict, **kwargs):
        self.manager = target_manager
        # NOTE(review): Meta is a class-level object shared by ALL instances,
        # so assigning the model here mutates it for every (possibly nested)
        # serializer — a likely cause of the "only the innermost object is
        # created" symptom described below.
        self.Meta.model = self.manager.model
        self.field_dict = {f.name: f for f in self.manager.model._meta.fields}
        instance = None
        data = self.process_data(data)
        pk_name = self.manager.model._meta.pk.name
        # Upsert semantics: bind to the existing row when the incoming pk
        # already exists, otherwise leave instance None so save() creates.
        if pk_name in data:
            try:
                instance = target_manager.get(pk=data[pk_name])
            except target_manager.model.DoesNotExist:
                pass
        super().__init__(instance, data, **kwargs)

    def process_data(self, data: dict):
        """Normalize raw payload values field by field before binding."""
        processed_data = {}
        for name, value in data.items():
            field: Field = self.field_dict.get(name)
            if isinstance(value, dict):
                if isinstance(field, ForeignKey):
                    # Nested object under a FK: recurse with the related
                    # model's default manager.
                    processed_data[name] = self.__class__(field.related_model.objects, data=value)
                    continue
                elif len(value) == 1 and 'date' in value and isinstance(field, DateTimeField):
                    # The service wraps datetimes as {"date": "..."} — unwrap.
                    processed_data[name] = value['date']
                    continue
            processed_data[name] = value
        return processed_data

    class Meta:
        model: Model = None
        fields = '__all__'

However, it does a weird thing: when first run, against an empty database, it only creates the last and most deeply nested object. In the second run, it does nothing and returns a code='unique' validation error saying that such object already exists.

Now I must say I'm quite new to Python and Django (I come from .NET development) and the difficulties I'm facing about this task begin to look very awkward for me. I've been reading docs about Django and DRF, which helped me less than I expected. Yet I refuse to believe aforementioned language and framework lack resources for performing such a trivial operation. So, If I'm missing something very obvious, as it seems, for lack of knowledge of mine, I'll be grateful if someone teaches me what I seem not to know here.



from Deserialize nested JSON structures to Django model objects

How can I allow people to subscribe to my Facebook app to their Page?

I have a Facebook app that can be used for certain businesses. Instead of manually connecting my app and their pages, is it possible for me to create a website where a customer can login through Facebook, select their Page, and connect it with my App with certain subscription permissions? Where do I even begin? I can't find more info on https://developers.facebook.com/

Basically I am trying to have functionality similar to manychat.com just for my own service.



from How can I allow people to subscribe to my Facebook app to their Page?

How can I allow people to subscribe to my Facebook app to their Page?

I have a Facebook app that can be used for certain businesses. Instead of manually connecting my app and their pages, is it possible for me to create a website where a customer can login through Facebook, select their Page, and connect it with my App with certain subscription permissions? Where do I even begin? I can't find more info on https://developers.facebook.com/

Basically I am trying to have functionality similar to manychat.com just for my own service.



from How can I allow people to subscribe to my Facebook app to their Page?

Why is Unity IAP service not working on Android production version?

Something really strange is happening to me with Unity IAP service. Purchases were working correctly until the last version of the app. When uploading the last version, the IAP buttons are not showing any data and are not clickable but nothing has changed in the code that may affect it (in fact the last version includes only minor changes on strings). The error that android studio shows is:

A scripted object (probably UnityEngine.Purchasing.IAPButton?) has a different serialization layout when loading. (Read 32 bytes but expected 624 bytes) Did you #ifdef UNITY_EDITOR a section of your serialized properties in any of your scripts?

But that was not happening in the immediately previous version of the app. I tried to go back to the previous version using Git but the problem persists. Also, nothing was changed recently in that script and I couldn't find any #ifdef condition.

But here is the weirdest thing: when compiling for production and installing the resulting app to a real device, there is no error! Only after uploading the resulting .aab file to the play store it starts to show the error.

Has anybody dealt with this issue?



from Why is Unity IAP service not working on Android production version?

Application Status Bar not working properly

As you can see in this video, at the start only the signal bars and battery percentage are visible, with no time. But when I move to the next controller it only shows the time. Before showing the time it shows a red `touch to return to AppName` overlay. I am using the Swifty Cam library for image and video recording.

Please check the link for the video: Video Link

Any help would be appreciated.

Thanks



from Application Status Bar not working properly

Automatically Importing Data From a Website in Google Cloud

I am trying to find a way to automatically update a big query table using this link: https://www6.sos.state.oh.us/ords/f?p=VOTERFTP:DOWNLOAD::FILE:NO:2:P2_PRODUCT_NUMBER:1

This link is updated with new data every week and I want to be able to replace the Big Query table with this new data. I have researched that you can export spreadsheets to Big Query, but that is not a streamlined approach.

How would I go about submitting a script that imports the data and having that data be fed to Big Query?



from Automatically Importing Data From a Website in Google Cloud

How to Vibrate phone when the screen is off or locked via a background service?

I have a service that checks for updates on my website and I would like it to be able to cause a vibration when a update is found even if the screen is off or locked.

The vibration currently only works if the screen is not off / locked. All other functions do work even if the screen is off / locked.

Vibrator vibrator = (Vibrator) getSystemService(VIBRATOR_SERVICE);
   long[] pattern = new long[]{0, 400, 200, 400};
   if (Build.VERSION.SDK_INT >= 26) { // New API
     vibrator.vibrate(VibrationEffect.createWaveform(pattern,0));
   } else { // Old API
     vibrator.vibrate(pattern, 0);
}

How can I vibrate a phone even if its screen is off? I've tried to use a WakeLock but that doesn't seem to be the problem?

I have all the permissions set, as its working when the screen is on.



from How to Vibrate phone when the screen is off or locked via a background service?

How to create a junction query, that returns a value from the junction itself?

I am trying to query a character with his stats, the problem is that the junction table holds the value of the stat. For example, the character is Fred, the stat is agility, and the value is 10. Meaning Fred has the stat of agility, and his value at it is 10. Is it possible to write a data class with @Relation and Junction to query for this?

I don't see a way to accomplish this.

data class CharacterWithStatsEntity(
    @Embedded val character: CharacterEntity,
    @Relation(
        parentColumn = "id",
        entityColumn = "id",
        entity = StatsEntity::class,
        associateBy = Junction(
            value = CharactersStatsEntity::class,
            parentColumn = "characterId",
            entityColumn = "statsId"
        )
    ) val stats: List<StatsEntity>
)

The code that I am providing is not returning the value from the junction. StatsEntity only holds the stats name, I would need a new entity StatWithValue, that would combine StatEntity and CharactersStatsEntity, and it would hold the stat name and value for the specific character.



from How to create a junction query, that returns a value from the junction itself?

Can't import MultiDexApplication

I have an app built with React Native and I need to enable MultiDex support. My problem is that I can't import the MultiDexApplication class to extend it because at compile time I get a symbol not found error for both the import statement and the class name when extending it in MainApplication.java

build.gradle

    dependencies {
        classpath('com.android.tools.build:gradle:3.5.3')
        classpath 'com.google.gms:google-services:4.2.0'
        classpath "androidx.multidex:multidex:2.0.1"

    }

If I try to add the dependency as implementation, I get the following error when starting the app:

Could not find method implementation() for arguments [androidx.multidex:multidex:2.0.1] on object of type org.gradle.api.internal.artifacts.dsl.dependencies.DefaultDependencyHandler.

In all online resources I saw that the dependency was added as implementation, so I guess that could be my problem.

MainApplication.java

package com.classmanager;

import android.app.Application;
import androidx.multidex.MultiDexApplication;
import android.content.Context;
import com.facebook.react.PackageList;
import com.facebook.react.ReactApplication;
import com.oblador.vectoricons.VectorIconsPackage;
import com.oblador.vectoricons.VectorIconsPackage;
import com.facebook.react.ReactNativeHost;
import com.facebook.react.ReactPackage;
import com.facebook.soloader.SoLoader;
import java.lang.reflect.InvocationTargetException;
import java.util.List;

public class MainApplication extends MultiDexApplication  implements ReactApplication {
  private final ReactNativeHost mReactNativeHost =
      new ReactNativeHost(this) {
        @Override
        public boolean getUseDeveloperSupport() {
          return BuildConfig.DEBUG;
        }

        @Override
        protected List<ReactPackage> getPackages() {
          @SuppressWarnings("UnnecessaryLocalVariable")
          List<ReactPackage> packages = new PackageList(this).getPackages();
          // Packages that cannot be autolinked yet can be added manually here, for example:
          // packages.add(new MyReactNativePackage());
          return packages;
        }

        @Override
        protected String getJSMainModuleName() {
          return "index";
        }
      };

  @Override
  public ReactNativeHost getReactNativeHost() {
    return mReactNativeHost;
  }

  @Override
  public void onCreate() {
    super.onCreate();
    SoLoader.init(this, /* native exopackage */ false);
    initializeFlipper(this); // Remove this line if you don't want Flipper enabled
  }

  //@Override
    //protected void attachBaseContext(Context base) {
       //super.attachBaseContext(base);
       //MultiDex.install(this);
    //}

  /**
   * Loads Flipper in React Native templates.
   *
   * @param context
   */
  private static void initializeFlipper(Context context) {
    if (BuildConfig.DEBUG) {
      try {
        /*
         We use reflection here to pick up the class that initializes Flipper,
        since Flipper library is not available in release mode
        */
        Class<?> aClass = Class.forName("com.facebook.flipper.ReactNativeFlipper");
        aClass.getMethod("initializeFlipper", Context.class).invoke(null, context);
      } catch (ClassNotFoundException e) {
        e.printStackTrace();
      } catch (NoSuchMethodException e) {
        e.printStackTrace();
      } catch (IllegalAccessException e) {
        e.printStackTrace();
      } catch (InvocationTargetException e) {
        e.printStackTrace();
      }
    }
  }
}

It's the default MainApplication from react-native init, except that I imported the MultiDexApplication class and extended it instead of Application. But when I run the app, I get the following error:

    import androidx.multidex.MultiDexApplication;
                        ^
  symbol:   class MultiDexApplication
  location: package androidx.multidex
C:\Users\meadi\WebstormProjects\ClassManager\android\app\src\main\java\com\classmanager\MainApplication.java:16: error: cannot find symbol
public class MainApplication extends MultiDexApplication  implements ReactApplication {
                                     ^
  symbol: class MultiDexApplication
C:\Users\meadi\WebstormProjects\ClassManager\android\app\src\main\java\com\classmanager\MainApplication.java:18: error: incompatible types: MainApplication cannot be converted to Application
      new ReactNativeHost(this) {
                          ^

Any idea why MultiDexApplication couldn't be resolved?

update: I tried adding the multidex dependency as implementation to the app-level build.gradle. It seems now that the class is resolved, but I get this error:

    D8: Cannot fit requested classes in a single dex file (# methods: 100718 > 65536)
com.android.builder.dexing.DexArchiveMergerException: Error while merging dex archives:
The number of method references in a .dex file cannot exceed 64K.

So MultiDex is still not enabled



from Can't import MultiDexApplication

How to optimize database timezones with convert_tz or any other function

I am trying to optimise my mySQL query when handling timezones. My database (mySQL) is set to EET time(+02:00) (I will soon move on AWS where I will use UTC), but in any case, our Cakephp implementation has a setting that retrieves the records as UTC. Our timestamp column his a timestamp type.

So a 2019-12-19 12:44:27 found in our mySQL (+2), is actually 2019-12-19 10:44:27 (UTC) within our CakePHP implementation.

I have created the following query considering a +04:00 timezone.

$company_timezone ='+04:00';
SELECT company_id, COUNT( timestamp ) AS views, url 
FROM behaviour 
WHERE company_id = 1
AND CONVERT_TZ(timestamp,'+00:00','{$company_timezone}')  >= DATE(CONVERT_TZ(NOW(),'+00:00','{$company_timezone}')) 
GROUP BY URL 
ORDER BY views 
DESC LIMIT 20

However this is quite needy in terms of performance. It takes approx 4-5 seconds. Without the convert_tz it takes no more than 0.5 sec.

My question is how can I optimise this? Of course, our timestamp column is indexed, even it doesn't make any sense at the specific query because I use it with convert_tz.

Thank you



from How to optimize database timezones with convert_tz or any other function

Android - How to disable STATE_HALF_EXPANDED state of a bottom sheet

I have a bottom sheet that should go between 2 states, STATE_COLLAPSED and STATE_EXPANDED when it's collapsed the hight should be 200dp and when expanded it will be full screen.

So I'm setting the BottomSheetBehavior with

isFitToContents = false
peekHeight = 200dp

and I'm forced to set a value in halfExpandedRatio otherwise when at STATE_HALF_EXPANDED the bottom sheet will take half of the screen.

I'm working w/ com.google.android.material:material:1.1.0-rc01

Is there a way to disable the STATE_HALF_EXPANDED state?

Or I should actually set skipCollapsed=true, figure out in terms of ratio what 200dp means and work with STATE_HALF_EXPANDED and STATE_EXPANDED instead of STATE_COLLAPSED and STATE_EXPANDED



from Android - How to disable STATE_HALF_EXPANDED state of a bottom sheet

Monday 30 December 2019

Send gtag event from cross domain to parent domain

I'm trying to setup a send event from a iframe originated on my domain and placed on other domain (not mine). I placed the analytics code on the iframe.

<script async src="https://www.googletagmanager.com/gtag/js?id=UA-XXXXXXXX-XX"></script>
<script>
  window.dataLayer = window.dataLayer || [];
  function gtag(){dataLayer.push(arguments);}
  gtag('js', new Date());

  gtag('config', 'UA-XXXXXXXX-XX',{ 'anonymize_ip': true });
</script>

Bellow that analytics code (with the UA-XXXXXXXX-XX from my parentdomain.com), I do a check to see if the iframe is not on my parentdomain.com and then, I set the tracker attribute to the div id ads_close:

<script>
ref = document.referrer;
whitelist = ["parentdomain.com"];
match = false;

for( var i = whitelist.length - 1; i >= 0; i-- ) {

 if( ref.indexOf( whitelist[ i ] ) > -1 ) { match = true; }

}
// If is not the parent domain, then add the onClick atributte to the ID "ads_close"
if( ! match ) {

  refer = document.referrer;
  var str1 ="gtag(\'event\', \'External\', {\'event_category\': \'yes\',\'event_label\': ";
  var str2 = "'";  
  var str3 = refer;
  var str4 = "'";
  var str5 = "});";
  var tracker = str1.concat(str2) + str3 + str4 + str5;
ads_close.setAttribute("onClick", tracker);

 }
</script>

The above code renders this way, IF NOT, on parentdomain.com:

<div class="adspop_close" id="adspop_close" onclick="gtag('event', 'Externos', {'event_category': 'yes','event_label': 'https://www.theotherdomain.com/post/'});"></div>

The problem:

Every time I click on the div with the ID adspop_close, I cannot see the event on my parentdomain.com Google Analytics account...

The question:

What am I doing wrong?



from Send gtag event from cross domain to parent domain

Cannot update credit card using session

I'm trying to update a credit-card using the StripeCheckout, I saw that another user has asked a similar thing here.

The problem's that I'm not able to replicate the session object without passing any amount, I did:

const domainURL = process.env.APP_DOMAIN;

let opts = {
    payment_method_types: ["card"],
    mode: 'setup',
    success_url: `${domainURL}/pay/success?session_id={CHECKOUT_SESSION_ID}`,
    cancel_url: `${domainURL}/pay/cancel`
};

// Make session
let session = await stripe.checkout.sessions.create(opts);

let url = `${domainURL}/pay/checkout-session?sessionId=${session.id}`;
return url;

the code above generate the url for Stripe session, and the following window is opened:

enter image description here

so this seems to works apparently, but when I save the card clicking on "Salva carta" I get:

UnhandledPromiseRejectionWarning: Error: Stripe: Argument "id" must be a string, but got: null (on API request to GET /customers/{id})

I also tried:

 let opts = {
        payment_method_types: ["card"],
        mode: 'setup',
        setup_intent_data: {
            metadata: {
              'customer_id': 'customer id',
            }
        },
        success_url: `${domainURL}/pay/success?session_id={CHECKOUT_SESSION_ID}`,
        cancel_url: `${domainURL}/pay/cancel`
    };

as described here

but same problem



from Cannot update credit card using session

How to take a list and fill with values and put into a dataframe and filter in Python?

I have a list that has a file name and comes with multiple values for each file. I want to put the data into a data frame and filter. So there are 4 files and their values. There should be 5 values per file that start with 'ab ..' or 'uo ..'. However, some entries are missing values, like in file one 'ab +1','co(0)','uo +2.5'.. where 'ab +1','co(0)' is missing 3 additional values. I am looking to exclude those or fill in the missing values with 0's.

values = [
    'file1','ab +5','co(0)','+107','+108','95%','ab +1','co(0)','uo +2.5','co(0)','+107','+107','90%','uo +2.5','co(0)','+107','+110','90%',
    'file2','ab +0.5','co(1)','ab +2.5','co(0)','+106','+102','95%','ab +2.5','co(0)','ab +8.0','co(1)','ab +2.5','co(0)','-106','+102','95%','uo +2.5','co(0)','+107','+107','90%',
    'file3','ab +3','co(0)','+107','+108','85%','co(0)','ab +4','co(0)','+107','+118','95%','uo +12.5','co(0)','+106','+107','90%',
    'file4','ab +3','co(0)','+107','+108','95%','uo +12.5','co(0)','+106','+107','90%'
]

wanted df results:
      0        1     2        3       4      5
0   file1   ab +1   co(0)   +107    +108    95%
1   file1   ab +1   co(0)   nan      nan    nan
2   file1   uo +2.5 co(0)   +107    +107    90%
3   file1   uo +2.5 co(0)   +107    +110    90%
4   file2   ab +0.5 co(1)    nan    nan     nan
5   file2   ab +2.5 co(0)   +106    +102    95%
6   file2   ab +2.5 co(0)   nan     nan     nan
7   file2   ab +8.0 co(1)   nan     nan     nan
8   file2   ab +2.5 co(0)   -106    +102    95%
9   file2   uo +2.5 co(0)   +107    +107    90%
10  file3   ab +3   co(0)   +107    +108    85%
11  file3   ab +4   co(0)   +107    +118    95%
12  file3   uo +12.5co(0)   +106    +107    90%
13  file4   ab +3   co(0)   +107    +108    95%
14  file4   uo +12.5co(0)   +106    +107    90%


from How to take a list and fill with values and put into a dataframe and filter in Python?

Defining different schema for both get and post request - AutoSchema Django Rest Framework

I am trying to define AutoSchema (to be shown in django rest framework swagger) for my REST API in Django REST framework. There's this class that extends APIView.

The class has both 'get' and 'post' methods. Like:

class Profile(APIView):
permission_classes = (permissions.AllowAny,)
schema = AutoSchema(
    manual_fields=[
        coreapi.Field("username",
                      required=True,
                      location='query',
                      description='Username of the user'),

    ]
)
def get(self, request):
    return
schema = AutoSchema(
    manual_fields=[
        coreapi.Field("username",
                      required=True,
                      location='form',
                      description='Username of the user '),
        coreapi.Field("bio",
                      required=True,
                      location='form',
                      description='Bio of the user'),

    ]
)
def post(self, request):
    return

The problem is I want different schema for both get and post request. How can I achieve this using AutoSchema?



from Defining different schema for both get and post request - AutoSchema Django Rest Framework

Fill holes/blocks in masked frames

I want to fill the "holes" in my masked image based on certain criteria. I have the following raw frame:

enter image description here

Next, I apply a mask over this frame which gives masked:

enter image description here

Now I want to divide the frame into 16x16 blocks. In order to define block(x,y), we define a set of pixels in the x-th and y-th block in vertical and horizontal direction. A block is defined as non-field (field) if the percentage of black pixels within the block is larger (smaller) than 0.5. The code looks as follows:

def blocked_img(img, x_pixels, y_pixels):
   blocked = img.copy()
   gray = cv2.cvtColor(blocked, cv2.COLOR_BGR2GRAY)

   for x in range(0, blocked.shape[1], x_pixels):
      for y in range(0, blocked.shape[0], y_pixels):
         block = gray[y:y+y_pixels, x:x+x_pixels]
         if (cv2.countNonZero(block) / block.size) < 0.5:
            # non-field
            cv2.rectangle(blocked, (x, y), (x+x_pixels, y+y_pixels), (0,255,255), 2)
         else:
            # field
            break

    return blocked

masked.shape returns the following:

 (720, 1280, 3)

So, I call the function blocked_img by using:

blocked = blocked_img(masked, 80, 45)     #  divide by 16

The output, blocked, looks as follows:

enter image description here

At the moment, my code is written that as soon as the 0.5-threshold is not reached, it loops to the next column. It can be seen that in the columns on the right, this method does not produce the desirable outcome as it stops prematurely (columns 9, 11, 12, 13, 16 to be precise)

I would like to continue the loop within the column if one the two following is in place:

  • current block is non-field if previous block is non-field AND [next block is non-field OR next_next block is non-field]
  • current block is non-field if [previous_previous block is non-field OR previous block is non-field] AND next block is non-field

Rewritten,

   block(x,y) = 1 if [block(x-1,y) = 1 and {block(x+1,y) = 1 or block(x+2,y) = 1}] or [{block(x-2, y) = 1 or block(x-1, y) = 1} and block(x+1, y) = 1]       # where 1 = non-field

Any idea how I can include this in my code? Thanks in advance!



from Fill holes/blocks in masked frames

revert minify enabled and ProGuard effect on decompiled apk

I have developed an application before and Now I can't find my application source code.
I want to make some changes to my application.

Now I just have my keystore(jks file) which I have use it to sign my application before.
I use online Apk Decompiler to decompile my application but It does not give my desired result. I need it to edit my codes and give it to the customer but It gives me code preview like below with Incomprehensible class and method names.

My decompiled APK folders directory

folder direcotory

My Code Preview

Code preview

I know that this problem is for setting minify enabled to true for release mode in gradle .

Is there any way to revert decompiled code to understandable code or revert it with keystore or decompile application apk file with understandable source code?

I really need to decompile this apk and do some changes on it . Specially application source files.

Any help will be appreciated :)



from revert minify enabled and progaurd efect on decompiled apk

How to make the "slider thumb" on a range slider go outside of the track

I have a vertical range slider and I want the "thumb" (the thing you move on the slider) to be wider than the track itself (very similar to how the android sliders look).

I've tried a lot of stuff and this is the closest I managed to be

This is my code so far:

HTML

<div class="sliderContainer">
   <input type="range">
</div>

JS

$(window).on("load resize", function () {
   var sliderWidth = $('[type=range]').width();

   $('.custom-style-element-related-to-range').remove();

   $('<style class="custom-style-element-related-to-range">input[type="range"]::-webkit-slider-thumb { box-shadow: -' + sliderWidth + 'px 0 0 ' + sliderWidth + 'px;}<style/>').appendTo('head');
});

CSS

.sliderContainer {
    position: absolute;
    margin: 0 auto;
    left: -27px;
    top: 127px;
    width: 0px;
    height: 135px;
}

input[type='range'] {
    width: 120px;
}

@media screen and (-webkit-min-device-pixel-ratio:0) {
    input[type='range'] {
        overflow: hidden;
        -webkit-appearance: none;
        background-color: #D2CECC;
        transform: rotate(270deg);
    }
    input[type='range']::-webkit-slider-runnable-track {
        height: 3px;
        -webkit-appearance: none;
        color: #0098A6;
    }
    input[type='range']::-webkit-slider-thumb {
        width: 10px;
        -webkit-appearance: none;
        height: 10px;
        cursor: pointer;
        background: #434343;
        color: #0098A6;
        border: 10px #0098A6;
        border-radius: 50%;
        margin-top: -3px;
    }
}

input[type="range"]::-moz-range-progress {
    background-color: #0098A6;
}

input[type="range"]::-moz-range-track {
    background-color: #D2CECC;
}

input[type="range"]::-ms-fill-lower {
    background-color: #0098A6;
}

input[type="range"]::-ms-fill-upper {
    background-color: #D2CECC;
}

Any idea on how to progress would be great

EDIT:

This is what I'm trying to achieve:

Objective

I've tried some more stuff and managed to make it so the thumb is "outside", but now the track doesn't change color.

CSS

@media screen and (-webkit-min-device-pixel-ratio:0) {
    input[type='range'] {
        -webkit-appearance: none;
        background-color: #D2CECC;
        transform: rotate(270deg);
    }
    input[type='range']::-webkit-slider-runnable-track {
        height: 3px;
        -webkit-appearance: none;
        color: #0098A6;
    }
    input[type='range']::-webkit-slider-thumb {
        width: 10px;
        -webkit-appearance: none;
        height: 10px;
        cursor: pointer;
        background: #0098A6;
        color: transparent;
        border: 10px #0098A6;
        border-radius: 50%;
        margin-top: -3px;
    }
}


from How to make the "slider thumb" on a range slider go outside of the track

Why immer.js doesn't allow setting dynamic properties on draft?

//I want my action to dispatch payload like
// {type:'update',payload:{'current.contact.mobile':'XXXXXXXXX'}}
//In reducer dynamically select the segment of state update needs to be applied to 
//Below code doesn't work as expected though, draft always remains at same level
draft = dA.key.split('.').reduce((draft, k) => {
  return draft[k]
}, draft);

//Or an ideal syntax may look like below line
draft['current.contact.mobile'] = dA.value;


//Code that works
draft['current']['contact']['mobile'] = dA.value;
I want my action to dispatch payload like {type:'update',payload:{'current.contact.mobile':'XXXXXXXXX'}} And in reducer dynamically select the segment of state that needs to be updated. Is there something fundamentally wrong in doing this, I believe this could make life easier. Is there something that can done to achieve this ?

from Why immer.js doesn't allow setting dynamic properties on draft?

How to make the "slider thumb" on a range slider go outside of the track

I have a vertical range slider and I want the "thumb" (the thing you move on the slider) to be wider than the track itself (very similar to how the android sliders look).

I've tried a lot of stuff and this is the closest I managed to be

This is my code so far:

HTML

<div class="sliderContainer">
   <input type="range">
</div>

JS

$(window).on("load resize", function () {
   var sliderWidth = $('[type=range]').width();

   $('.custom-style-element-related-to-range').remove();

   $('<style class="custom-style-element-related-to-range">input[type="range"]::-webkit-slider-thumb { box-shadow: -' + sliderWidth + 'px 0 0 ' + sliderWidth + 'px;}<style/>').appendTo('head');
});

CSS

.sliderContainer {
    position: absolute;
    margin: 0 auto;
    left: -27px;
    top: 127px;
    width: 0px;
    height: 135px;
}

input[type='range'] {
    width: 120px;
}

@media screen and (-webkit-min-device-pixel-ratio:0) {
    input[type='range'] {
        overflow: hidden;
        -webkit-appearance: none;
        background-color: #D2CECC;
        transform: rotate(270deg);
    }
    input[type='range']::-webkit-slider-runnable-track {
        height: 3px;
        -webkit-appearance: none;
        color: #0098A6;
    }
    input[type='range']::-webkit-slider-thumb {
        width: 10px;
        -webkit-appearance: none;
        height: 10px;
        cursor: pointer;
        background: #434343;
        color: #0098A6;
        border: 10px #0098A6;
        border-radius: 50%;
        margin-top: -3px;
    }
}

input[type="range"]::-moz-range-progress {
    background-color: #0098A6;
}

input[type="range"]::-moz-range-track {
    background-color: #D2CECC;
}

input[type="range"]::-ms-fill-lower {
    background-color: #0098A6;
}

input[type="range"]::-ms-fill-upper {
    background-color: #D2CECC;
}

Any idea on how to progress would be great

EDIT:

This is what I'm trying to achieve:

Objective

I've tried some more stuff and managed to make it so the thumb is "outside" but now the track doesnt change color.

CSS

@media screen and (-webkit-min-device-pixel-ratio:0) {
    input[type='range'] {
        -webkit-appearance: none;
        background-color: #D2CECC;
        transform: rotate(270deg);
    }
    input[type='range']::-webkit-slider-runnable-track {
        height: 3px;
        -webkit-appearance: none;
        color: #0098A6;
    }
    input[type='range']::-webkit-slider-thumb {
        width: 10px;
        -webkit-appearance: none;
        height: 10px;
        cursor: pointer;
        background: #0098A6;
        color: transparent;
        border: 10px #0098A6;
        border-radius: 50%;
        margin-top: -3px;
    }
}


from How to make the "slider thumb" on a range slider go outside of the track

How to let iOS/macOS update our app while its NetworkExtension is running?

We have an app that registers and runs a NetworkExtension (specifically NEPacketTunnelProvider) on both iOS and macOS. This works fine (although not in the iOS simulator, of course, known limitation). However, when the network extension is running, the App Store on macOS/iOS, and TestFlight on iOS, fail to update the app. It looks like they're waiting for the NetworkExtension to terminate, but not actually asking the VPN to disconnect.

We don't want our users to have to think about killing our VPN (which should stay connected whenever possible), but we don't want them to have to think about updates either - they should happen automatically.

Are we missing something? Is there an event we should be listening on to detect that the store is trying to install an app update? Conditions when we should arrange for the VPN to terminate, safe in the knowledge that it will be restarted again after being updated?



from How to let iOS/macOS update our app while its NetworkExtension is running?

Why sql is loading data infinite?

I have two tables for statistics events, and sessions, here is how they look

Here Sessions Table Showing rows 0 - 29 (4 730 018 total))

enter image description here

And here is events table Showing rows 0 - 29 (3686 total)

enter image description here

Now here is data I am displaying enter image description here

Now here is my SQL for displaying data

 SELECT sessions.sid, events.datetime, count(*) as num_rows, count(distinct sessions.sid) as sessions, 
    sum( targetbuttonname = 'kredyt' ) as num_kredyt, 
    sum( targetbuttonname = 'konto' ) as num_konto,
    sum( targetbuttonname = 'czat' ) as num_czat,
    sum( targetbuttonname = 'video-voice_btns' ) as num_voice,
    sum( targetbuttonname = 'video-close_btn' )  as num_close,
    sum( targetbuttonname = 'video-muted_btn' ) as num_muted,
    sum( targetbuttonname = 'video-play_btn' )  as num_play,
    sum( targetbuttonname = 'video-pause_btn' )  as num_pause,
    sum( targetbuttonname = 'video-replay_btn' ) as num_replay, 
    sum(watchtime) as num_watchtime, 
    sum(devicetype ='Computer') as num_computer 
from events INNER JOIN
     sessions
     ON events.sid =sessions.sid;

Now when I have small data everything works fine

Now when I have huge data in a sessions table as you can see above over 4 million data,

And try to run the script above in my PHPmyadmin the request never ends and MariaDB doesn't respond anymore, I have to restart the service manually.

What do I need to do to solve my problem?



from Why sql is loading data infinite?

How to detect that a notification bundle/group was clicked in Android?

In Android Nougat and above, push notifications for your app are grouped together automatically after the # 4th.

The problem in Android is that clicking on the bundle does not expand the push notification list, it opens the app.

I have a requirement to identify users that opened the app through a push notification. For individual notifications this is easy since I can explore intent extras. The problem with the bundle is that extras is null and intent looks exactly the same as if user clicked on launcher icon. I have no way to detect that navigation was done from a push notification :(

Just in case it's not clear: I'm not using push notifications groups explicitly, this is done automatically by Android. I have not set any group key to the notifications.

Not sure if helps somehow but notifications are generated server side and not client side.



from How to detect that a notification bundle/group was clicked in Android?

Blueprism-like spying and bot development

Blueprism gives the possibility to spy elements (like buttons and textboxes) in both web-browsers and windows applications. How can I spy (windows-based only) applications using Python, R, Java, C++, C# or other, anything but not Blueprism, preferrably opensource.

  1. For web-browsers, I know how to do this, without being an expert. Using Python or R, for example, I can use Selenium or RSelenium, to spy elements of a website using different ways such as CSS selector, xpath, ID, Class Name, Tag, Text etc.
  2. But for Applications, I have no clue. BluePrism has mainly two different App spying modes which are WIN32 and Active Accessibility. How can I do this type of spying and interacting with an application outside of Blueprism, preferrably using an opensource language?

(only interested in windows-based apps for now)

The aim is of course to create robots able to navigate the apps as a human would do.



from Blueprism-like spying and bot development

While sharing a vcf file in iOS , the file is not getting attached in mail using swift

I'm trying to share a vcf file using UIActivityViewController. It shares the file with all other options except mail. It just opens the mail composer without any attachment. Here's my code:

// Serialize the contact to a .vcf file in the caches directory and hand the
// file URL to a UIActivityViewController for sharing.
guard let directoryURL = FileManager.default.urls(for: .cachesDirectory, in: .userDomainMask).first else {
        return
        }


        var filename = NSUUID().uuidString



        // NOTE(review): splitting on " " and re-joining with " " is a no-op;
        // presumably this was meant to replace spaces (e.g. with "_") — confirm.
        if let fullname = CNContactFormatter().string(from: contact) {
            filename = fullname.components(separatedBy: " ").joined(separator: " ")
        }

        let fileURL = directoryURL
        .appendingPathComponent(filename)
        .appendingPathExtension("vcf")

        do{
            let data = try CNContactVCardSerialization.data(with: [contact])

            print("filename: \(filename)")
            print("contact: \(String(describing: String(data: data, encoding: String.Encoding.utf8)))")

            try data.write(to: fileURL, options: [.atomicWrite])
        }
        // NOTE(review): on failure this only logs, yet fileURL is still passed to
        // the activity controller below, so Mail can receive a URL with no file
        // behind it. Some share targets may also be unable to read from the app's
        // caches directory — confirm whether a temporary directory behaves better.
        catch{
            print(error.localizedDescription)
        }

        let activityViewController = UIActivityViewController(
            activityItems: [fileURL],
        applicationActivities: nil
        )

        present(activityViewController, animated: true, completion: nil)

I want to attach this contact as a vcf file in mail app when user selects mail option for sharing.



from While sharing a vcf file in iOS , the file is not getting attached in mail using swift

How to apply if condtion and apply to dataframe

Need to check the 'detected' key of bool3_res with key is_doc1 of bool_res and bool_2 res

  1. if bool3_res['detected'] == bool1_res['is_doc1'] == True then my resp has to return

  2. if bool3_res['detected'] == bool2_res['is_doc1'] == True then my resp has to return

3: else return 'Not valid'

Data frame

user_uid,bool1,bool2,bool3,bool1_res,bool2_res,bool3_res
1001,27452.webp,981.webp,d92e.webp,"{'is_doc1': False, 'is_doc2': True}","{'is_doc1': True, 'is_doc2': True}","{'detected': True, 'count': 1}"
1002,27452.webp,981.webp,d92e.webp,"{'is_doc1': True, 'is_doc2': True}","{'is_doc1': False, 'is_doc2': True}","{'detected': True, 'count': 1}"

My code

def new_func(x):
    """Build the response payload for a single dataframe row.

    Parses the stringified result dicts stored in the row and returns:
      * a dict referencing (bool1, bool3) when ``bool1_res['is_doc1']`` and
        ``bool3_res['detected']`` are both True,
      * a dict referencing (bool2, bool3) when ``bool2_res['is_doc1']`` and
        ``bool3_res['detected']`` are both True,
      * the string ``'Not valid'`` otherwise.

    Intended for use as ``df.apply(new_func, axis=1)``.
    """
    import ast  # local import; literal_eval safely parses the repr strings

    # Bug fix: the original read the global df and always d[0] (the FIRST row)
    # instead of the row passed in by apply(), so every row got the same answer
    # and whole columns leaked into the response.
    d1 = ast.literal_eval(x['bool1_res'])
    d2 = ast.literal_eval(x['bool2_res'])
    d3 = ast.literal_eval(x['bool3_res'])

    if d3['detected'] and d1['is_doc1']:
        return {
            "task_id": "uid",
            "group_id": "uid",
            "data": {
                "document1": x['bool1'],
                "document2": x['bool3'],
            },
        }
    if d3['detected'] and d2['is_doc1']:
        return {
            "task_id": "user_uid",
            "group_id": "uid",
            "data": {
                "document1": x['bool2'],
                "document2": x['bool3'],
            },
        }
    # Covers both detected == False and neither is_doc1 flag being set.
    return 'Not valid'
# axis=1 passes each row to new_func as a Series.
df['new'] = df.apply(new_func, axis = 1)
#df['new'] = df[['bool1', 'bool2', 'bool3', 'bool1_res', 'bool2_res', 'bool3_res']].applymap(new_func)

My expected out

df['new']

{'u_id': 'uid', 'group': 'uid', 'data': {'document1': ['981.webp'], 'document2': {'d92e.webp'}}}"
{'u_id': 'uid', 'group': 'uid', 'data': {'document1': ['27452.webp'], 'document2': {'d92e.webp'}}}"

My Out df['new']

0    {'task_id': 'user_uid', 'group_id': 'uid', 'data': {'document1': ['981.webp', '981.webp'], 'document2': ['d92e.webp', 'd92e.webp']}}
1    {'task_id': 'user_uid', 'group_id': 'uid', 'data': {'document1': ['981.webp', '981.webp'], 'document2': ['d92e.webp', 'd92e.webp']}}
Name: new, dtype: object


from How to apply if condtion and apply to dataframe

Slowly Changing Lookup Cache from BigQuery - Dataflow Python Streaming SDK

I am trying to follow the design pattern for Slowly Changing Lookup Cache (https://cloud.google.com/blog/products/gcp/guide-to-common-cloud-dataflow-use-case-patterns-part-1) for a streaming pipeline using the Python SDK for Apache Beam on DataFlow.

Our reference table for the lookup cache sits in BigQuery, and we are able to read and pass it in as a Side Input to the ParDo operation but it does not refresh regardless of how we set up the trigger/windows.

class FilterAlertDoFn(beam.DoFn):
  # Beam DoFn whose side input 'alertlist' carries the BigQuery lookup values.
  def process(self, element, alertlist):

    # Python 2 print statements: dumps the side-input size and content so the
    # author can observe whether the lookup cache ever refreshes.
    print len(alertlist)
    print alertlist

    …  # function logic

# Read the alert reference data from BigQuery, re-window it into the global
# window with a repeated trigger, and extract one key per row for side-input use.
# NOTE(review): the typographic quotes (‘…’) are paste artifacts — Python needs
# plain ASCII quotes. BigQuerySource is a bounded read, so this PCollection is
# produced once; the trigger below cannot make it re-read the table.
alert_input = (p | beam.io.Read(beam.io.BigQuerySource(query=ALERT_QUERY))
                        | ‘alert_side_input’ >> beam.WindowInto(
                            beam.window.GlobalWindows(),
                            trigger=trigger.RepeatedlyTrigger(trigger.AfterWatermark(
                                late=trigger.AfterCount(1)
                            )),
                            accumulation_mode=trigger.AccumulationMode.ACCUMULATING
                          )
                       | beam.Map(lambda elem: elem[‘SOMEKEY’])
)

...

# Every main-input element is filtered against the (static) alert list.
main_input | ‘alerts’ >> beam.ParDo(FilterAlertDoFn(), beam.pvalue.AsList(alert_input))

Based on the I/O page here (https://beam.apache.org/documentation/io/built-in/) it says Python SDK supports streaming for the BigQuery Sink only, does that mean that BQ reads are a bounded source and therefore can’t be refreshed in this method?

Trying to set non-global windows on the source results in an empty PCollection in the Side Input.


UPDATE: When trying to implement the strategy suggested by Pablo's answer, the ParDo operation that uses the side input won't run.

There is a single input source that goes to two outputs, one of them using the Side Input. The Non-SideInput will still reach its destination and the SideInput pipeline won't enter the FilterAlertDoFn().

By substituting the side input for a dummy value the pipeline will enter the function. Is it perhaps waiting for a suitable window that doesn't exist?

With the same FilterAlertDoFn() as above, my side_input and call now look like this:

def refresh_side_input(_):
    """Re-read the lookup values from BigQuery.

    The argument (a trigger element) is ignored; each call performs a fresh
    query and returns the result iterator for use as a side-input value.
    """
    return (
        bigquery.Client(project='gcp-project')
        .query('select col from table')
        .result()
    )


# A Pub/Sub message on the trigger subscription acts as the "refresh now" signal.
trigger_input = ( p | 'alert_ref_trigger' >> beam.io.ReadFromPubSub(
            subscription=known_args.trigger_subscription))


# Each trigger firing maps to a fresh BigQuery read; AsSingleton exposes the
# latest mapped value as the side input.
bigquery_side_input = beam.pvalue.AsSingleton((trigger_input
         | beam.WindowInto(beam.window.GlobalWindows(),
                           trigger=trigger.Repeatedly(trigger.AfterCount(1)),
                           accumulation_mode=trigger.AccumulationMode.DISCARDING)
         | beam.Map(refresh_side_input)
        ))

...

# NOTE(review): the main input's windows must map onto the side input's window
# for the ParDo to fire — presumably a windowing mismatch is why it never runs
# with the real side input but does with the literal [1]; confirm.
# Passing this as side input doesn't work
main_input | 'alerts' >> beam.ParDo(FilterAlertDoFn(), bigquery_side_input)

# Passing dummy variable as side input does work
main_input | 'alerts' >> beam.ParDo(FilterAlertDoFn(), [1])

I tried a few different versions of refresh_side_input(), They report the expect result when checking the return inside the function.


UPDATE 2:

I made some minor modifications to Pablo's code, and I get the same behaviour - the DoFn never executes.

In the below example I will see 'in_load_conversion_data' whenever I post to some_other_topic but will never see 'in_DoFn' when posting to some_topic

import apache_beam as beam
import apache_beam.transforms.window as window

from apache_beam.transforms import trigger
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions


def load_my_conversion_data():
    """Return the hard-coded currency-conversion table (pair -> rate)."""
    rates = {}
    rates['EURUSD'] = 1.1
    rates['USDMXN'] = 4.4
    return rates


def load_conversion_data(_):
    """Return the currency-conversion dict; the argument (a trigger element) is ignored.

    I will suppose that these are currency conversions. E.g.
    {'EURUSD': 1.1, 'USDMXN' 20,}
    """
    # Fix: the bare `print '...'` statement is Python-2-only syntax; the
    # parenthesized form prints the identical line on both Python 2 and 3.
    print('in_load_conversion_data')
    return load_my_conversion_data()


class ConvertTo(beam.DoFn):
    """Beam DoFn converting currency records to ``target_currency``.

    ``rates`` (a side input) is expected to map concatenated currency pairs,
    e.g. 'EURUSD', to a float conversion rate.
    """

    def __init__(self, target_currency):
        self.target_currency = target_currency

    def process(self, elm, rates):
        # Fixes vs. the pasted code:
        #  * `print 'in_DoFn'` was Python-2-only syntax -> parenthesized form
        #  * ' % s % s' (with spaces) rendered ' EUR USD' and could never match
        #    rate keys like 'EURUSD' -> '%s%s'
        #  * {}.update(elm) returns None, so chaining .update(...) raised
        #    AttributeError -> copy then update
        #  * the yield/else lines were mis-indented (SyntaxError)
        print('in_DoFn')
        elm = elm.attributes
        pair = '%s%s' % (elm['currency'], self.target_currency)
        if elm['currency'] == self.target_currency:
            yield elm
        elif pair in rates:
            rate = rates[pair]
            result = dict(elm)
            result.update({'currency': self.target_currency,
                           'value': elm['value'] * rate})
            yield result
        else:
            return  # We drop that value


pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
# NOTE(review): this pipeline object is never used — it is immediately shadowed
# by the `with beam.Pipeline(...)` context below.
p = beam.Pipeline(options=pipeline_options)

some_topic = 'projects/some_project/topics/some_topic'
some_other_topic = 'projects/some_project/topics/some_other_topic'

with beam.Pipeline(options=pipeline_options) as p:

    # Side input: each message on some_other_topic fires the repeated trigger in
    # the global window and reloads the conversion table via load_conversion_data.
    table_pcv = beam.pvalue.AsSingleton((
      p
      | 'some_other_topic' >>  beam.io.ReadFromPubSub(topic=some_other_topic,  with_attributes=True)
      | 'some_other_window' >> beam.WindowInto(window.GlobalWindows(),
                        trigger=trigger.Repeatedly(trigger.AfterCount(1)),
                        accumulation_mode=trigger.AccumulationMode.DISCARDING)
      | beam.Map(load_conversion_data)))


    # Main input: 1-second fixed windows; ConvertTo consumes the rates side input.
    _ = (p | 'some_topic' >> beam.io.ReadFromPubSub(topic=some_topic)
         | 'some_window' >> beam.WindowInto(window.FixedWindows(1))
         | beam.ParDo(ConvertTo('USD'), rates=table_pcv))


from Slowly Changing Lookup Cache from BigQuery - Dataflow Python Streaming SDK

Embed widgets with jupyter-cadquery (threejs): wrong position on load

I am using jupyter-cadquery to visualize some 3D models made with CadQuery.

When visualizing the models on a Jupyter notebook, everything works as expected.

But when trying to embed the widget in an HTML document, it seems the camera, on load, is pointing to (0, 0, 0), not as expected. Once you interact with the widget, the camera will point to the expected coordinate.

Here is the code to reproduce the error and an animation of the mentioned problem (see instructions bellow on how to reproduce it with Binder):

from cadquery import Workplane
from ipywidgets import embed
from jupyter_cadquery.cad_view import CadqueryView
from jupyter_cadquery.cadquery import Assembly
from jupyter_cadquery.cadquery import Part


# Create a simple assembly
box1 = Workplane('XY').box(10, 10, 10).translate((0, 0, 5))
a1 = Assembly([Part(box1)], "example 1")

# Generate HTML
a1.collect_shapes()
view = CadqueryView()
for shape in a1.collect_shapes():
    view.add_shape(shape["name"], shape["shape"], shape["color"])
renderer = view.render()
embed.embed_minimal_html('export.html', views=renderer, title='Renderer')

renderer

output

Note how the view of the cube "jumps" suddenly on interaction.

Could it be an issue with ipywidgets? Since the view is okay when displayed in the notebook.

How could it be fixed?

How to reproduce

You can reproduce it with Binder, without needing to create a local environment (admittedly, installing CadQuery/jupyter-cadquery is not the easiest/fastest thing to do):

https://mybinder.org/v2/gh/bernhard-42/jupyter-cadquery/master?urlpath=lab&filepath=examples%2Fcadquery.ipynb

Just execute the code above in a new empty notebook. See how the renderer shows the 3D model without any issues on the notebook:

Screenshot from 2019-12-23 21-28-42

After execution, an export.html document will also appear in the file list on the left. Open it and make sure to click on the "Trust HTML" button on top of the viewer and hit refresh. If you interact with the view, you can reproduce the issue.

Screenshot from 2019-12-23 21-25-21

Note that, also, the perspective is lost (that is not an orthogonal view). Fixing that would be a plus! ^^



from Embed widgets with jupyter-cadquery (threejs): wrong position on load

RecyclerView Jumps in middle of the list when paginatining

I have this page which I've setUp with mvvm and pagination using recyclerView and Pull to refresh. Clicking on some items in this recyclerView will navigate to another fragment.

My problem is whenever I load the page for the first time and scroll all the way down in works perfectly. Pull to refresh will work as well.

But when I navigate to the other fragment and get back, scroll all the way top, swipe to refresh : The recyclerView will jump to middle of the page( Right on the item where I clicked to navigate to the other fragment)

My Fragment

override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
        super.onViewCreated(view, savedInstanceState)
        // A fresh adapter and layout manager are built on every onViewCreated,
        // i.e. each time the fragment's view is recreated after navigation.
        val adapter = GroupAdapter<ViewHolder>().apply { add(mainSection) }
        val gridLayoutManager = GridLayoutManager(activity, adapter.spanCount)
        gridLayoutManager.spanSizeLookup = adapter.spanSizeLookup
        with(recyclerView) {
            layoutManager = gridLayoutManager
            setHasFixedSize(true)
            // Asks the ViewModel for the next page when the user nears the end.
            addOnScrollListener(PaginationScrollListener {
                viewModel.onEndOfList()
            })
        }
        recyclerView.adapter = adapter


        pullToRefresh.apply {
            setProgressBackgroundColorSchemeColor(
                ContextCompat.getColor(context, R.color.window_level_2)
            )
            setColorSchemeColors(ContextCompat.getColor(context, R.color.brand_primary))
            // Pull-to-refresh resets the ViewModel's paging state and refetches page 0.
            setOnRefreshListener { viewModel.onRefresh() }
        }
    }

My ViewModel

    fun onRefresh() {
        // Reset pagination state and reload from page 0.
        page = 0
        // NOTE(review): the SAME list instance is cleared and re-posted —
        // observers holding a reference to it see their copy mutate in place,
        // and LiveData equality checks cannot detect the change; confirm whether
        // posting a fresh copy is intended.
        widgetItems.clear()
        _widgetListObservable.postValue(widgetItems)
        finishedLoading = false
        isFirstFetch = true
        getItems()
    }

private fun getItems() {
        // Fetch one page from the data source on a background scheduler and
        // append the results to the current widget list.
        isLoading = true
        dataSource.getPage(page)
            .subscribeOn(backgroundThread.getScheduler())
            .observeOn(mainThread.getScheduler())
            .flatMap {
                Flowable.fromIterable(it)
                    .toList()
                    .toFlowable()
            }
            .doAfterTerminate {
                isLoading = false
            }
            // NOTE(review): the Disposable returned by subscribe() is discarded,
            // so this subscription is never disposed with the ViewModel — confirm.
            .subscribe(Consumer {
                // An empty page marks the end of the data set.
                finishedLoading = it.isEmpty()

                if (isFirstFetch && finishedLoading) {
                    _isMyPaymentsEmptyObservable.postValue(true)
                }
                // Same-instance list is mutated then re-posted (see onRefresh note).
                widgetItems.addAll(it)
                _widgetListObservable.postValue(widgetItems)
                page++
                isFirstFetch = false
            }, {
              println(it)
            })
            // NOTE(review): the function's closing brace is missing in this paste.

EDIT: when I remove the onRefreshListener on the swipeToRefresh in the first fragment, it works. I have no idea why this happens.

Thanks for reading.



from RecyclerView Jumps in middle of the list when paginatining

How can I read from a file with Python from a specific location to a specific location?

Currently, I'm doing:

    # Load the whole noise track (text file, one float per line) into memory.
    # NOTE(review): np.fromfile with sep='\n' must parse the entire text file —
    # values have variable width, so there is no way to seek to the k-th sample.
    # Partial reads would require a binary format (e.g. .npy with np.memmap).
    source_noise = np.fromfile('data/noise/' + source + '_16k.dat', sep='\n')
    # Pick a random window of len(audio_array) samples from the noise.
    source_noise_start = np.random.randint(
        0, len(source_noise) - len(audio_array))
    source_noise = source_noise[source_noise_start:
                                source_noise_start + len(audio_array)]

My file looks like:

  -5.3302745e+02
  -5.3985005e+02
  -5.8963920e+02
  -6.5875741e+02
  -5.7371864e+02
  -2.0796765e+02
   2.8152341e+02
   6.5398089e+02
   8.6053581e+02

.. and on and on.

This requires that I read the entire file, when all I want to do is read a part of a file. Is there any way for me to do this with Python that will be FASTER than what I'm doing now?



from How can I read from a file with Python from a specific location to a specific location?

ValueError: [E088] Text of length 1027203 exceeds maximum of 1000000. spacy

I'm trying to create a corpus of words by a text. I use spacy. So there is my code:

import spacy

nlp = spacy.load('fr_core_news_md')

# Read the text, keeping only alphanumeric characters and spaces;
# `with` guarantees the file is closed even if reading fails.
with open("text.txt") as f:
    text = ''.join(ch for ch in f.read() if ch.isalnum() or ch == " ")

# Fix for ValueError E088: raise the limit above the actual text length BEFORE
# calling nlp(). This is memory-safe only if the text fits in RAM anyway, which
# it must, since we already hold it in `text`.
nlp.max_length = max(nlp.max_length, len(text) + 1)
doc = nlp(text)

# Collect unique lemmas. A set gives O(1) membership tests; the original
# `lemma_ not in list` scan was O(n) per token (quadratic overall).
words = set()
for token in doc:
    words.add(token.lemma_)

# Same output as before: a count followed by the sorted unique lemmas.
with open("corpus.txt", 'w') as f:
    f.write("Number of words:" + str(len(words)) + "\n"
            + ''.join(i + "\n" for i in sorted(words)))

But it returns this exception:

ValueError: [E088] Text of length 1027203 exceeds maximum of 1000000. The v2.x parser and NER models require roughly 1GB of temporary memory per 100,000 characters in the input. This means long texts may cause memory allocation errors. If you're not using the parser or NER, it's probably safe to increase the `nlp.max_length` limit. The limit is in number of characters, so you can check whether your inputs are too long by checking `len(text)`.

I tried somthing like this:

import spacy
nlp = spacy.load('fr_core_news_md')
# NOTE(review): spaCy raises E088 when len(text) > nlp.max_length, so setting
# the limit to exactly 1027203 should pass for a text of that length — the
# persisting error suggests the filtered text is actually longer, or the limit
# is applied to a different nlp object than the one called. Setting it strictly
# greater (e.g. len(text) + 1 after reading) would be safer; confirm.
nlp.max_length = 1027203
f = open("text.txt")
# Strip everything except alphanumerics and spaces before parsing.
doc = nlp(''.join(ch for ch in f.read() if ch.isalnum() or ch == " "))
f.close()
del f
# Collect unique lemmas; `not in` on a list is O(n) per token.
words = []
for token in doc:
    if token.lemma_ not in words:
        words.append(token.lemma_)

f = open("corpus.txt", 'w')
f.write("Number of words:" + str(len(words)) + "\n" + ''.join([i + "\n" for i in sorted(words)]))
f.close()

But got the same error:

ValueError: [E088] Text of length 1027203 exceeds maximum of 1000000. The v2.x parser and NER models require roughly 1GB of temporary memory per 100,000 characters in the input. This means long texts may cause memory allocation errors. If you're not using the parser or NER, it's probably safe to increase the `nlp.max_length` limit. The limit is in number of characters, so you can check whether your inputs are too long by checking `len(text)`.

How to fix it?



from ValueError: [E088] Text of length 1027203 exceeds maximum of 1000000. spacy

How To Deploy Angular 7 with Net Core API Separately on Azure

Right now I have a .Net Core Application with Angular 7 as the Client

I have both the API and Client deployed on Azure however I am stuck on how to get the client working on azure where it reaches the .Net Core endpoints. On my login post request I am getting a 404 however it works on my local build... I'm not sure exaclty what I'm doing wrong?

    Request Method: POST
Status Code: 404 Not Found
Referrer Policy: no-referrer-when-downgrade


from How To Deploy Angular 7 with Net Core API Separately on Azure

Twilio fax - How to manage status responses in Wordpress in PHP?

I want to send and manage faxes using PHP only (no JavaScript or cron, if possible) through Twilio from my linux+Apache+Wordpress site. (Development is on a Wordpress site through my localhost using ngrok.)

I have been able to set up a Wordpress page (template) to send faxes using Twilio's fax API. After the fax is sent, though, Twilio is replying with JSON in a POST response (I think), and expecting my site to respond in some way. I have a callback URL in place, but this seems to be an asynchronous call, which I don't know how to handle though Wordpress. (I want to stay within Wordpress site if possible for security and convenience.)

I have no experience with managing this type of communication between servers; I have been reading the Twilio docs, but I think I am making a fundamental mistake somewhere...I get the gist of what needs to be done, but not how it works.

How does managing Twilio's asynchronous calls work using PHP within Wordpress?



from Twilio fax - How to manage status responses in Wordpress in PHP?

HTML Map that Reads from Personal Geodatabase

I am looking for solutions on how to possibly load polygon data from an ESRI Personal Geodatabase (.mdb) to an HTML map?

Ideally I would like to load an HTML file to a web-browser and have it read polygon information from a personal geodatabase as a layer on a map. Users will be adding polygons to the geodatabase, so each time the HTML document is opened, I would like it to read the data each time. I am not particular regarding the basemap (leaflet, google maps, ArcGIS Online), but I would like it to read the data from the geodatabase each time it is opened, so that the information shown is the most current. Ultimately, this HTML document will be embedded within a Microsoft Access database form for the user to interact with, and view previously entered polygons within the Personal Geodatabase.

Goal

To load an HTML Map within a web-browser. The map to display polygons that are read from a personal geodatabase. This map will then be embedded into a Microsoft Access Form for the user to interact with through the web-browser.

Options I have Considered:

Google Maps:

I have looked at using google maps that reads a kml/kmz file with the polygon information from the geodatabase, but this requires me to update the kml/kmz each time a new polygon is entered. A more automated solution would be preferable.

Leaflet

Having researched leaflet, it appears that it is possible to load a map from a file geodatabase, but not a personal geodatabase. Unfortunately, I am handcuffed by the personal geodatabase in this particular scenario (many users, multiple geodatabases, and Microsoft Access forms)

ArcGIS Online

Is it possible to have arcGIS online read from a local file each time it is opened. I realize ArcGIS online requires a zip file, but I could automate the compression of a document. Any suggestions regarding the direction I should take this would be greatly appreciated.

Slippy Maps

I haven't researched this very much, as I feel that I've already gone down the rabbit hole with the other options and am running out of time.

Thank you for taking the time to read my problem, and providing feedback. I really appreciate it.

Kev



from HTML Map that Reads from Personal Geodatabase